diff --git a/contrib/completion/REVIEWERS b/contrib/completion/REVIEWERS
new file mode 100644
index 0000000000..03ee2dde3d
--- /dev/null
+++ b/contrib/completion/REVIEWERS
@@ -0,0 +1,2 @@
+Tianon Gravi (@tianon)
+Jessie Frazelle (@jfrazelle)
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
new file mode 100644
index 0000000000..79209c2941
--- /dev/null
+++ b/contrib/completion/bash/docker
@@ -0,0 +1,4629 @@
+#!/usr/bin/env bash
+#
+# bash completion file for core docker commands
+#
+# This script provides completion of:
+#  - commands and their options
+#  - container ids and names
+#  - image repos and tags
+#  - filepaths
+#
+# To enable the completions either:
+#  - place this file in /etc/bash_completion.d
+#  or
+#  - copy this file to e.g. ~/.docker-completion.sh and add the line
+#    below to your .bashrc after bash completion features are loaded
+#    . ~/.docker-completion.sh
+#
+# Configuration:
+#
+# For several commands, the number of completions can be configured by
+# setting environment variables.
+#
+# DOCKER_COMPLETION_SHOW_CONTAINER_IDS
+# DOCKER_COMPLETION_SHOW_NETWORK_IDS
+# DOCKER_COMPLETION_SHOW_NODE_IDS
+# DOCKER_COMPLETION_SHOW_PLUGIN_IDS
+# DOCKER_COMPLETION_SHOW_SECRET_IDS
+# DOCKER_COMPLETION_SHOW_SERVICE_IDS
+#   "no"  - Show names only (default)
+#   "yes" - Show names and ids
+#
+# You can tailor completion for the "events", "history", "inspect", "run",
+# "rmi" and "save" commands by setting the following environment
+# variables:
+#
+# DOCKER_COMPLETION_SHOW_IMAGE_IDS
+#   "none" - Show names only (default)
+#   "non-intermediate" - Show names and ids, but omit intermediate image IDs
+#   "all" - Show names and ids, including intermediate image IDs
+#
+# DOCKER_COMPLETION_SHOW_TAGS
+#   "yes" - include tags in completion options (default)
+#   "no"  - don't include tags in completion options
+#
+# Note:
+# Currently, the completions will not work if the docker daemon is not
+# bound to the default communication port/socket
+# If the docker daemon is using a unix socket for communication, your user
+# must have access to the socket for the completions to function correctly
+#
+# Note for developers:
+# Please arrange options sorted alphabetically by long name with the short
+# options immediately following their corresponding long form.
+# This order should be applied to lists, alternatives and code blocks.
+
+__docker_previous_extglob_setting=$(shopt -p extglob)
+shopt -s extglob
+
+__docker_q() {
+	docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@"
+}
+
+# __docker_containers returns a list of containers. Additional options to
+# `docker ps` may be specified in order to filter the list, e.g.
+# `__docker_containers --filter status=running`
+# By default, only names are returned.
+# Set DOCKER_COMPLETION_SHOW_CONTAINER_IDS=yes to also complete IDs.
+# An optional first option `--id|--name` may be used to limit the
+# output to the IDs or names of matching items. This setting takes
+# precedence over the environment setting.
+__docker_containers() {
+	local format
+	if [ "$1" = "--id" ] ; then
+		format='{{.ID}}'
+		shift
+	elif [ "$1" = "--name" ] ; then
+		format='{{.Names}}'
+		shift
+	elif [ "${DOCKER_COMPLETION_SHOW_CONTAINER_IDS}" = yes ] ; then
+		format='{{.ID}} {{.Names}}'
+	else
+		format='{{.Names}}'
+	fi
+	__docker_q ps --format "$format" "$@"
+}
+
+# __docker_complete_containers applies completion of containers based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# Additional filters may be appended, see `__docker_containers`.
+__docker_complete_containers() {
+	local current="$cur"
+	if [ "$1" = "--cur" ] ; then
+		current="$2"
+		shift 2
+	fi
+	COMPREPLY=( $(compgen -W "$(__docker_containers "$@")" -- "$current") )
+}
+
+__docker_complete_containers_all() {
+	__docker_complete_containers "$@" --all
+}
+
+__docker_complete_containers_removable() {
+	__docker_complete_containers "$@" --filter status=created --filter status=exited
+}
+
+__docker_complete_containers_running() {
+	__docker_complete_containers "$@" --filter status=running
+}
+
+__docker_complete_containers_stopped() {
+	__docker_complete_containers "$@" --filter status=exited
+}
+
+__docker_complete_containers_unpauseable() {
+	__docker_complete_containers "$@" --filter status=paused
+}
+
+__docker_complete_container_names() {
+	local containers=( $(__docker_q ps -aq --no-trunc) )
+	local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") )
+	names=( "${names[@]#/}" ) # trim off the leading "/" from the container names
+	COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") )
+}
+
+__docker_complete_container_ids() {
+	local containers=( $(__docker_q ps -aq) )
+	COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") )
+}
+
+__docker_images() {
+	local images_args=""
+
+	case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in
+		all)
+			images_args="--no-trunc -a"
+			;;
+		non-intermediate)
+			images_args="--no-trunc"
+			;;
+	esac
+
+	local repo_print_command
+	if [ "${DOCKER_COMPLETION_SHOW_TAGS:-yes}" = "yes" ]; then
+		repo_print_command='print $1; print $1":"$2'
+	else
+		repo_print_command='print $1'
+	fi
+
+	local awk_script
+	case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in
+		all|non-intermediate)
+			awk_script='NR>1 { print $3; if ($1 != "<none>") { '"$repo_print_command"' } }'
+			;;
+		none|*)
+			awk_script='NR>1 && $1 != "<none>" { '"$repo_print_command"' }'
+			;;
+	esac
+
+	__docker_q images $images_args | awk "$awk_script" | grep -v '<none>$'
+}
+
+__docker_complete_images() {
+	COMPREPLY=( $(compgen -W "$(__docker_images)" -- "$cur") )
+	__ltrim_colon_completions "$cur"
+}
+
+__docker_complete_image_repos() {
+	local repos="$(__docker_q images | awk 'NR>1 && $1 != "<none>" { print $1 }')"
+	COMPREPLY=( $(compgen -W "$repos" -- "$cur") )
+}
+
+__docker_complete_image_repos_and_tags() {
+	local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "<none>" { print $1; print $1":"$2 }')"
+	COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") )
+	__ltrim_colon_completions "$cur"
+}
+
+# __docker_networks returns a list of all networks. Additional options to
+# `docker network ls` may be specified in order to filter the list, e.g.
+# `__docker_networks --filter type=custom`
+# By default, only names are returned.
+# Set DOCKER_COMPLETION_SHOW_NETWORK_IDS=yes to also complete IDs.
+# An optional first option `--id|--name` may be used to limit the
+# output to the IDs or names of matching items. This setting takes
+# precedence over the environment setting.
+__docker_networks() {
+	local format
+	if [ "$1" = "--id" ] ; then
+		format='{{.ID}}'
+		shift
+	elif [ "$1" = "--name" ] ; then
+		format='{{.Name}}'
+		shift
+	elif [ "${DOCKER_COMPLETION_SHOW_NETWORK_IDS}" = yes ] ; then
+		format='{{.ID}} {{.Name}}'
+	else
+		format='{{.Name}}'
+	fi
+	__docker_q network ls --format "$format" "$@"
+}
+
+# __docker_complete_networks applies completion of networks based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# Additional filters may be appended, see `__docker_networks`.
+__docker_complete_networks() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_networks "$@")" -- "$current") ) +} + +__docker_complete_containers_in_network() { + local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1") + COMPREPLY=( $(compgen -W "$containers" -- "$cur") ) +} + +# __docker_volumes returns a list of all volumes. Additional options to +# `docker volume ls` may be specified in order to filter the list, e.g. +# `__docker_volumes --filter dangling=true` +# Because volumes do not have IDs, this function does not distinguish between +# IDs and names. +__docker_volumes() { + __docker_q volume ls -q "$@" +} + +# __docker_complete_volumes applies completion of volumes based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_volumes`. +__docker_complete_volumes() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_volumes "$@")" -- "$current") ) +} + +# __docker_plugins_bundled returns a list of all plugins of a given type. +# The type has to be specified with the mandatory option `--type`. +# Valid types are: Network, Volume, Authorization. +# Completions may be added or removed with `--add` and `--remove` +# This function only deals with plugins that come bundled with Docker. +# For plugins managed by `docker plugin`, see `__docker_plugins_installed`. +__docker_plugins_bundled() { + local type add=() remove=() + while true ; do + case "$1" in + --type) + type="$2" + shift 2 + ;; + --add) + add+=("$2") + shift 2 + ;; + --remove) + remove+=("$2") + shift 2 + ;; + *) + break + ;; + esac + done + + local plugins=($(__docker_q info --format "{{range \$i, \$p := .Plugins.$type}}{{.}} {{end}}")) + for del in "${remove[@]}" ; do + plugins=(${plugins[@]/$del/}) + done + echo "${plugins[@]} ${add[@]}" +} + +# __docker_complete_plugins_bundled applies completion of plugins based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# The plugin type has to be specified with the next option `--type`. +# This function only deals with plugins that come bundled with Docker. +# For completion of plugins managed by `docker plugin`, see +# `__docker_complete_plugins_installed`. +__docker_complete_plugins_bundled() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_plugins_bundled "$@")" -- "$current") ) +} + +# __docker_plugins_installed returns a list of all plugins that were installed with +# the Docker plugin API. +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_PLUGIN_IDS=yes to also complete IDs. +# Additional options to `docker plugin ls` may be specified in order to filter the list, +# e.g. `__docker_plugins_installed --filter enabled=true` +# For built-in pugins, see `__docker_plugins_bundled`. +__docker_plugins_installed() { + local format + if [ "$DOCKER_COMPLETION_SHOW_PLUGIN_IDS" = yes ] ; then + format='{{.ID}} {{.Name}}' + else + format='{{.Name}}' + fi + __docker_q plugin ls --format "$format" "$@" +} + +# __docker_complete_plugins_installed applies completion of plugins that were installed +# with the Docker plugin API, based on the current value of `$cur` or the value of +# the optional first option `--cur`, if given. 
+# Additional filters may be appended, see `__docker_plugins_installed`. +# For completion of built-in pugins, see `__docker_complete_plugins_bundled`. +__docker_complete_plugins_installed() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_plugins_installed "$@")" -- "$current") ) +} + +__docker_runtimes() { + __docker_q info | sed -n 's/^Runtimes: \(.*\)/\1/p' +} + +__docker_complete_runtimes() { + COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") ) +} + +# __docker_secrets returns a list of secrets. Additional options to +# `docker secret ls` may be specified in order to filter the list, e.g. +# `__docker_secrets --filter label=stage=production` +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_SECRET_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_secrets() { + local format + if [ "$1" = "--id" ] ; then + format='{{.ID}}' + shift + elif [ "$1" = "--name" ] ; then + format='{{.Name}}' + shift + elif [ "$DOCKER_COMPLETION_SHOW_SECRET_IDS" = yes ] ; then + format='{{.ID}} {{.Name}}' + else + format='{{.Name}}' + fi + + __docker_q secret ls --format "$format" "$@" +} + +# __docker_complete_secrets applies completion of secrets based on the current value +# of `$cur` or the value of the optional first option `--cur`, if given. +__docker_complete_secrets() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_secrets "$@")" -- "$current") ) +} + +# __docker_stacks returns a list of all stacks. +__docker_stacks() { + __docker_q stack ls | awk 'NR>1 {print $1}' +} + +# __docker_complete_stacks applies completion of stacks based on the current value +# of `$cur` or the value of the optional first option `--cur`, if given. +__docker_complete_stacks() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_stacks "$@")" -- "$current") ) +} + +# __docker_nodes returns a list of all nodes. Additional options to +# `docker node ls` may be specified in order to filter the list, e.g. +# `__docker_nodes --filter role=manager` +# By default, only node names are returned. +# Set DOCKER_COMPLETION_SHOW_NODE_IDS=yes to also complete node IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +# Completions may be added with `--add`, e.g. `--add self`. +__docker_nodes() { + local add=() + local fields='$2' # default: node name only + [ "${DOCKER_COMPLETION_SHOW_NODE_IDS}" = yes ] && fields='$1,$2' # ID and name + + while true ; do + case "$1" in + --id) + fields='$1' # IDs only + shift + ;; + --name) + fields='$2' # names only + shift + ;; + --add) + add+=("$2") + shift 2 + ;; + *) + break + ;; + esac + done + + echo $(__docker_q node ls "$@" | tr -d '*' | awk "NR>1 {print $fields}") "${add[@]}" +} + +# __docker_complete_nodes applies completion of nodes based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_nodes`. 
+__docker_complete_nodes() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_nodes "$@")" -- "$current") ) +} + +# __docker_services returns a list of all services. Additional options to +# `docker service ls` may be specified in order to filter the list, e.g. +# `__docker_services --filter name=xxx` +# By default, only node names are returned. +# Set DOCKER_COMPLETION_SHOW_SERVICE_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_services() { + local fields='$2' # default: service name only + [ "${DOCKER_COMPLETION_SHOW_SERVICE_IDS}" = yes ] && fields='$1,$2' # ID & name + + if [ "$1" = "--id" ] ; then + fields='$1' # IDs only + shift + elif [ "$1" = "--name" ] ; then + fields='$2' # names only + shift + fi + __docker_q service ls "$@" | awk "NR>1 {print $fields}" +} + +# __docker_complete_services applies completion of services based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_services`. +__docker_complete_services() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") ) +} + +# __docker_tasks returns a list of all task IDs. +__docker_tasks() { + __docker_q service ps --format '{{.ID}}' "" +} + +# __docker_complete_services_and_tasks applies completion of services and task IDs. +__docker_complete_services_and_tasks() { + COMPREPLY=( $(compgen -W "$(__docker_services "$@") $(__docker_tasks)" -- "$cur") ) +} + +# __docker_append_to_completions appends the word passed as an argument to every +# word in `$COMPREPLY`. +# Normally you do this with `compgen -S` while generating the completions. +# This function allows you to append a suffix later. It allows you to use +# the __docker_complete_XXX functions in cases where you need a suffix. +__docker_append_to_completions() { + COMPREPLY=( ${COMPREPLY[@]/%/"$1"} ) +} + +# __docker_daemon_is_experimental tests whether the currently configured Docker +# daemon runs in experimental mode. If so, the function exits with 0 (true). +# Otherwise, or if the result cannot be determined, the exit value is 1 (false). +__docker_daemon_is_experimental() { + [ "$(__docker_q version -f '{{.Server.Experimental}}')" = "true" ] +} + +# __docker_daemon_os_is tests whether the currently configured Docker daemon runs +# on the operating system passed in as the first argument. +# It does so by querying the daemon for its OS. The result is cached for the duration +# of one invocation of bash completion so that this function can be used to test for +# several different operating systems without additional costs. +# Known operating systems: linux, windows. +__docker_daemon_os_is() { + local expected_os="$1" + local actual_os=${daemon_os=$(__docker_q version -f '{{.Server.Os}}')} + [ "$actual_os" = "$expected_os" ] +} + +# __docker_pos_first_nonflag finds the position of the first word that is neither +# option nor an option's argument. If there are options that require arguments, +# you should pass a glob describing those options, e.g. "--option1|-o|--option2" +# Use this function to restrict completions to exact positions after the argument list. 
+__docker_pos_first_nonflag() { + local argument_flags=$1 + + local counter=$((${subcommand_pos:-${command_pos}} + 1)) + while [ $counter -le $cword ]; do + if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then + (( counter++ )) + # eat "=" in case of --option=arg syntax + [ "${words[$counter]}" = "=" ] && (( counter++ )) + else + case "${words[$counter]}" in + -*) + ;; + *) + break + ;; + esac + fi + + # Bash splits words at "=", retaining "=" as a word, examples: + # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words + while [ "${words[$counter + 1]}" = "=" ] ; do + counter=$(( counter + 2)) + done + + (( counter++ )) + done + + echo $counter +} + +# __docker_map_key_of_current_option returns `key` if we are currently completing the +# value of a map option (`key=value`) which matches the extglob given as an argument. +# This function is needed for key-specific completions. +__docker_map_key_of_current_option() { + local glob="$1" + + local key glob_pos + if [ "$cur" = "=" ] ; then # key= case + key="$prev" + glob_pos=$((cword - 2)) + elif [[ $cur == *=* ]] ; then # key=value case (OSX) + key=${cur%=*} + glob_pos=$((cword - 1)) + elif [ "$prev" = "=" ] ; then + key=${words[$cword - 2]} # key=value case + glob_pos=$((cword - 3)) + else + return + fi + + [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax + + [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" +} + +# __docker_value_of_option returns the value of the first option matching `option_glob`. +# Valid values for `option_glob` are option names like `--log-level` and globs like +# `--log-level|-l` +# Only positions between the command and the current word are considered. +__docker_value_of_option() { + local option_extglob=$(__docker_to_extglob "$1") + + local counter=$((command_pos + 1)) + while [ $counter -lt $cword ]; do + case ${words[$counter]} in + $option_extglob ) + echo ${words[$counter + 1]} + break + ;; + esac + (( counter++ )) + done +} + +# __docker_to_alternatives transforms a multiline list of strings into a single line +# string with the words separated by `|`. +# This is used to prepare arguments to __docker_pos_first_nonflag(). +__docker_to_alternatives() { + local parts=( $1 ) + local IFS='|' + echo "${parts[*]}" +} + +# __docker_to_extglob transforms a multiline list of options into an extglob pattern +# suitable for use in case statements. +__docker_to_extglob() { + local extglob=$( __docker_to_alternatives "$1" ) + echo "@($extglob)" +} + +# __docker_subcommands processes subcommands +# Locates the first occurrence of any of the subcommands contained in the +# first argument. In case of a match, calls the corresponding completion +# function and returns 0. +# If no match is found, 1 is returned. The calling function can then +# continue processing its completion. +# +# TODO if the preceding command has options that accept arguments and an +# argument is equal ot one of the subcommands, this is falsely detected as +# a match. 
+__docker_subcommands() { + local subcommands="$1" + + local counter=$(($command_pos + 1)) + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + $(__docker_to_extglob "$subcommands") ) + subcommand_pos=$counter + local subcommand=${words[$counter]} + local completions_func=_docker_${command}_${subcommand//-/_} + declare -F $completions_func >/dev/null && $completions_func + return 0 + ;; + esac + (( counter++ )) + done + return 1 +} + +# __docker_nospace suppresses trailing whitespace +__docker_nospace() { + # compopt is not available in ancient bash versions + type compopt &>/dev/null && compopt -o nospace +} + +__docker_complete_resolved_hostname() { + command -v host >/dev/null 2>&1 || return + COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) +} + +__docker_local_interfaces() { + command -v ip >/dev/null 2>&1 || return + ip addr show scope global 2>/dev/null | sed -n 's| \+inet \([0-9.]\+\).* \([^ ]\+\)|\1 \2|p' +} + +__docker_complete_local_interfaces() { + local additional_interface + if [ "$1" = "--add" ] ; then + additional_interface="$2" + fi + + COMPREPLY=( $( compgen -W "$(__docker_local_interfaces) $additional_interface" -- "$cur" ) ) +} + +# __docker_complete_capabilities_addable completes Linux capabilities which are +# not granted by default and may be added. +# see https://docs.docker.com/engine/reference/run/#/runtime-privilege-and-linux-capabilities +__docker_complete_capabilities_addable() { + COMPREPLY=( $( compgen -W " + ALL + AUDIT_CONTROL + BLOCK_SUSPEND + DAC_READ_SEARCH + IPC_LOCK + IPC_OWNER + LEASE + LINUX_IMMUTABLE + MAC_ADMIN + MAC_OVERRIDE + NET_ADMIN + NET_BROADCAST + SYS_ADMIN + SYS_BOOT + SYSLOG + SYS_MODULE + SYS_NICE + SYS_PACCT + SYS_PTRACE + SYS_RAWIO + SYS_RESOURCE + SYS_TIME + SYS_TTY_CONFIG + WAKE_ALARM + " -- "$cur" ) ) +} + +# __docker_complete_capabilities_droppable completes Linux capability options which are +# allowed by default and can be dropped. +# see https://docs.docker.com/engine/reference/run/#/runtime-privilege-and-linux-capabilities +__docker_complete_capabilities_droppable() { + COMPREPLY=( $( compgen -W " + ALL + AUDIT_WRITE + CHOWN + DAC_OVERRIDE + FOWNER + FSETID + KILL + MKNOD + NET_BIND_SERVICE + NET_RAW + SETFCAP + SETGID + SETPCAP + SETUID + SYS_CHROOT + " -- "$cur" ) ) +} + +__docker_complete_detach_keys() { + case "$prev" in + --detach-keys) + case "$cur" in + *,) + COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) ) + ;; + esac + + __docker_nospace + return + ;; + esac + return 1 +} + +__docker_complete_isolation() { + COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) ) +} + +__docker_complete_log_drivers() { + COMPREPLY=( $( compgen -W " + awslogs + etwlogs + fluentd + gcplogs + gelf + journald + json-file + logentries + none + splunk + syslog + " -- "$cur" ) ) +} + +__docker_complete_log_options() { + # see repository docker/docker.github.io/engine/admin/logging/ + + # really global options, defined in https://github.com/moby/moby/blob/master/daemon/logger/factory.go + local common_options1="max-buffer-size mode" + # common options defined in https://github.com/moby/moby/blob/master/daemon/logger/loginfo.go + # but not implemented in all log drivers + local common_options2="env env-regex labels" + + # awslogs does not implement the $common_options2. 
+ local awslogs_options="$common_options1 awslogs-create-group awslogs-group awslogs-region awslogs-stream tag" + + local fluentd_options="$common_options1 $common_options2 fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries tag" + local gcplogs_options="$common_options1 $common_options2 gcp-log-cmd gcp-meta-id gcp-meta-name gcp-meta-zone gcp-project" + local gelf_options="$common_options1 $common_options2 gelf-address gelf-compression-level gelf-compression-type tag" + local journald_options="$common_options1 $common_options2 tag" + local json_file_options="$common_options1 $common_options2 max-file max-size" + local logentries_options="$common_options1 $common_options2 logentries-token tag" + local splunk_options="$common_options1 $common_options2 splunk-caname splunk-capath splunk-format splunk-gzip splunk-gzip-level splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url splunk-verify-connection tag" + local syslog_options="$common_options1 $common_options2 syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag" + + local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $logentries_options $json_file_options $syslog_options $splunk_options" + + case $(__docker_value_of_option --log-driver) in + '') + COMPREPLY=( $( compgen -W "$all_options" -S = -- "$cur" ) ) + ;; + awslogs) + COMPREPLY=( $( compgen -W "$awslogs_options" -S = -- "$cur" ) ) + ;; + fluentd) + COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) ) + ;; + gcplogs) + COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) ) + ;; + gelf) + COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) ) + ;; + journald) + COMPREPLY=( $( compgen -W "$journald_options" -S = -- "$cur" ) ) + ;; + json-file) + COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) ) + ;; + logentries) + COMPREPLY=( $( compgen -W "$logentries_options" -S = -- "$cur" ) ) + ;; + syslog) + COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) ) + ;; + splunk) + COMPREPLY=( $( compgen -W "$splunk_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + + __docker_nospace +} + +__docker_complete_log_driver_options() { + local key=$(__docker_map_key_of_current_option '--log-opt') + case "$key" in + awslogs-create-group) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + fluentd-async-connect) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + gelf-address) + COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur##*=}" ) ) + __docker_nospace + return + ;; + gelf-compression-level) + COMPREPLY=( $( compgen -W "1 2 3 4 5 6 7 8 9" -- "${cur##*=}" ) ) + return + ;; + gelf-compression-type) + COMPREPLY=( $( compgen -W "gzip none zlib" -- "${cur##*=}" ) ) + return + ;; + mode) + COMPREPLY=( $( compgen -W "blocking non-blocking" -- "${cur##*=}" ) ) + return + ;; + syslog-address) + COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + syslog-facility) + COMPREPLY=( $( compgen -W " + auth + authpriv + cron + daemon + ftp + kern + local0 + local1 + local2 + local3 + local4 + local5 + local6 + local7 + lpr + mail + news + syslog + user + uucp + " -- "${cur##*=}" ) ) + return + ;; + syslog-format) + COMPREPLY=( $( compgen -W "rfc3164 rfc5424 rfc5424micro" -- "${cur##*=}" ) ) + return + ;; + 
syslog-tls-ca-cert|syslog-tls-cert|syslog-tls-key) + _filedir + return + ;; + syslog-tls-skip-verify) + COMPREPLY=( $( compgen -W "true" -- "${cur##*=}" ) ) + return + ;; + splunk-url) + COMPREPLY=( $( compgen -W "http:// https://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + splunk-gzip|splunk-insecureskipverify|splunk-verify-connection) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + splunk-format) + COMPREPLY=( $( compgen -W "inline json raw" -- "${cur##*=}" ) ) + return + ;; + esac + return 1 +} + +__docker_complete_log_levels() { + COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) +} + +__docker_complete_restart() { + case "$prev" in + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "always no on-failure on-failure: unless-stopped" -- "$cur") ) + ;; + esac + return + ;; + esac + return 1 +} + +# __docker_complete_signals returns a subset of the available signals that is most likely +# relevant in the context of docker containers +__docker_complete_signals() { + local signals=( + SIGCONT + SIGHUP + SIGINT + SIGKILL + SIGQUIT + SIGSTOP + SIGTERM + SIGUSR1 + SIGUSR2 + ) + COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) +} + +__docker_complete_user_group() { + if [[ $cur == *:* ]] ; then + COMPREPLY=( $(compgen -g -- "${cur#*:}") ) + else + COMPREPLY=( $(compgen -u -S : -- "$cur") ) + __docker_nospace + fi +} + +_docker_docker() { + # global options that may appear after the docker command + local boolean_options=" + $global_boolean_options + --help + --version -v + " + + case "$prev" in + --config) + _filedir -d + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + $(__docker_to_extglob "$global_options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" ) + if [ $cword -eq $counter ]; then + __docker_daemon_is_experimental && commands+=(${experimental_commands[*]}) + COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_attach() { + _docker_container_attach +} + +_docker_build() { + _docker_image_build +} + + +_docker_checkpoint() { + local subcommands=" + create + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_checkpoint_create() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help --leave-running" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_checkpoint_ls() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_checkpoint_rm() { + case "$prev" in + --checkpoint-dir) + _filedir -d 
+ return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + elif [ $cword -eq $(($counter + 1)) ]; then + COMPREPLY=( $( compgen -W "$(__docker_q checkpoint ls "$prev" | sed 1d)" -- "$cur" ) ) + fi + ;; + esac +} + + +_docker_container() { + local subcommands=" + attach + commit + cp + create + diff + exec + export + inspect + kill + logs + ls + pause + port + prune + rename + restart + rm + run + start + stats + stop + top + unpause + update + wait + " + local aliases=" + list + ps + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_container_attach() { + __docker_complete_detach_keys && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy=false" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--detach-keys') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_commit() { + case "$prev" in + --author|-a|--change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause=false -p=false" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') + + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_container_cp() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + case "$cur" in + *:) + return + ;; + *) + # combined container and filename completion + _filedir + local files=( ${COMPREPLY[@]} ) + + __docker_complete_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + local containers=( ${COMPREPLY[@]} ) + + COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) ) + if [[ "$COMPREPLY" == *: ]]; then + __docker_nospace + fi + return + ;; + esac + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + if [ -e "$prev" ]; then + __docker_complete_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + else + _filedir + fi + return + fi + ;; + esac +} + +_docker_container_create() { + _docker_container_run_and_create +} + +_docker_container_diff() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_exec() { + __docker_complete_detach_keys && return + + case "$prev" in + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach -d --detach-keys --env -e --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; 
+ esac +} + +_docker_container_export() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_inspect() { + _docker_inspect --type container +} + +_docker_container_kill() { + case "$prev" in + --signal|-s) + __docker_complete_signals + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--details --follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_list() { + _docker_container_ls +} + +_docker_container_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + ancestor) + cur="${cur##*=}" + __docker_complete_images + return + ;; + before) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + expose|publish) + return + ;; + id) + __docker_complete_containers_all --cur "${cur##*=}" --id + return + ;; + health) + COMPREPLY=( $( compgen -W "healthy starting none unhealthy" -- "${cur##*=}" ) ) + return + ;; + is-task) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_containers_all --cur "${cur##*=}" --name + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + since) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + status) + COMPREPLY=( $( compgen -W "created dead exited paused restarting running removing" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "ancestor before exited expose health id is-task label name network publish since status volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --format|--last|-n) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --last -n --latest -l --no-trunc --quiet -q --size -s" -- "$cur" ) ) + ;; + esac +} + +_docker_container_pause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_port() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_prune() { + case "$prev" in + --filter) + COMPREPLY=( $( compgen -W "until" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --filter --help" -- "$cur" ) ) + ;; + esac +} + +_docker_container_ps() { + _docker_container_ls +} + +_docker_container_rename() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + 
+_docker_container_restart() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) + ;; + *) + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --force|-f) + __docker_complete_containers_all + return + ;; + esac + done + __docker_complete_containers_removable + ;; + esac +} + +_docker_container_run() { + _docker_container_run_and_create +} + +# _docker_container_run_and_create is the combined completion for `_docker_container_run` +# and `_docker_container_create` +_docker_container_run_and_create() { + local options_with_args=" + --add-host + --attach -a + --blkio-weight + --blkio-weight-device + --cap-add + --cap-drop + --cgroup-parent + --cidfile + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpuset-cpus + --cpus + --cpuset-mems + --cpu-shares -c + --device + --device-cgroup-rule + --device-read-bps + --device-read-iops + --device-write-bps + --device-write-iops + --dns + --dns-option + --dns-search + --entrypoint + --env -e + --env-file + --expose + --group-add + --health-cmd + --health-interval + --health-retries + --health-start-period + --health-timeout + --hostname -h + --ip + --ip6 + --ipc + --kernel-memory + --label-file + --label -l + --link + --link-local-ip + --log-driver + --log-opt + --mac-address + --memory -m + --memory-swap + --memory-swappiness + --memory-reservation + --mount + --name + --network + --network-alias + --oom-score-adj + --pid + --pids-limit + --publish -p + --restart + --runtime + --security-opt + --shm-size + --stop-signal + --stop-timeout + --storage-opt + --tmpfs + --sysctl + --ulimit + --user -u + --userns + --uts + --volume-driver + --volumes-from + --volume -v + --workdir -w + " + __docker_daemon_os_is windows && options_with_args+=" + --cpu-count + --cpu-percent + --io-maxbandwidth + --io-maxiops + --isolation + " + + local boolean_options=" + --disable-content-trust=false + --help + --init + --interactive -i + --no-healthcheck + --oom-kill-disable + --privileged + --publish-all -P + --read-only + --tty -t + " + + if [ "$command" = "run" -o "$subcommand" = "run" ] ; then + options_with_args="$options_with_args + --detach-keys + " + boolean_options="$boolean_options + --detach -d + --rm + --sig-proxy=false + " + __docker_complete_detach_keys && return + fi + + local all_options="$options_with_args $boolean_options" + + + __docker_complete_log_driver_options && return + __docker_complete_restart && return + + local key=$(__docker_map_key_of_current_option '--security-opt') + case "$key" in + label) + [[ $cur == *: ]] && return + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "${cur##*=}") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + __docker_nospace + fi + return + ;; + seccomp) + local cur=${cur##*=} + _filedir + COMPREPLY+=( $( compgen -W "unconfined" -- "$cur" ) ) + return + ;; + esac + + case "$prev" in + --add-host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --attach|-a) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cap-add) + __docker_complete_capabilities_addable + return + ;; + --cap-drop) + __docker_complete_capabilities_droppable + return + ;; + --cidfile|--env-file|--label-file) + _filedir + return + ;; + --device|--tmpfs|--volume|-v) 
+			case "$cur" in
+				*:*)
+					# TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine)
+					;;
+				'')
+					COMPREPLY=( $( compgen -W '/' -- "$cur" ) )
+					__docker_nospace
+					;;
+				/*)
+					_filedir
+					__docker_nospace
+					;;
+			esac
+			return
+			;;
+		--env|-e)
+			# we do not append a "=" here because "-e VARNAME" is legal syntax, too
+			COMPREPLY=( $( compgen -e -- "$cur" ) )
+			__docker_nospace
+			return
+			;;
+		--ipc)
+			case "$cur" in
+				*:*)
+					cur="${cur#*:}"
+					__docker_complete_containers_running
+					;;
+				*)
+					COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) )
+					if [ "$COMPREPLY" = "container:" ]; then
+						__docker_nospace
+					fi
+					;;
+			esac
+			return
+			;;
+		--isolation)
+			if __docker_daemon_os_is windows ; then
+				__docker_complete_isolation
+				return
+			fi
+			;;
+		--link)
+			case "$cur" in
+				*:*)
+					;;
+				*)
+					__docker_complete_containers_running
+					COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) )
+					__docker_nospace
+					;;
+			esac
+			return
+			;;
+		--log-driver)
+			__docker_complete_log_drivers
+			return
+			;;
+		--log-opt)
+			__docker_complete_log_options
+			return
+			;;
+		--network)
+			case "$cur" in
+				container:*)
+					__docker_complete_containers_all --cur "${cur#*:}"
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") )
+					if [ "${COMPREPLY[*]}" = "container:" ] ; then
+						__docker_nospace
+					fi
+					;;
+			esac
+			return
+			;;
+		--pid)
+			case "$cur" in
+				*:*)
+					__docker_complete_containers_running --cur "${cur#*:}"
+					;;
+				*)
+					COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) )
+					if [ "$COMPREPLY" = "container:" ]; then
+						__docker_nospace
+					fi
+					;;
+			esac
+			return
+			;;
+		--runtime)
+			__docker_complete_runtimes
+			return
+			;;
+		--security-opt)
+			COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") )
+			if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then
+				__docker_nospace
+			fi
+			return
+			;;
+		--stop-signal)
+			__docker_complete_signals
+			return
+			;;
+		--storage-opt)
+			COMPREPLY=( $( compgen -W "size" -S = -- "$cur") )
+			__docker_nospace
+			return
+			;;
+		--user|-u)
+			__docker_complete_user_group
+			return
+			;;
+		--userns)
+			COMPREPLY=( $( compgen -W "host" -- "$cur" ) )
+			return
+			;;
+		--volume-driver)
+			__docker_complete_plugins_bundled --type Volume
+			return
+			;;
+		--volumes-from)
+			__docker_complete_containers_all
+			return
+			;;
+		$(__docker_to_extglob "$options_with_args") )
+			return
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) )
+			;;
+		*)
+			local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) )
+			if [ $cword -eq $counter ]; then
+				__docker_complete_images
+			fi
+			;;
+	esac
+}
+
+_docker_container_start() {
+	__docker_complete_detach_keys && return
+
+	case "$prev" in
+		--checkpoint)
+			if __docker_daemon_is_experimental ; then
+				return
+			fi
+			;;
+		--checkpoint-dir)
+			if __docker_daemon_is_experimental ; then
+				_filedir -d
+				return
+			fi
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			local options="--attach -a --detach-keys --help --interactive -i"
+			__docker_daemon_is_experimental && options+=" --checkpoint --checkpoint-dir"
+			COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
+			;;
+		*)
+			__docker_complete_containers_stopped
+			;;
+	esac
+}
+
+_docker_container_stats() {
+	case "$prev" in
+		--format)
+			return
+			;;
+	esac
+
+	case "$cur" in
+		-*)
+			COMPREPLY=( $( compgen -W "--all -a --format --help --no-stream" -- "$cur" ) )
+			;;
+		*)
+			__docker_complete_containers_running
+ ;; + esac +} + +_docker_container_stop() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_top() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_unpause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_unpauseable + fi + ;; + esac +} + +_docker_container_update() { + local options_with_args=" + --blkio-weight + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpus + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --kernel-memory + --memory -m + --memory-reservation + --memory-swap + --restart + " + + local boolean_options=" + --help + " + + local all_options="$options_with_args $boolean_options" + + __docker_complete_restart && return + + case "$prev" in + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_wait() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + + +_docker_commit() { + _docker_container_commit +} + +_docker_cp() { + _docker_container_cp +} + +_docker_create() { + _docker_container_create +} + +_docker_daemon() { + local boolean_options=" + $global_boolean_options + --disable-legacy-registry + --experimental + --help + --icc=false + --init + --ip-forward=false + --ip-masq=false + --iptables=false + --ipv6 + --live-restore + --raw-logs + --selinux-enabled + --userland-proxy=false + " + local options_with_args=" + $global_options_with_args + --add-runtime + --allow-nondistributable-artifacts + --api-cors-header + --authorization-plugin + --bip + --bridge -b + --cgroup-parent + --cluster-advertise + --cluster-store + --cluster-store-opt + --config-file + --containerd + --data-root + --default-gateway + --default-gateway-v6 + --default-shm-size + --default-ulimit + --dns + --dns-search + --dns-opt + --exec-opt + --exec-root + --fixed-cidr + --fixed-cidr-v6 + --group -G + --init-path + --insecure-registry + --ip + --label + --log-driver + --log-opt + --max-concurrent-downloads + --max-concurrent-uploads + --mtu + --oom-score-adjust + --pidfile -p + --registry-mirror + --seccomp-profile + --shutdown-timeout + --storage-driver -s + --storage-opt + --userland-proxy-path + --userns-remap + " + + __docker_complete_log_driver_options && return + + key=$(__docker_map_key_of_current_option '--cluster-store-opt') + case "$key" in + kv.*file) + cur=${cur##*=} + _filedir + return + ;; + esac + + local key=$(__docker_map_key_of_current_option '--storage-opt') + case "$key" in + dm.blkdiscard|dm.override_udev_sync_check|dm.use_deferred_removal|dm.use_deferred_deletion) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + dm.directlvm_device|dm.thinpooldev) + cur=${cur##*=} + _filedir + return + ;; + dm.fs) + COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --authorization-plugin) + __docker_complete_plugins_bundled 
--type Authorization + return + ;; + --cluster-store) + COMPREPLY=( $( compgen -W "consul etcd zk" -S "://" -- "$cur" ) ) + __docker_nospace + return + ;; + --cluster-store-opt) + COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey|--userland-proxy-path) + _filedir + return + ;; + --exec-root|--data-root) + _filedir -d + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --storage-driver|-s) + COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) + return + ;; + --storage-opt) + local btrfs_options="btrfs.min_space" + local devicemapper_options=" + dm.basesize + dm.blkdiscard + dm.blocksize + dm.directlvm_device + dm.fs + dm.loopdatasize + dm.loopmetadatasize + dm.min_free_space + dm.mkfsarg + dm.mountopt + dm.override_udev_sync_check + dm.thinpooldev + dm.thinp_autoextend_percent + dm.thinp_autoextend_threshold + dm.thinp_metapercent + dm.thinp_percent + dm.use_deferred_deletion + dm.use_deferred_removal + " + local zfs_options="zfs.fsname" + + case $(__docker_value_of_option '--storage-driver|-s') in + '') + COMPREPLY=( $( compgen -W "$btrfs_options $devicemapper_options $zfs_options" -S = -- "$cur" ) ) + ;; + btrfs) + COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) ) + ;; + devicemapper) + COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) + ;; + zfs) + COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + __docker_nospace + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --seccomp-profile) + _filedir json + return + ;; + --userns-remap) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + esac +} + +_docker_deploy() { + __docker_daemon_is_experimental && _docker_stack_deploy +} + +_docker_diff() { + _docker_container_diff +} + +_docker_events() { + _docker_system_events +} + +_docker_exec() { + _docker_container_exec +} + +_docker_export() { + _docker_container_export +} + +_docker_help() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) + fi +} + +_docker_history() { + _docker_image_history +} + + +_docker_image() { + local subcommands=" + build + history + import + inspect + load + ls + prune + pull + push + rm + save + tag + " + local aliases=" + images + list + remove + rmi + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_image_build() { + local options_with_args=" + --add-host + --build-arg + --cache-from + --cgroup-parent + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --cpu-period + --cpu-quota + --file -f + --label + --memory -m + --memory-swap + --network + --shm-size + --tag -t + --ulimit + " + __docker_daemon_os_is windows && options_with_args+=" + --isolation + " + + local boolean_options=" + --compress + --disable-content-trust=false + --force-rm + --help + --no-cache + --pull + --quiet -q + 
--rm + " + __docker_daemon_is_experimental && boolean_options+="--squash" + + local all_options="$options_with_args $boolean_options" + + case "$prev" in + --add-host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --build-arg) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --cache-from) + __docker_complete_image_repos_and_tags + return + ;; + --file|-f) + _filedir + return + ;; + --isolation) + if __docker_daemon_os_is windows ; then + __docker_complete_isolation + return + fi + ;; + --network) + case "$cur" in + container:*) + __docker_complete_containers_all --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + __docker_nospace + fi + ;; + esac + return + ;; + --tag|-t) + __docker_complete_image_repos_and_tags + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + _filedir -d + fi + ;; + esac +} + +_docker_image_history() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format --help --human=false -H=false --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + ;; + esac +} + +_docker_image_images() { + _docker_image_ls +} + +_docker_image_import() { + case "$prev" in + --change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m') + if [ $cword -eq $counter ]; then + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_image_inspect() { + _docker_inspect --type image +} + +_docker_image_load() { + case "$prev" in + --input|-i) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --input -i --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_image_list() { + _docker_image_ls +} + +_docker_image_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + before|since|reference) + cur="${cur##*=}" + __docker_complete_images + return + ;; + dangling) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + label) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "before dangling label reference since" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + =) + return + ;; + *) + __docker_complete_image_repos + ;; + esac +} + +_docker_image_prune() { + case "$prev" in + --filter) + COMPREPLY=( $( compgen -W "until" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --filter --help" -- "$cur" ) ) + ;; + esac +} + +_docker_image_pull() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false 
--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --all-tags|-a) + __docker_complete_image_repos + return + ;; + esac + done + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_remove() { + _docker_image_rm +} + +_docker_image_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_rmi() { + _docker_image_rm +} + +_docker_image_save() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_tag() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + + +_docker_images() { + _docker_image_ls +} + +_docker_import() { + _docker_image_import +} + +_docker_info() { + _docker_system_info +} + +_docker_inspect() { + local preselected_type + local type + + if [ "$1" = "--type" ] ; then + preselected_type=yes + type="$2" + else + type=$(__docker_value_of_option --type) + fi + + case "$prev" in + --format|-f) + return + ;; + --type) + if [ -z "$preselected_type" ] ; then + COMPREPLY=( $( compgen -W "container image network node plugin secret service volume" -- "$cur" ) ) + return + fi + ;; + esac + + case "$cur" in + -*) + local options="--format -f --help --size -s" + if [ -z "$preselected_type" ] ; then + options+=" --type" + fi + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + *) + case "$type" in + '') + COMPREPLY=( $( compgen -W " + $(__docker_containers --all) + $(__docker_images) + $(__docker_networks) + $(__docker_nodes) + $(__docker_plugins_installed) + $(__docker_secrets) + $(__docker_services) + $(__docker_volumes) + " -- "$cur" ) ) + ;; + container) + __docker_complete_containers_all + ;; + image) + __docker_complete_images + ;; + network) + __docker_complete_networks + ;; + node) + __docker_complete_nodes + ;; + plugin) + __docker_complete_plugins_installed + ;; + secret) + __docker_complete_secrets + ;; + service) + __docker_complete_services + ;; + volume) + __docker_complete_volumes + ;; + esac + esac +} + +_docker_kill() { + _docker_container_kill +} + +_docker_load() { + _docker_image_load +} + +_docker_login() { + case "$prev" in + --password|-p|--username|-u) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --password -p --username -u" -- "$cur" ) ) + ;; + esac +} + +_docker_logout() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_logs() { + _docker_container_logs +} + +_docker_network_connect() { + local options_with_args=" + --alias + --ip + --ip6 + --link + --link-local-ip + " + + local boolean_options=" + --help + " + + case "$prev" in + --link) + case "$cur" in + *:*) + ;; + *) + 
__docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_network_create() { + case "$prev" in + --aux-address|--gateway|--internal|--ip-range|--ipam-opt|--ipv6|--opt|-o|--subnet) + return + ;; + --ipam-driver) + COMPREPLY=( $( compgen -W "default" -- "$cur" ) ) + return + ;; + --driver|-d) + # remove drivers that allow one instance only, add drivers missing in `docker info` + __docker_complete_plugins_bundled --type Network --remove host --remove null --add macvlan + return + ;; + --label) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--attachable --aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --ipv6 --label --opt -o --subnet" -- "$cur" ) ) + ;; + esac +} + +_docker_network_disconnect() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_in_network "$prev" + fi + ;; + esac +} + +_docker_network_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --verbose" -- "$cur" ) ) + ;; + *) + __docker_complete_networks + esac +} + +_docker_network_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Network --add macvlan + return + ;; + id) + __docker_complete_networks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_networks --cur "${cur##*=}" --name + return + ;; + scope) + COMPREPLY=( $( compgen -W "global local swarm" -- "${cur##*=}" ) ) + return + ;; + type) + COMPREPLY=( $( compgen -W "builtin custom" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "driver id label name scope type" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_network_prune() { + case "$prev" in + --filter) + COMPREPLY=( $( compgen -W "until" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --filter --help" -- "$cur" ) ) + ;; + esac +} + +_docker_network_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_networks --filter type=custom + esac +} + +_docker_network() { + local subcommands=" + connect + create + disconnect + inspect + ls + prune + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service() { + local 
subcommands=" + create + inspect + logs + ls + rm + scale + ps + update + " + + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service_create() { + _docker_service_update_and_create +} + +_docker_service_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--follow -f --help --no-resolve --no-task-ids --no-trunc --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_services_and_tasks + fi + ;; + esac +} + +_docker_service_list() { + _docker_service_ls +} + +_docker_service_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + mode) + COMPREPLY=( $( compgen -W "global replicated" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label mode name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_service_remove() { + _docker_service_rm +} + +_docker_service_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_scale() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + __docker_append_to_completions "=" + __docker_nospace + ;; + esac +} + +_docker_service_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running shutdown" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + node) + __docker_complete_nodes --cur "${cur##*=}" --add self + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id name node" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + ;; + esac +} + +_docker_service_update() { + _docker_service_update_and_create +} + +# _docker_service_update_and_create is the combined completion for `docker service create` +# and `docker service update` +_docker_service_update_and_create() { + local options_with_args=" + --endpoint-mode + --env -e + --force + --health-cmd + --health-interval + --health-retries + --health-start-period + --health-timeout + --hostname + --label -l + --limit-cpu + --limit-memory + --log-driver + --log-opt + --mount + --network + --replicas + --reserve-cpu + --reserve-memory + 
--restart-condition + --restart-delay + --restart-max-attempts + --restart-window + --rollback-delay + --rollback-failure-action + --rollback-max-failure-ratio + --rollback-monitor + --rollback-parallelism + --stop-grace-period + --stop-signal + --update-delay + --update-failure-action + --update-max-failure-ratio + --update-monitor + --update-parallelism + --user -u + --workdir -w + " + + local boolean_options=" + --help + --no-healthcheck + --read-only + --tty -t + --with-registry-auth + " + + __docker_complete_log_driver_options && return + + if [ "$subcommand" = "create" ] ; then + options_with_args="$options_with_args + --constraint + --container-label + --dns + --dns-option + --dns-search + --env-file + --group + --host + --mode + --name + --placement-pref + --publish -p + --secret + " + + case "$prev" in + --env-file) + _filedir + return + ;; + --group) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + --host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --mode) + COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) ) + return + ;; + --placement-pref) + COMPREPLY=( $( compgen -W "spread" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --secret) + __docker_complete_secrets + return + ;; + esac + fi + if [ "$subcommand" = "update" ] ; then + options_with_args="$options_with_args + --arg + --constraint-add + --constraint-rm + --container-label-add + --container-label-rm + --dns-add + --dns-option-add + --dns-option-rm + --dns-rm + --dns-search-add + --dns-search-rm + --group-add + --group-rm + --host-add + --host-rm + --image + --placement-pref-add + --placement-pref-rm + --publish-add + --publish-rm + --rollback + --secret-add + --secret-rm + " + + case "$prev" in + --group-add|--group-rm) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + --host-add|--host-rm) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --image) + __docker_complete_image_repos_and_tags + return + ;; + --placement-pref-add|--placement-pref-rm) + COMPREPLY=( $( compgen -W "spread" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --secret-add|--secret-rm) + __docker_complete_secrets + return + ;; + esac + fi + + local strategy=$(__docker_map_key_of_current_option '--placement-pref|--placement-pref-add|--placement-pref-rm') + case "$strategy" in + spread) + COMPREPLY=( $( compgen -W "engine.labels node.labels" -S . 
-- "${cur##*=}" ) ) + __docker_nospace + return + ;; + esac + + case "$prev" in + --endpoint-mode) + COMPREPLY=( $( compgen -W "dnsrr vip" -- "$cur" ) ) + return + ;; + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal syntax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --network) + __docker_complete_networks + return + ;; + --restart-condition) + COMPREPLY=( $( compgen -W "any none on-failure" -- "$cur" ) ) + return + ;; + --rollback-failure-action) + COMPREPLY=( $( compgen -W "continue pause" -- "$cur" ) ) + return + ;; + --stop-signal) + __docker_complete_signals + return + ;; + --update-failure-action) + COMPREPLY=( $( compgen -W "continue pause rollback" -- "$cur" ) ) + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ "$subcommand" = "update" ] ; then + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + else + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + fi + ;; + esac +} + +_docker_swarm() { + local subcommands=" + init + join + join-token + leave + unlock + unlock-key + update + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_init() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --data-path-addr --autolock --availability --cert-expiry --dispatcher-heartbeat --external-ca --force-new-cluster --help --listen-addr --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_join() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --token) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --data-path-addr --availability --help --listen-addr --token" -- "$cur" ) ) + ;; + *:) + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + ;; + esac +} + +_docker_swarm_join_token() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W
"--help --quiet -q --rotate" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag ) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_swarm_leave() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock_key() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_update() { + case "$prev" in + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--autolock --cert-expiry --dispatcher-heartbeat --external-ca --help --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_node() { + local subcommands=" + demote + inspect + ls + promote + rm + ps + update + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_node_demote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=manager + esac +} + +_docker_node_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --add self + esac +} + +_docker_node_list() { + _docker_node_ls +} + +_docker_node_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_nodes --cur "${cur##*=}" --id + return + ;; + membership) + COMPREPLY=( $( compgen -W "accepted pending" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_nodes --cur "${cur##*=}" --name + return + ;; + role) + COMPREPLY=( $( compgen -W "manager worker" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label membership name role" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_node_promote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=worker + esac +} + +_docker_node_remove() { + _docker_node_rm +} + +_docker_node_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes + esac +} + +_docker_node_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running shutdown" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --add self 
+ ;; + esac +} + +_docker_node_update() { + case "$prev" in + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --role) + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + return + ;; + --label-add|--label-rm) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--availability --help --label-add --label-rm --role" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--availability|--label-add|--label-rm|--role') + if [ $cword -eq $counter ]; then + __docker_complete_nodes + fi + ;; + esac +} + +_docker_pause() { + _docker_container_pause +} + +_docker_plugin() { + local subcommands=" + create + disable + enable + inspect + install + ls + push + rm + set + upgrade + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_create() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--compress --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + # reponame + return + elif [ $cword -eq $((counter + 1)) ]; then + _filedir -d + fi + ;; + esac +} + +_docker_plugin_disable() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed --filter enabled=true + fi + ;; + esac +} + +_docker_plugin_enable() { + case "$prev" in + --timeout) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --timeout" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--timeout') + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed --filter enabled=false + fi + ;; + esac +} + +_docker_plugin_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_install() { + case "$prev" in + --alias) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--alias --disable --disable-content-trust=false --grant-all-permissions --help" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_list() { + _docker_plugin_ls +} + +_docker_plugin_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + capability) + COMPREPLY=( $( compgen -W "authz ipamdriver networkdriver volumedriver" -- "${cur##*=}" ) ) + return + ;; + enabled) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "capability enabled" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_remove() { + _docker_plugin_rm +} + +_docker_plugin_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) +
__docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_set() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_upgrade() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--disable-content-trust --grant-all-permissions --help --skip-remote-check" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + __ltrim_colon_completions "$cur" + elif [ $cword -eq $((counter + 1)) ]; then + local plugin_images="$(__docker_plugins_installed)" + COMPREPLY=( $(compgen -S : -W "${plugin_images%:*}" -- "$cur") ) + __docker_nospace + fi + ;; + esac +} + + +_docker_port() { + _docker_container_port +} + +_docker_ps() { + _docker_container_ls +} + +_docker_pull() { + _docker_image_pull +} + +_docker_push() { + _docker_image_push +} + +_docker_rename() { + _docker_container_rename +} + +_docker_restart() { + _docker_container_restart +} + +_docker_rm() { + _docker_container_rm +} + +_docker_rmi() { + _docker_image_rm +} + +_docker_run() { + _docker_container_run +} + +_docker_save() { + _docker_image_save +} + + +_docker_secret() { + local subcommands=" + create + inspect + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_create() { + case "$prev" in + --label|-l) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --label -l" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_list() { + _docker_secret_ls +} + +_docker_secret_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_secrets --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_secrets --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format --filter -f --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_remove() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_rm() { + _docker_secret_remove +} + + + +_docker_search() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + is-automated) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + is-official) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) ) + __docker_nospace + return + ;; + --limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter --help --limit --no-trunc" -- "$cur" ) ) + ;; + esac +} + + +_docker_stack() { + local subcommands=" + deploy + ls + ps + rm + services + " + local aliases=" + down 
+ list + remove + up + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_deploy() { + case "$prev" in + --bundle-file) + if __docker_daemon_is_experimental ; then + _filedir dab + return + fi + ;; + --compose-file|-c) + _filedir yml + return + ;; + esac + + case "$cur" in + -*) + local options="--compose-file -c --help --prune --with-registry-auth" + __docker_daemon_is_experimental && options+=" --bundle-file" + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_down() { + _docker_stack_rm +} + +_docker_stack_list() { + _docker_stack_ls +} + +_docker_stack_ls() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format --help" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running shutdown" -- "${cur##*=}" ) ) + return + ;; + id) + __docker_complete_stacks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_stacks --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id name desired-state" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_remove() { + _docker_stack_rm +} + +_docker_stack_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_stacks + esac +} + +_docker_stack_services() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + label) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f|--format') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_up() { + _docker_stack_deploy +} + + +_docker_start() { + _docker_container_start +} + +_docker_stats() { + _docker_container_stats +} + +_docker_stop() { + _docker_container_stop +} + + +_docker_system() { + local subcommands=" + df + events + info + prune + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_system_df() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format --help --verbose -v" -- "$cur" ) ) + ;; + esac +} + +_docker_system_events() { + local key=$(__docker_map_key_of_current_option '-f|--filter') + case "$key" in + container) + __docker_complete_containers_all --cur "${cur##*=}" + return 
+ ;; + daemon) + local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p') + COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) ) + return + ;; + event) + COMPREPLY=( $( compgen -W " + attach + commit + connect + copy + create + delete + destroy + detach + die + disconnect + exec_create + exec_detach + exec_start + export + health_status + import + kill + load + mount + oom + pause + pull + push + reload + rename + resize + restart + save + start + stop + tag + top + unmount + unpause + untag + update + " -- "${cur##*=}" ) ) + return + ;; + image) + cur="${cur##*=}" + __docker_complete_images + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + type) + COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --since|--until) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --since --until --format" -- "$cur" ) ) + ;; + esac +} + +_docker_system_info() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_system_prune() { + case "$prev" in + --filter) + COMPREPLY=( $( compgen -W "until" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --filter --help" -- "$cur" ) ) + ;; + esac +} + + +_docker_tag() { + _docker_image_tag +} + +_docker_unpause() { + _docker_container_unpause +} + +_docker_update() { + _docker_container_update +} + +_docker_top() { + _docker_container_top +} + +_docker_version() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_create() { + case "$prev" in + --driver|-d) + __docker_complete_plugins_bundled --type Volume + return + ;; + --label|--opt|-o) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--driver -d --help --label --opt -o" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume_list() { + _docker_volume_ls +} + +_docker_volume_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + dangling) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Volume + return + ;; + name) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "dangling driver label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_remove() { + _docker_volume_rm +} + +_docker_volume_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" 
-- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume() { + local subcommands=" + create + inspect + ls + prune + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_wait() { + _docker_container_wait +} + +_docker() { + local previous_extglob_setting=$(shopt -p extglob) + shopt -s extglob + + local management_commands=( + container + image + network + node + plugin + secret + service + stack + system + volume + ) + + local top_level_commands=( + build + login + logout + run + search + version + ) + + local legacy_commands=( + attach + commit + cp + create + diff + events + exec + export + history + images + import + info + inspect + kill + load + logs + pause + port + ps + pull + push + rename + restart + rm + rmi + save + start + stats + stop + swarm + tag + top + unpause + update + wait + ) + + local experimental_commands=( + checkpoint + deploy + ) + + local commands=(${management_commands[*]} ${top_level_commands[*]}) + [ -z "$DOCKER_HIDE_LEGACY_COMMANDS" ] && commands+=(${legacy_commands[*]}) + + # These options are valid as global options for all client commands + # and valid as command options for `docker daemon` + local global_boolean_options=" + --debug -D + --tls + --tlsverify + " + local global_options_with_args=" + --config + --host -H + --log-level -l + --tlscacert + --tlscert + --tlskey + " + + local host config daemon_os + + COMPREPLY=() + local cur prev words cword + _get_comp_words_by_ref -n : cur prev words cword + + local command='docker' command_pos=0 subcommand_pos + local counter=1 + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + # save host so that completion can use custom daemon + --host|-H) + (( counter++ )) + host="${words[$counter]}" + ;; + # save config so that completion can use custom configuration directories + --config) + (( counter++ )) + config="${words[$counter]}" + ;; + $(__docker_to_extglob "$global_options_with_args") ) + (( counter++ )) + ;; + -*) + ;; + =) + (( counter++ )) + ;; + *) + command="${words[$counter]}" + command_pos=$counter + break + ;; + esac + (( counter++ )) + done + + local binary="${words[0]}" + if [[ $binary == ?(*/)dockerd ]] ; then + # for the dockerd binary, we reuse completion of `docker daemon`. + # dockerd does not have subcommands and global options. 
+ command=daemon + command_pos=0 + fi + + local completions_func=_docker_${command//-/_} + declare -F $completions_func >/dev/null && $completions_func + + eval "$previous_extglob_setting" + return 0 +} + +eval "$__docker_previous_extglob_setting" +unset __docker_previous_extglob_setting + +complete -F _docker docker docker.exe dockerd dockerd.exe diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish new file mode 100644 index 0000000000..38957b8b77 --- /dev/null +++ b/contrib/completion/fish/docker.fish @@ -0,0 +1,409 @@ +# docker.fish - docker completions for fish shell +# +# This file is generated by gen_docker_fish_completions.py from: +# https://github.com/barnybug/docker-fish-completion +# +# To install the completions: +# mkdir -p ~/.config/fish/completions +# cp docker.fish ~/.config/fish/completions +# +# Completion supported: +# - parameters +# - commands +# - containers +# - images +# - repositories + +function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' + for i in (commandline -opc) + if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats + return 1 + end + end + return 0 +end + +function __fish_print_docker_containers --description 'Print a list of docker containers' -a select + switch $select + case running + docker ps -a --no-trunc --filter status=running --format "{{.ID}}\n{{.Names}}" | tr ',' '\n' + case stopped + docker ps -a --no-trunc --filter status=exited --format "{{.ID}}\n{{.Names}}" | tr ',' '\n' + case all + docker ps -a --no-trunc --format "{{.ID}}\n{{.Names}}" | tr ',' '\n' + end +end + +function __fish_print_docker_images --description 'Print a list of docker images' + docker images --format "{{.Repository}}:{{.Tag}}" | command grep -v '<none>' +end + +function __fish_print_docker_repositories --description 'Print a list of docker repositories' + docker images --format "{{.Repository}}" | command grep -v '<none>' | command sort | command uniq +end + +# common options +complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the Engine API. Default is cors disabled" +complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge' +complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" +complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' +complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g.
10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' +complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' +complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" +complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" +complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' +complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level ("debug", "info", "warn", "error", "fatal")' +complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' +complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' +complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. 
SELinux does not presently support the BTRFS storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' + +# subcommands +# attach +complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" + +# build +complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile (Default is 'Dockerfile' at context root)" +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' + +# commit +complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" + +# cp +complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem" +complete -c
docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' + +# create +complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device-cgroup-rule -d 'Add a rule to the cgroup allowed devices list (e.g. --device-cgroup-rule="c 13:37 rwm")' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. 
--expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of <name|id>:alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: <number>[<unit>], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g., 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number>[<unit>], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mount -d 'Attach a filesystem mount to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" + +# diff +complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' +complete -c
docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" + +# events +complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l format -d 'Format the output using the given go template' + +# exec +complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" + +# export +complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" + +# history +complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" + +# images +complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" + +# import +complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' +complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' + +# info +complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' +complete -c docker -A 
-f -n '__fish_seen_subcommand_from info' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from info' -l help -d 'Print usage' + +# inspect +complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" + +# kill +complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" + +# load +complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' + +# login +complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Log in to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' + +# logout +complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' + +# logs +complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" + +# port +complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" + +# pause +complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a 
'(__fish_print_docker_containers running)' -d "Container" + +# ps +complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' + +# pull +complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" + +# push +complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" + +# rename +complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' + +# restart +complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" + +# rm +complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -a '(__fish_print_docker_containers all)' -d "Container" + +# rmi +complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" + +# run +complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device-cgroup-rule -d 'Add a rule to the cgroup allowed devices list (e.g. --device-cgroup-rule="c 13:37 rwm")' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of :alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g., 92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mount -d 'Attach a filesystem mount to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to kill a container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image"
+
+# save
+complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive'
+complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to a file, instead of STDOUT'
+complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image"
+
+# search
+complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds'
+complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output"
+complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only display images with at least x stars'
+
+# start
+complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process"
+complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN"
+complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container"
+
+# stats
+complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics"
+complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container"
+
+# stop
+complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" + +# tag +complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' + +# top +complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" + +# unpause +complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' +complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" + +# version +complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -l help -d 'Print usage' + +# wait +complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" diff --git a/contrib/completion/powershell/readme.txt b/contrib/completion/powershell/readme.txt new file mode 100644 index 0000000000..18e1b53c13 --- /dev/null +++ b/contrib/completion/powershell/readme.txt @@ -0,0 +1 @@ +See https://github.com/samneirinck/posh-docker \ No newline at end of file diff --git a/contrib/completion/zsh/REVIEWERS b/contrib/completion/zsh/REVIEWERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/contrib/completion/zsh/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker new file mode 100644 index 0000000000..0860907839 --- /dev/null +++ b/contrib/completion/zsh/_docker @@ -0,0 +1,3014 @@ +#compdef docker dockerd +# +# zsh completion for docker (http://docker.com) +# +# version: 0.3.0 +# github: https://github.com/felixr/docker-zsh-completion +# +# contributors: +# - Felix Riedel +# - Steve Durrheimer +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# Short-option stacking can be enabled with: +# zstyle ':completion:*:*:docker:*' option-stacking yes +# zstyle ':completion:*:*:docker-*:*' option-stacking yes +__docker_arguments() { + if zstyle -t ":completion:${curcontext}:" option-stacking; then + print -- -s + fi +} + +__docker_get_containers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local kind type line s + declare -a running stopped lines args names + + kind=$1; shift + type=$1; shift + [[ $kind = (stopped|all) ]] && args=($args -a) + + lines=(${(f)${:-"$(_call_program commands docker $docker_options ps --format 'table' --no-trunc $args)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line + lines=(${lines[2,-1]}) + + # Container ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}" + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = (Exit*|Created*) ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + # Names: we only display the one without slash. All other names + # are generated and may clutter the completion. However, with + # Swarm, all names may be prefixed by the swarm node name. + if [[ $type = (names|all) ]]; then + for line in $lines; do + names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}}) + # First step: find a common prefix and strip it (swarm node case) + (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/} + # Second step: only keep the first name without a / + s=${${names:#*/*}[1]} + # If no name, well give up. 
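+ # Illustrative example (hypothetical `docker ps` output): for a NAMES cell
+ # of "node-1/web,node-1/db/db-link" the common "node-1/" prefix is stripped,
+ # "web" is kept (no slash) and "db/db-link" is dropped as a link alias;
+ # if nothing survives, the container is skipped below.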
+ (( $#s != 0 )) || continue + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = (Exit*|Created*) ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 + [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 + return ret +} + +__docker_complete_stopped_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers stopped all "$@" +} + +__docker_complete_running_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers running all "$@" +} + +__docker_complete_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all all "$@" +} + +__docker_complete_containers_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all ids "$@" +} + +__docker_complete_containers_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all names "$@" +} + +__docker_complete_info_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + emulate -L zsh + setopt extendedglob + local -a plugins + plugins=(${(ps: :)${(M)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Plugins:}%%$'\n'^ *}}:# $1: *}## $1: }) + _describe -t plugins "$1 plugins" plugins && ret=0 + return ret +} + +__docker_complete_images() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a images + images=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) + _describe -t docker-images "images" images && ret=0 + __docker_complete_repositories_with_tags && ret=0 + return ret +} + +__docker_complete_repositories() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos + repos=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}%% *}[2,-1]}) + repos=(${repos#}) + _describe -t docker-repos "repositories" repos && ret=0 + return ret +} + +__docker_complete_repositories_with_tags() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos onlyrepos matched + declare m + repos=(${${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/ ##/:::}%% *}) + repos=(${${repos%:::}#}) + # Check if we have a prefix-match for the current prefix. + onlyrepos=(${repos%::*}) + for m in $onlyrepos; do + [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { + # Yes, complete with tags + repos=(${${repos/:::/:}/:/\\:}) + _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 + return ret + } + done + # No, only complete repositories + onlyrepos=(${${repos%:::*}/:/\\:}) + _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 + + return ret +} + +__docker_search() { + [[ $PREFIX = -* ]] && return 1 + local cache_policy + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + local searchterm cachename + searchterm="${words[$CURRENT]%/}" + cachename=_docker-search-$searchterm + + local expl + local -a result + if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ + && ! _retrieve_cache ${cachename#_}; then + _message "Searching for ${searchterm}..." 
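+ # The expansion below drops the header row of `docker search` and keeps
+ # only the first column (the image name); e.g. an illustrative result line
+ # "nginx   Official build of Nginx   10000   [OK]" contributes just "nginx".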
+ result=(${${${(f)${:-"$(_call_program commands docker $docker_options search $searchterm)"$'\n'}}%% *}[2,-1]}) + _store_cache ${cachename#_} result + fi + _wanted dockersearch expl 'available images' compadd -a result +} + +__docker_get_log_options() { + [[ $PREFIX = -* ]] && return 1 + + integer ret=1 + local log_driver=${opt_args[--log-driver]:-"all"} + local -a common_options awslogs_options fluentd_options gelf_options journald_options json_file_options logentries_options syslog_options splunk_options + + common_options=("max-buffer-size" "mode") + awslogs_options=($common_options "awslogs-region" "awslogs-group" "awslogs-stream" "awslogs-create-group") + fluentd_options=($common_options "env" "fluentd-address" "fluentd-async-connect" "fluentd-buffer-limit" "fluentd-retry-wait" "fluentd-max-retries" "labels" "tag") + gcplogs_options=($common_options "env" "gcp-log-cmd" "gcp-project" "labels") + gelf_options=($common_options "env" "gelf-address" "gelf-compression-level" "gelf-compression-type" "labels" "tag") + journald_options=($common_options "env" "labels" "tag") + json_file_options=($common_options "env" "labels" "max-file" "max-size") + logentries_options=($common_options "logentries-token") + syslog_options=($common_options "env" "labels" "syslog-address" "syslog-facility" "syslog-format" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "tag") + splunk_options=($common_options "env" "labels" "splunk-caname" "splunk-capath" "splunk-format" "splunk-gzip" "splunk-gzip-level" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "splunk-verify-connection" "tag") + + [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0 + [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0 + [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0 + [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0 + [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0 + [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0 + [[ $log_driver = (logentries|all) ]] && _describe -t logentries-options "logentries options" logentries_options "$@" && ret=0 + [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0 + [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0 + + return ret +} + +__docker_complete_log_drivers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + drivers=(awslogs etwlogs fluentd gcplogs gelf journald json-file none splunk syslog) + _describe -t log-drivers "log drivers" drivers && ret=0 + return ret +} + +__docker_complete_log_options() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (syslog-format) + local opts=('rfc3164' 'rfc5424' 'rfc5424micro') + _describe -t syslog-format-opts "syslog format options" opts && ret=0 + ;; + (mode) + local opts=('blocking' 'non-blocking') + _describe -t mode-opts "mode options" opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + __docker_get_log_options -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_detach_keys() { 
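+ # Completes the comma-separated value of `--detach-keys` (e.g. the usual
+ # default "ctrl-p,ctrl-q"): plain letters a-z and ctrl-<key> combinations
+ # are offered, with "," appended so further keys can be chained.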
+ [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + compset -P "*," + keys=(${:-{a-z}}) + ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) + _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 + _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 +} + +__docker_complete_pid() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local -a opts vopts + + opts=('host') + vopts=('container') + + if compset -P '*:'; then + case "${${words[-1]%:*}#*=}" in + (container) + __docker_complete_running_containers && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t pid-value-opts "PID Options with value" vopts -qS ":" && ret=0 + _describe -t pid-opts "PID Options" opts && ret=0 + fi + + return ret +} + +__docker_complete_runtimes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + emulate -L zsh + setopt extendedglob + local -a runtimes_opts + runtimes_opts=(${(ps: :)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Runtimes: }%%$'\n'^ *}}}) + _describe -t runtimes-opts "runtimes options" runtimes_opts && ret=0 +} + +__docker_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (ancestor) + __docker_complete_images && ret=0 + ;; + (before|since) + __docker_complete_containers && ret=0 + ;; + (health) + health_opts=('healthy' 'none' 'starting' 'unhealthy') + _describe -t health-filter-opts "health filter options" health_opts && ret=0 + ;; + (id) + __docker_complete_containers_ids && ret=0 + ;; + (is-task) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + (name) + __docker_complete_containers_names && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (status) + status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running' 'removing') + _describe -t status-filter-opts "status filter options" status_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('ancestor' 'before' 'exited' 'expose' 'health' 'id' 'label' 'name' 'network' 'publish' 'since' 'status' 'volume') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_search_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('is-automated' 'is-official' 'stars') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (is-automated|is-official) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_images_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('before' 'dangling' 'label' 'reference' 'since') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (before|reference|since) + __docker_complete_images && ret=0 + ;; + (dangling) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_events_filter() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('container' 'daemon' 'event' 'image' 'label' 'network' 
'type' 'volume') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (container) + __docker_complete_containers && ret=0 + ;; + (daemon) + emulate -L zsh + setopt extendedglob + local -a daemon_opts + daemon_opts=( + ${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Name: }%%$'\n'^ *}} + ${${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'ID: }%%$'\n'^ *}}//:/\\:} + ) + _describe -t daemon-filter-opts "daemon filter options" daemon_opts && ret=0 + ;; + (event) + local -a event_opts + event_opts=('attach' 'commit' 'connect' 'copy' 'create' 'delete' 'destroy' 'detach' 'die' 'disconnect' 'exec_create' 'exec_detach' + 'exec_start' 'export' 'health_status' 'import' 'kill' 'load' 'mount' 'oom' 'pause' 'pull' 'push' 'reload' 'rename' 'resize' 'restart' 'save' 'start' + 'stop' 'tag' 'top' 'unmount' 'unpause' 'untag' 'update') + _describe -t event-filter-opts "event filter options" event_opts && ret=0 + ;; + (image) + __docker_complete_images && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (type) + local -a type_opts + type_opts=('container' 'daemon' 'image' 'network' 'volume') + _describe -t type-filter-opts "type filter options" type_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_prune_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('until') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +# BO checkpoint + +__docker_checkpoint_commands() { + local -a _docker_checkpoint_subcommands + _docker_checkpoint_subcommands=( + "create:Create a checkpoint from a running container" + "ls:List checkpoints for a container" + "rm:Remove a checkpoint" + ) + _describe -t docker-checkpoint-commands "docker checkpoint command" _docker_checkpoint_subcommands +} + +__docker_checkpoint_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help)--leave-running[Leave the container running after checkpoint]" \ + "($help -)1:container:__docker_complete_running_containers" \ + "($help -)2:checkpoint: " && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help -)1:container:__docker_complete_containers" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--checkpoint-dir=[Use a custom checkpoint storage directory]:dir:_directories" \ + "($help -)1:container:__docker_complete_containers" \ + "($help -)2:checkpoint: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_checkpoint_commands" && ret=0 + ;; + esac + + return ret +} + +# EO checkpoint + +# BO container + +__docker_container_commands() { + local -a _docker_container_subcommands + _docker_container_subcommands=( + "attach:Attach to a running container" + "commit:Create a new image from a container's changes" + "cp:Copy files/folders between a container and the 
local filesystem" + "create:Create a new container" + "diff:Inspect changes on a container's filesystem" + "exec:Run a command in a running container" + "export:Export a container's filesystem as a tar archive" + "inspect:Display detailed information on one or more containers" + "kill:Kill one or more running containers" + "logs:Fetch the logs of a container" + "ls:List containers" + "pause:Pause all processes within one or more containers" + "port:List port mappings or a specific mapping for the container" + "prune:Remove all stopped containers" + "rename:Rename a container" + "restart:Restart one or more containers" + "rm:Remove one or more containers" + "run:Run a command in a new container" + "start:Start one or more stopped containers" + "stats:Display a live stream of container(s) resource usage statistics" + "stop:Stop one or more running containers" + "top:Display the running processes of a container" + "unpause:Unpause all processes within one or more containers" + "update:Update configuration of one or more containers" + "wait:Block until one or more containers stop, then print their exit codes" + ) + _describe -t docker-container-commands "docker container command" _docker_container_subcommands +} + +__docker_container_subcommand() { + local -a _command_args opts_help opts_attach_exec_run_start opts_create_run opts_create_run_update + local expl help="--help" + integer ret=1 + + opts_attach_exec_run_start=( + "($help)--detach-keys=[Escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys" + ) + opts_create_run=( + "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" + "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " + "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: " + "($help)*--cap-add=[Add Linux capabilities]:capability: " + "($help)*--cap-drop=[Drop Linux capabilities]:capability: " + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " + "($help)--cidfile=[Write the container ID to the file]:CID file:_files" + "($help)--cpus=[Number of CPUs (default 0.000)]:cpus: " + "($help)*--device=[Add a host device to the container]:device:_files" + "($help)*--device-cgroup-rule=[Add a rule to the cgroup allowed devices list]:device:cgroup: " + "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: " + "($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: " + "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: " + "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: " + "($help)--disable-content-trust[Skip image verification]" + "($help)*--dns=[Custom DNS servers]:DNS server: " + "($help)*--dns-option=[Custom DNS options]:DNS option: " + "($help)*--dns-search=[Custom DNS search domains]:DNS domains: " + "($help)*"{-e=,--env=}"[Environment variables]:environment variable: " + "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: " + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" + "($help)*--expose=[Expose a port from the container without publishing it]: " + "($help)*--group=[Set one or more supplementary user groups for the container]:group:_groups" + "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" + "($help -i --interactive)"{-i,--interactive}"[Keep stdin 
open even if not attached]" + "($help)--init[Run an init inside the container that forwards signals and reaps processes]" + "($help)--ip=[IPv4 address]:IPv4: " + "($help)--ip6=[IPv6 address]:IPv6: " + "($help)--ipc=[IPC namespace to use]:IPC namespace: " + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" + "($help)*--link=[Add link to another container]:link:->link" + "($help)*--link-local-ip=[Container IPv4/IPv6 link-local addresses]:IPv4/IPv6: " + "($help)*"{-l=,--label=}"[Container metadata]:label: " + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" + "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_complete_log_options" + "($help)--mac-address=[Container MAC address]:MAC address: " + "($help)*--mount=[Attach a filesystem mount to the container]:mount: " + "($help)--name=[Container name]:name: " + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" + "($help)*--network-alias=[Add network-scoped alias for the container]:alias: " + "($help)--oom-kill-disable[Disable OOM Killer]" + "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" + "($help)--pids-limit[Tune container pids limit (set -1 for unlimited)]" + "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" + "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports" + "($help)--pid=[PID namespace to use]:PID namespace:__docker_complete_pid" + "($help)--privileged[Give extended privileges to this container]" + "($help)--read-only[Mount the container's root filesystem as read only]" + "($help)*--security-opt=[Security options]:security option: " + "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " + "($help)--stop-signal=[Signal to kill a container]:signal:_signals" + "($help)--stop-timeout=[Timeout (in seconds) to stop a container]:time: " + "($help)*--sysctl=-[sysctl options]:sysctl: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" + "($help)*--ulimit=[ulimit options]:ulimit: " + "($help)--userns=[Container user namespace]:user namespace:(host)" + "($help)--tmpfs[mount tmpfs]" + "($help)*-v[Bind mount a volume]:volume: " + "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)" + "($help)*--volumes-from=[Mount volumes from the specified container]:volume: " + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + opts_create_run_update=( + "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " + "($help)--kernel-memory=[Kernel memory limit in bytes]:Memory limit: " + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " + 
"($help)--memory-reservation=[Memory soft limit]:Memory limit: " + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " + "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)" + ) + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help)--no-stdin[Do not attach stdin]" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help -):containers:__docker_complete_running_containers" && ret=0 + ;; + (commit) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --author)"{-a=,--author=}"[Author]:author: " \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \ + "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ + "($help -):container:__docker_complete_containers" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (cp) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol link]" \ + "($help -)1:container:->container" \ + "($help -)2:hostpath:_files" && ret=0 + case $state in + (container) + if compset -P "*:"; then + _files && ret=0 + else + __docker_complete_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (diff) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (exec) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)*"{-e=,--env=}"[Set environment variables]:environment variable: " \ + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ + "($help)--privileged[Give extended Linux capabilities to the command]" \ + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \ + "($help -):containers:__docker_complete_running_containers" \ + "($help -)*::command:->anycommand" && ret=0 + case $state in + (anycommand) + shift 1 words + (( CURRENT-- )) + _normal && ret=0 + ;; + esac + ;; + (export) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (kill) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \ + "($help 
-)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (logs) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--details[Show extra details provided to logs]" \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help -s --since)"{-s=,--since=}"[Show logs since this timestamp]:timestamp: " \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers]" \ + "($help)--before=[Show only container created before...]:containers:__docker_complete_containers" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_ps_filters" \ + "($help)--format=[Pretty-print containers using a Go template]:template: " \ + "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ + "($help -n --last)"{-n=,--last=}"[Show n last created containers (includes all states)]:n:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help)--since=[Show only containers created since...]:containers:__docker_complete_containers" && ret=0 + ;; + (pause|unpause) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (port) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)2:port:_ports" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rename) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):old name:__docker_complete_containers" \ + "($help -):new name: " && ret=0 + ;; + (restart) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_containers_ids" && ret=0 + ;; + (rm) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ + "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ + "($help -)*:containers:->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)-f]} == -f || ${words[(r)--force]} == --force ]]; then + __docker_complete_containers && ret=0 + else + __docker_complete_stopped_containers && ret=0 + fi + ;; + esac + ;; + (run) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)--health-cmd=[Command to run to check health]:command: " \ + "($help)--health-interval=[Time between running the check]:time: " \ + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" \ + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " \ + 
"($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \ + "($help)--rm[Remove intermediate containers when it exits]" \ + "($help)--runtime=[Name of the runtime to be used for that container]:runtime:__docker_complete_runtimes" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help)--storage-opt=[Storage driver options for the container]:storage options:->storage-opt" \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + (storage-opt) + if compset -P "*="; then + _message "value" && ret=0 + else + opts=('size') + _describe -t filter-opts "storage options" opts -qS "=" && ret=0 + fi + ;; + esac + ;; + (start) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ + "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \ + "($help -)*:containers:__docker_complete_stopped_containers" && ret=0 + ;; + (stats) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-stream[Disable streaming stats and only pull the first result]" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (stop) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (top) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)*:: :->ps-arguments" && ret=0 + case $state in + (ps-arguments) + _ps && ret=0 + ;; + esac + ;; + (update) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + opts_create_run_update \ + "($help -)*: :->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then + __docker_complete_stopped_containers && ret=0 + else + __docker_complete_containers && ret=0 + fi + ;; + esac + ;; + (wait) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO container + +# BO image + +__docker_image_commands() { + local -a _docker_image_subcommands + _docker_image_subcommands=( + "build:Build an image from a Dockerfile" + "history:Show the history of an image" + "import:Import the contents from a tarball to create a filesystem image" + "inspect:Display detailed information on one or more images" + "load:Load an image from a tar archive or STDIN" + "ls:List images" + "prune:Remove unused images" + "pull:Pull an image or a repository from a registry" + "push:Push an image or a repository to a registry" + "rm:Remove one or more images" + "save:Save one or more images to a tar archive (streamed to STDOUT by default)" + "tag:Tag an image into a repository" + ) + _describe -t 
docker-image-commands "docker image command" _docker_image_subcommands +} + +__docker_image_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (build) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " \ + "($help)*--build-arg=[Build-time variables]:=: " \ + "($help)*--cache-from=[Images to consider as cache sources]: :__docker_complete_repositories_with_tags" \ + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" \ + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " \ + "($help)--compress[Compress the build context using gzip]" \ + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " \ + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " \ + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " \ + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " \ + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " \ + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \ + "($help)--force-rm[Always remove intermediate containers]" \ + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" \ + "($help)*--label=[Set metadata for an image]:label=value: " \ + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " \ + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " \ + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" \ + "($help)--no-cache[Do not use cache when building the image]" \ + "($help)--pull[Attempt to pull a newer version of the image]" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ + "($help)--rm[Remove intermediate containers after a successful build]" \ + "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " \ + "($help)--squash[Squash newly built layers into a single new layer]" \ + "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_complete_repositories_with_tags" \ + "($help)*--ulimit=[ulimit options]:ulimit: " \ + "($help)--userns=[Container user namespace]:user namespace:(host)" \ + "($help -):path or URL:_directories" && ret=0 + ;; + (history) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (import) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message for imported image]:message: " \ + "($help -):URL:(- http:// file://)" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)*:images:__docker_complete_images" 
&& ret=0 + ;; + (load) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g \"*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)\"" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress the load output]" && ret=0 + ;; + (ls|list) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all images]" \ + "($help)--digests[Show digests]" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_images_filters" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -): :__docker_complete_repositories" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused images, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (pull) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -):name:__docker_search" && ret=0 + ;; + (push) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image signing]" \ + "($help -): :__docker_complete_images" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help)--no-prune[Do not delete untagged parents]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (save) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (tag) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):source:__docker_complete_images"\ + "($help -):destination:__docker_complete_repositories_with_tags" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO image + +# BO network + +__docker_network_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (driver) + __docker_complete_info_plugins Network && ret=0 + ;; + (id) + __docker_complete_networks_ids && ret=0 + ;; + (name) + __docker_complete_networks_names && ret=0 + ;; + (scope) + opts=('global' 'local' 'swarm') + _describe -t scope-filter-opts "Scope filter options" opts && ret=0 + ;; + (type) + opts=('builtin' 'custom') + _describe -t type-filter-opts "Type filter options" opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('driver' 'id' 'label' 'name' 'scope' 'type') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_get_networks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines networks + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options network ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) 
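+ # At this point in each iteration: i = start of the column header,
+ # j = first blank after it, k = last character of that column's field,
+ # so the begin/end maps record, per header name, the slice of every
+ # row that belongs to that column (same technique as for containers).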
+ begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Network ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + _describe -t networks-list "networks" networks "$@" && ret=0 + return ret +} + +__docker_complete_networks() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks all "$@" +} + +__docker_complete_networks_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks ids "$@" +} + +__docker_complete_networks_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks names "$@" +} + +__docker_network_commands() { + local -a _docker_network_subcommands + _docker_network_subcommands=( + "connect:Connect a container to a network" + "create:Creates a new network with a name specified by the user" + "disconnect:Disconnects a container from a network" + "inspect:Displays detailed information on a network" + "ls:Lists all the networks created by the user" + "prune:Remove all unused networks" + "rm:Deletes one or more networks" + ) + _describe -t docker-network-commands "docker network command" _docker_network_subcommands +} + +__docker_network_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (connect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ + "($help)--ip=[IPv4 address]:IPv4: " \ + "($help)--ip6=[IPv6 address]:IPv6: " \ + "($help)*--link=[Add a link to another container]:link:->link" \ + "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--attachable[Enable manual container attachment]" \ + "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \ + "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ + "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \ + "($help)--internal[Restricts external access to the network]" \ + "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ + "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ + "($help)*--ipam-opt=[Custom IPAM plugin options]:opt=value: " \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help)*--label=[Set metadata on a network]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:opt=value: " \ + "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ + "($help -)1:Network Name: " && ret=0 + ;; + (disconnect) + 
_arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--verbose[Show detailed information]" \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--no-trunc[Do not truncate the output]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_network_complete_ls_filters" \ + "($help)--format=[Pretty-print networks using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO network + +# BO node + +__docker_node_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_nodes_ids && ret=0 + ;; + (membership) + membership_opts=('accepted' 'pending' 'rejected') + _describe -t membership-opts "membership options" membership_opts && ret=0 + ;; + (name) + __docker_complete_nodes_names && ret=0 + ;; + (role) + role_opts=('manager' 'worker') + _describe -t role-opts "role options" role_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'membership' 'name' 'role') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_node_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running' 'shutdown') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_nodes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines nodes args + + type=$1; shift + filter=$1; shift + [[ $filter != "none" ]] && args=("-f $filter") + + lines=(${(f)${:-"$(_call_program commands docker $docker_options node ls $args)"$'\n'}}) + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Node ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + nodes=($nodes $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + nodes=($nodes $s) + done + fi + + _describe -t nodes-list "nodes" nodes "$@" && 
ret=0 + return ret +} + +__docker_complete_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all none "$@" +} + +__docker_complete_nodes_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes ids none "$@" +} + +__docker_complete_nodes_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes names none "$@" +} + +__docker_complete_pending_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "membership=pending" "$@" +} + +__docker_complete_manager_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=manager" "$@" +} + +__docker_complete_worker_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=worker" "$@" +} + +__docker_node_commands() { + local -a _docker_node_subcommands + _docker_node_subcommands=( + "demote:Demote a node as manager in the swarm" + "inspect:Display detailed information on one or more nodes" + "ls:List nodes in the swarm" + "promote:Promote a node as manager in the swarm" + "rm:Remove one or more nodes from the swarm" + "ps:List tasks running on one or more nodes, defaults to current node" + "update:Update a node" + ) + _describe -t docker-node-commands "docker node command" _docker_node_subcommands +} + +__docker_node_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force remove a node from the swarm]" \ + "($help -)*:node:__docker_complete_pending_nodes" && ret=0 + ;; + (demote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_manager_nodes" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_node_complete_ls_filters" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (promote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_worker_nodes" && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all instances]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_node_complete_ps_filters" \ + "($help)--format=[Format the output using the given go template]:template: " \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--availability=[Availability of the node]:availability:(active pause drain)" \ + "($help)*--label-add=[Add or update a node label]:key=value: " \ + "($help)*--label-rm=[Remove a node label if exists]:label: " \ + "($help)--role=[Role of the node]:role:(manager worker)" \ + "($help -)1:node:__docker_complete_nodes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_node_commands" && ret=0 + ;; + esac + + return ret +} + +# EO node + +# BO plugin + +__docker_plugin_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + 
case "${${words[-1]%=*}#*=}" in + (capability) + opts=('authz' 'ipamdriver' 'networkdriver' 'volumedriver') + _describe -t capability-opts "capability options" opts && ret=0 + ;; + (enabled) + opts=('false' 'true') + _describe -t enabled-opts "enabled options" opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('capability' 'enabled') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines plugins args + + filter=$1; shift + [[ $filter != "none" ]] && args=("-f $filter") + + lines=(${(f)${:-"$(_call_program commands docker $docker_options plugin ls $args)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Name + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[TAG]},${end[TAG]}]}%% ##}}" + plugins=($plugins $s) + done + + _describe -t plugins-list "plugins" plugins "$@" && ret=0 + return ret +} + +__docker_complete_plugins() { + [[ $PREFIX = -* ]] && return 1 + __docker_plugins none "$@" +} + +__docker_complete_enabled_plugins() { + [[ $PREFIX = -* ]] && return 1 + __docker_plugins enabled=true "$@" +} + +__docker_complete_disabled_plugins() { + [[ $PREFIX = -* ]] && return 1 + __docker_plugins enabled=false "$@" +} + +__docker_plugin_commands() { + local -a _docker_plugin_subcommands + _docker_plugin_subcommands=( + "disable:Disable a plugin" + "enable:Enable a plugin" + "inspect:Return low-level information about a plugin" + "install:Install a plugin" + "ls:List plugins" + "push:Push a plugin" + "rm:Remove a plugin" + "set:Change settings for a plugin" + "upgrade:Upgrade an existing plugin" + ) + _describe -t docker-plugin-commands "docker plugin command" _docker_plugin_subcommands +} + +__docker_plugin_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (disable) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the disable of an active plugin]" \ + "($help -)1:plugin:__docker_complete_enabled_plugins" && ret=0 + ;; + (enable) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--timeout=[HTTP client timeout (in seconds)]:timeout: " \ + "($help -)1:plugin:__docker_complete_disabled_plugins" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ + "($help -)*:plugin:__docker_complete_plugins" && ret=0 + ;; + (install) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--alias=[Local name for plugin]:alias: " \ + "($help)--disable[Do not enable the plugin on install]" \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help)--grant-all-permissions[Grant all permissions necessary to run the plugin]" \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -)*:key=value: " && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + 
"($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_plugin_complete_ls_filters" \ + "($help --format)--format=[Format the output using the given Go template]:template: " \ + "($help)--no-trunc[Don't truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (push) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help -)1:plugin:__docker_complete_plugins" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the removal of an active plugin]" \ + "($help -)*:plugin:__docker_complete_plugins" && ret=0 + ;; + (set) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -)*:key=value: " && ret=0 + ;; + (upgrade) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image verification (default true)]" \ + "($help)--grant-all-permissions[Grant all permissions necessary to run the plugin]" \ + "($help)--skip-remote-check[Do not check if specified remote plugin matches existing plugin image]" \ + "($help -)1:plugin:__docker_complete_plugins" \ + "($help -):remote: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_plugin_commands" && ret=0 + ;; + esac + + return ret +} + +# EO plugin + +# BO secret + +__docker_secrets() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines secrets + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options secret ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + secrets=($secrets $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + secrets=($secrets $s) + done + fi + + _describe -t secrets-list "secrets" secrets "$@" && ret=0 + return ret +} + +__docker_complete_secrets() { + [[ $PREFIX = -* ]] && return 1 + __docker_secrets all "$@" +} + +__docker_secret_commands() { + local -a _docker_secret_subcommands + _docker_secret_subcommands=( + "create:Create a secret using stdin as content" + "inspect:Display detailed information on one or more secrets" + "ls:List secrets" + "rm:Remove one or more secrets" + ) + _describe -t docker-secret-commands "docker secret command" _docker_secret_subcommands +} + +__docker_secret_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-l=,--label=}"[Secret labels]:label: " \ + "($help -):secret: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + 
$opts_help \ + "($help)--format=[Format the output using the given go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_secret_commands" && ret=0 + ;; + esac + + return ret +} + +# EO secret + +# BO service + +__docker_service_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_services_ids && ret=0 + ;; + (mode) + opts=('global' 'replicated') + _describe -t mode-opts "mode options" opts && ret=0 + ;; + (name) + __docker_complete_services_names && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'mode' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_service_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running' 'shutdown') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_service_complete_placement_pref() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (spread) + opts=('engine.labels' 'node.labels') + _describe -t spread-opts "spread options" opts -qS "." && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('spread') + _describe -t pref-opts "placement pref options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_services() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines services + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options service ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + _describe -t services-list "services" services "$@" && ret=0 + return ret +} + +__docker_complete_services() { + [[ $PREFIX = -* ]] && return 1 + __docker_services all "$@" +} + +__docker_complete_services_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_services ids "$@" +} + +__docker_complete_services_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_services names "$@" +} + +__docker_service_commands() { + local -a _docker_service_subcommands + _docker_service_subcommands=( + "create:Create a new service" + 
"inspect:Display detailed information on one or more services" + "logs:Fetch the logs of a service or task" + "ls:List services" + "rm:Remove one or more services" + "scale:Scale one or multiple replicated services" + "ps:List the tasks of a service" + "update:Update a service" + ) + _describe -t docker-service-commands "docker service command" _docker_service_subcommands +} + +__docker_service_subcommand() { + local -a _command_args opts_help opts_create_update + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + opts_create_update=( + "($help)*--constraint=[Placement constraints]:constraint: " + "($help)--endpoint-mode=[Placement constraints]:mode:(dnsrr vip)" + "($help)*"{-e=,--env=}"[Set environment variables]:env: " + "($help)--health-cmd=[Command to run to check health]:command: " + "($help)--health-interval=[Time between running the check]:time: " + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " + "($help)--hostname=[Service container hostname]:hostname: " \ + "($help)*--label=[Service labels]:label: " + "($help)--limit-cpu=[Limit CPUs]:value: " + "($help)--limit-memory=[Limit Memory]:value: " + "($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers" + "($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options" + "($help)*--mount=[Attach a filesystem mount to the service]:mount: " + "($help)*--network=[Network attachments]:network: " + "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" + "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: " + "($help)--read-only[Mount the container's root filesystem as read only]" + "($help)--replicas=[Number of tasks]:replicas: " + "($help)--reserve-cpu=[Reserve CPUs]:value: " + "($help)--reserve-memory=[Reserve Memory]:value: " + "($help)--restart-condition=[Restart when condition is met]:mode:(any none on-failure)" + "($help)--restart-delay=[Delay between restart attempts]:delay: " + "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: " + "($help)--restart-window=[Window used to evaluate the restart policy]:duration: " + "($help)--rollback-delay=[Delay between task rollbacks]:duration: " + "($help)--rollback-failure-action=[Action on rollback failure]:action:(continue pause)" + "($help)--rollback-max-failure-ratio=[Failure rate to tolerate during a rollback]:failure rate: " + "($help)--rollback-monitor=[Duration after each task rollback to monitor for failure]:duration: " + "($help)--rollback-parallelism=[Maximum number of tasks rolled back simultaneously]:number: " + "($help)*--secret=[Specify secrets to expose to the service]:secret:__docker_complete_secrets" + "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: " + "($help)--stop-signal=[Signal to stop the container]:signal:_signals" + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-TTY]" + "($help)--update-delay=[Delay between updates]:delay: " + "($help)--update-failure-action=[Action on update failure]:mode:(continue pause rollback)" + "($help)--update-max-failure-ratio=[Failure rate to tolerate during an update]:fraction: " + "($help)--update-monitor=[Duration after each task update to monitor for failure]:window: " + "($help)--update-parallelism=[Maximum number of tasks updated simultaneously]:number: " + "($help -u --user)"{-u=,--user=}"[Username or 
UID]:user:_users" + "($help)--with-registry-auth[Send registry authentication details to swarm agents]" + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)*--container-label=[Container labels]:label: " \ + "($help)*--dns=[Set custom DNS servers]:DNS: " \ + "($help)*--dns-option=[Set DNS options]:DNS option: " \ + "($help)*--dns-search=[Set custom DNS search domains]:DNS search: " \ + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" \ + "($help)--mode=[Service Mode]:mode:(global replicated)" \ + "($help)--name=[Service name]:name: " \ + "($help)*--placement-pref=[Add a placement preference]:pref:__docker_service_complete_placement_pref" \ + "($help)*--publish=[Publish a port]:port: " \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (logs) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-task-ids[Do not include task IDs]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help)--since=[Show logs since timestamp]:timestamp: " \ + "($help)--tail=[Number of lines to show from the end of the logs]:lines:(1 10 20 50 all)" \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_service_complete_ls_filters" \ + "($help)--format=[Pretty-print services using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (scale) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:->values" && ret=0 + case $state in + (values) + if compset -P '*='; then + _message 'replicas' && ret=0 + else + __docker_complete_services -qS "=" + fi + ;; + esac + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_service_complete_ps_filters" \ + "($help)--format=[Format the output using the given go template]:template: " \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display task IDs]" \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)--arg=[Service command args]:arguments: _normal" \ + "($help)*--container-label-add=[Add or update container labels]:label: " \ + "($help)*--container-label-rm=[Remove a container label by its key]:label: " \ + "($help)*--dns-add=[Add or update custom DNS servers]:DNS: " \ + "($help)*--dns-rm=[Remove custom DNS servers]:DNS: " \ + "($help)*--dns-option-add=[Add or update 
DNS options]:DNS option: " \ + "($help)*--dns-option-rm=[Remove DNS options]:DNS option: " \ + "($help)*--dns-search-add=[Add or update custom DNS search domains]:DNS search: " \ + "($help)*--dns-search-rm=[Remove DNS search domains]:DNS search: " \ + "($help)--force[Force update]" \ + "($help)*--group-add=[Add additional supplementary user groups to the container]:group:_groups" \ + "($help)*--group-rm=[Remove previously added supplementary user groups from the container]:group:_groups" \ + "($help)--image=[Service image tag]:image:__docker_complete_repositories" \ + "($help)*--placement-pref-add=[Add a placement preference]:pref:__docker_service_complete_placement_pref" \ + "($help)*--placement-pref-rm=[Remove a placement preference]:pref:__docker_service_complete_placement_pref" \ + "($help)*--publish-add=[Add or update a port]:port: " \ + "($help)*--publish-rm=[Remove a port(target-port mandatory)]:port: " \ + "($help)--rollback[Rollback to previous specification]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_service_commands" && ret=0 + ;; + esac + + return ret +} + +# EO service + +# BO stack + +__docker_stack_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running' 'shutdown') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stack_complete_services_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stacks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines stacks + + lines=(${(f)${:-"$(_call_program commands docker $docker_options stack ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + stacks=($stacks $s) + done + + _describe -t stacks-list "stacks" stacks "$@" && ret=0 + return ret +} + +__docker_complete_stacks() { + [[ $PREFIX = -* ]] && return 1 + __docker_stacks "$@" +} + +__docker_stack_commands() { + local -a _docker_stack_subcommands + _docker_stack_subcommands=( + "deploy:Deploy a new stack or update an existing stack" + "ls:List stacks" + "ps:List the tasks in the stack" + "rm:Remove the stack" + "services:List the services in the stack" + ) + _describe -t docker-stack-commands "docker stack command" _docker_stack_subcommands +} + +__docker_stack_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (deploy|up) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--bundle-file=[Path to a 
Distributed Application Bundle file]:dab:_files -g \"*.dab\"" \ + "($help -c --compose-file)"{-c=,--compose-file=}"[Path to a Compose file]:compose file:_files -g \"*.(yml|yaml)\"" \ + "($help)--with-registry-auth[Send registry authentication details to Swarm agents]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all tasks]" \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_ps_filters" \ + "($help)--format=[Format the output using the given go template]:template: " \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display task IDs]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (rm|remove|down) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (services) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_services_filters" \ + "($help)--format=[Pretty-print services using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_stack_commands" && ret=0 + ;; + esac + + return ret +} + +# EO stack + +# BO swarm + +__docker_swarm_commands() { + local -a _docker_swarm_subcommands + _docker_swarm_subcommands=( + "init:Initialize a swarm" + "join:Join a swarm as a node and/or manager" + "join-token:Manage join tokens" + "leave:Leave a swarm" + "unlock:Unlock swarm" + "unlock-key:Manage the unlock key" + "update:Update the swarm" + ) + _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands +} + +__docker_swarm_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (init) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--advertise-addr=[Advertised address]:ip\:port: " \ + "($help)--data-path-addr=[Data path IP or interface]:ip " \ + "($help)--autolock[Enable manager autolocking]" \ + "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ + "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ + "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--force-new-cluster[Force create a new cluster from current state]" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (join) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--advertise-addr=[Advertised address]:ip\:port: " \ + "($help)--data-path-addr=[Data path IP or interface]:ip " \ + "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--token=[Token for entry into the swarm]:secret: " \ + 
"($help -):host\:port: " && ret=0 + ;; + (join-token) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ + "($help)--rotate[Rotate join token]" \ + "($help -):role:(manager worker)" && ret=0 + ;; + (leave) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force this node to leave the swarm, ignoring warnings]" && ret=0 + ;; + (unlock) + _arguments $(__docker_arguments) \ + $opts_help && ret=0 + ;; + (unlock-key) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ + "($help)--rotate[Rotate unlock token]" && ret=0 + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--autolock[Enable manager autolocking]" \ + "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ + "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO swarm + +# BO system + +__docker_system_commands() { + local -a _docker_system_subcommands + _docker_system_subcommands=( + "df:Show docker filesystem usage" + "events:Get real time events from the server" + "info:Display system-wide information" + "prune:Remove unused data" + ) + _describe -t docker-system-commands "docker system command" _docker_system_subcommands +} + +__docker_system_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (df) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -v --verbose)"{-v,--verbose}"[Show detailed information on space usage]" && ret=0 + ;; + (events) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_events_filter" \ + "($help)--since=[Events created since this timestamp]:timestamp: " \ + "($help)--until=[Events created until this timestamp]:timestamp: " \ + "($help)--format=[Format the output using the given go template]:template: " && ret=0 + ;; + (info) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused data, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO system + +# BO volume + +__docker_volume_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (dangling) + dangling_opts=('true' 'false') + _describe -t dangling-filter-opts "Dangling Filter Options" dangling_opts && ret=0 + ;; + (driver) + __docker_complete_info_plugins Volume && ret=0 + ;; + (name) + __docker_complete_volumes && ret=0 + ;; + 
*) + _message 'value' && ret=0 + ;; + esac + else + opts=('dangling' 'driver' 'label' 'name') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_volumes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a lines volumes + + lines=(${(f)${:-"$(_call_program commands docker $docker_options volume ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Names + local line s + for line in $lines; do + s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + volumes=($volumes $s) + done + + _describe -t volumes-list "volumes" volumes && ret=0 + return ret +} + +__docker_volume_commands() { + local -a _docker_volume_subcommands + _docker_volume_subcommands=( + "create:Create a volume" + "inspect:Display detailed information on one or more volumes" + "ls:List volumes" + "prune:Remove all unused volumes" + "rm:Remove one or more volumes" + ) + _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands +} + +__docker_volume_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -d --driver)"{-d=,--driver=}"[Volume driver name]:Driver name:(local)" \ + "($help)*--label=[Set metadata for a volume]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:Driver option: " \ + "($help -)1:Volume name: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)1:volume:__docker_complete_volumes" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:__docker_volume_complete_ls_filters" \ + "($help)--format=[Pretty-print volumes using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0 + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the removal of one or more volumes]" \ + "($help -):volume:__docker_complete_volumes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO volume + +__docker_caching_policy() { + oldp=( "$1"(Nmh+1) ) # 1 hour + (( $#oldp )) +} + +__docker_commands() { + local cache_policy + integer force_invalidation=0 + + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + if ( (( ! ${+_docker_hide_legacy_commands} )) || _cache_invalid docker_hide_legacy_commands ) \ + && ! 
_retrieve_cache docker_hide_legacy_commands; + then + _docker_hide_legacy_commands="${DOCKER_HIDE_LEGACY_COMMANDS}" + _store_cache docker_hide_legacy_commands _docker_hide_legacy_commands + fi + + if [[ "${_docker_hide_legacy_commands}" != "${DOCKER_HIDE_LEGACY_COMMANDS}" ]]; then + force_invalidation=1 + _docker_hide_legacy_commands="${DOCKER_HIDE_LEGACY_COMMANDS}" + _store_cache docker_hide_legacy_commands _docker_hide_legacy_commands + fi + + if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands ) \ + && ! _retrieve_cache docker_subcommands || [[ ${force_invalidation} -eq 1 ]]; + then + local -a lines + lines=(${(f)"$(_call_program commands docker 2>&1)"}) + _docker_subcommands=(${${${(M)${lines[$((${lines[(i)*Commands:]} + 1)),-1]}:# *}## #}/ ##/:}) + _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command') + (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands + fi + _describe -t docker-commands "docker command" _docker_subcommands +} + +__docker_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach|commit|cp|create|diff|exec|export|kill|logs|pause|unpause|port|rename|restart|rm|run|start|stats|stop|top|update|wait) + __docker_container_subcommand && ret=0 + ;; + (build|history|import|load|pull|push|save|tag) + __docker_image_subcommand && ret=0 + ;; + (checkpoint) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_checkpoint_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_checkpoint_subcommand && ret=0 + ;; + esac + ;; + (container) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_container_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_container_subcommand && ret=0 + ;; + esac + ;; + (daemon) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \ + "($help)*--allow-nondistributable-artifacts=[Push nondistributable artifacts to specified registries]:registry: " \ + "($help)--api-cors-header=[CORS headers in the Engine API]:CORS headers: " \ + "($help)*--authorization-plugin=[Authorization plugins to load]" \ + "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ + "($help)--bip=[Network bridge IP]:IP address: " \ + "($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \ + "($help)--cluster-advertise=[Address or interface name to advertise]:Instance to advertise (host\:port): " \ + "($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \ + "($help)*--cluster-store-opt=[Cluster store options]:Cluster options:->cluster-store-options" \ + "($help)--config-file=[Path to daemon configuration file]:Config File:_files" \ + "($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \ + "($help)--data-root=[Root directory of persisted Docker data]:path:_directories" \ + "($help -D --debug)"{-D,--debug}"[Enable debug 
mode]" \ + "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ + "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ + "($help)--default-shm-size=[Default shm size for containers]:size:" \ + "($help)*--default-ulimit=[Default ulimits for containers]:ulimit: " \ + "($help)--disable-legacy-registry[Disable contacting legacy registries]" \ + "($help)*--dns=[DNS server to use]:DNS: " \ + "($help)*--dns-opt=[DNS options to use]:DNS option: " \ + "($help)*--dns-search=[DNS search domains to use]:DNS search: " \ + "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \ + "($help)--exec-root=[Root directory for execution state files]:path:_directories" \ + "($help)--experimental[Enable experimental features]" \ + "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ + "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ + "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help)--icc[Enable inter-container communication]" \ + "($help)--init[Run an init inside containers to forward signals and reap processes]" \ + "($help)--init-path=[Path to the docker-init binary]:docker-init binary:_files" \ + "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \ + "($help)--ip=[Default IP when binding container ports]" \ + "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ + "($help)--ip-masq[Enable IP masquerading]" \ + "($help)--iptables[Enable addition of iptables rules]" \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)*--label=[Key=value labels]:label: " \ + "($help)--live-restore[Enable live restore of docker when containers are still running]" \ + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" \ + "($help)*--log-opt=[Default log driver options for containers]:log driver options:__docker_complete_log_options" \ + "($help)--max-concurrent-downloads[Set the max concurrent downloads for each pull]" \ + "($help)--max-concurrent-uploads[Set the max concurrent uploads for each push]" \ + "($help)--mtu=[Network MTU]:mtu:(0 576 1420 1500 9000)" \ + "($help)--oom-score-adjust=[Set the oom_score_adj for the daemon]:oom-score:(-500)" \ + "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \ + "($help)--raw-logs[Full timestamps without ANSI coloring]" \ + "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \ + "($help)--seccomp-profile=[Path to seccomp profile]:path:_files -g \"*.json\"" \ + "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs btrfs devicemapper overlay overlay2 vfs zfs)" \ + "($help)--selinux-enabled[Enable selinux support]" \ + "($help)--shutdown-timeout=[Set the shutdown timeout value in seconds]:time: " \ + "($help)*--storage-opt=[Storage driver options]:storage driver options: " \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userns-remap=[User/Group setting for 
user namespaces]:user\:group:->users-groups" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" && ret=0 + + case $state in + (cluster-store) + if compset -P '*://'; then + _message 'host:port' && ret=0 + else + store=('consul' 'etcd' 'zk') + _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0 + fi + ;; + (cluster-store-options) + if compset -P '*='; then + _files && ret=0 + else + opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') + _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 + fi + ;; + (users-groups) + if compset -P '*:'; then + _groups && ret=0 + else + _describe -t userns-default "default Docker user management" '(default)' && ret=0 + _users && ret=0 + fi + ;; + esac + ;; + (events|info) + __docker_system_subcommand && ret=0 + ;; + (image) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_image_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_image_subcommand && ret=0 + ;; + esac + ;; + (images) + words[1]='ls' + __docker_image_subcommand && ret=0 + ;; + (inspect) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \ + "($help)--type=[Return JSON for specified type]:type:(container image network node plugin service volume)" \ + "($help -)*: :->values" && ret=0 + + case $state in + (values) + if [[ ${words[(r)--type=container]} == --type=container ]]; then + __docker_complete_containers && ret=0 + elif [[ ${words[(r)--type=image]} == --type=image ]]; then + __docker_complete_images && ret=0 + elif [[ ${words[(r)--type=network]} == --type=network ]]; then + __docker_complete_networks && ret=0 + elif [[ ${words[(r)--type=node]} == --type=node ]]; then + __docker_complete_nodes && ret=0 + elif [[ ${words[(r)--type=plugin]} == --type=plugin ]]; then + __docker_complete_plugins && ret=0 + elif [[ ${words[(r)--type=service]} == --type=secrets ]]; then + __docker_complete_secrets && ret=0 + elif [[ ${words[(r)--type=service]} == --type=service ]]; then + __docker_complete_services && ret=0 + elif [[ ${words[(r)--type=volume]} == --type=volume ]]; then + __docker_complete_volumes && ret=0 + else + __docker_complete_containers + __docker_complete_images + __docker_complete_networks + __docker_complete_nodes + __docker_complete_plugins + __docker_complete_secrets + __docker_complete_services + __docker_complete_volumes && ret=0 + fi + ;; + esac + ;; + (login) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -p --password)"{-p=,--password=}"[Password]:password: " \ + "($help -u --user)"{-u=,--user=}"[Username]:username: " \ + "($help -)1:server: " && ret=0 + ;; + (logout) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -)1:server: " && ret=0 + ;; + (network) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_network_commands && ret=0 + ;; + (option-or-argument) + 
curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_network_subcommand && ret=0 + ;; + esac + ;; + (node) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_node_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_node_subcommand && ret=0 + ;; + esac + ;; + (plugin) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_plugin_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_plugin_subcommand && ret=0 + ;; + esac + ;; + (ps) + words[1]='ls' + __docker_container_subcommand && ret=0 + ;; + (rmi) + words[1]='rm' + __docker_image_subcommand && ret=0 + ;; + (search) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_search_filters" \ + "($help)--limit=[Maximum returned search results]:limit:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -):term: " && ret=0 + ;; + (secret) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_secret_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_secret_subcommand && ret=0 + ;; + esac + ;; + (service) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_service_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_service_subcommand && ret=0 + ;; + esac + ;; + (stack) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_stack_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_stack_subcommand && ret=0 + ;; + esac + ;; + (swarm) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_swarm_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_swarm_subcommand && ret=0 + ;; + esac + ;; + (system) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_system_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_system_subcommand && ret=0 + ;; + esac + ;; + (version) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (volume) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: 
:->option-or-argument" && ret=0 + + case $state in + (command) + __docker_volume_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_volume_subcommand && ret=0 + ;; + esac + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0 + ;; + esac + + return ret +} + +_docker() { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + + local curcontext="$curcontext" state line help="-h --help" + integer ret=1 + typeset -A opt_args + + _arguments $(__docker_arguments) -C \ + "(: -)"{-h,--help}"[Print usage]" \ + "($help)--config[Location of client config files]:path:_directories" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help -v --version)"{-v,--version}"[Print version information and quit]" \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + local host=${opt_args[-H]}${opt_args[--host]} + local config=${opt_args[--config]} + local docker_options="${host:+--host $host} ${config:+--config $config}" + + case $state in + (command) + __docker_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-$words[1]: + __docker_subcommand && ret=0 + ;; + esac + + return ret +} + +_dockerd() { + integer ret=1 + words[1]='daemon' + __docker_subcommand && ret=0 + return ret +} + +_docker "$@" + +# Local Variables: +# mode: Shell-Script +# sh-indentation: 4 +# indent-tabs-mode: nil +# sh-basic-offset: 4 +# End: +# vim: ft=zsh sw=4 ts=4 et diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000..da93093075 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,30 @@ +# The non-reference docs have been moved! + + + +The documentation for Docker Engine has been merged into +[the general documentation repo](https://github.com/docker/docker.github.io). + +See the [README](https://github.com/docker/docker.github.io/blob/master/README.md) +for instructions on contributing to and building the documentation. + +If you'd like to edit the current published version of the Engine docs, +do it in the master branch here: +https://github.com/docker/docker.github.io/tree/master/engine + +If you need to document the functionality of an upcoming Engine release, +use the `vnext-engine` branch: +https://github.com/docker/docker.github.io/tree/vnext-engine/engine + +The reference docs have been left in docker/docker (this repo), which remains +the place to edit them. + +The docs in the general repo are open-source and we appreciate +your feedback and pull requests! 
diff --git a/docs/deprecated.md b/docs/deprecated.md
new file mode 100644
index 0000000000..7e0bfc0a60
--- /dev/null
+++ b/docs/deprecated.md
@@ -0,0 +1,321 @@
+---
+aliases: ["/engine/misc/deprecated/"]
+title: "Deprecated Engine Features"
+description: "Deprecated Features."
+keywords: "docker, documentation, about, technology, deprecate"
+---
+
+# Deprecated Engine Features
+
+The following features are deprecated in Engine.
+To learn more about Docker Engine's deprecation policy,
+see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).
+
+### Asynchronous `service create` and `service update`
+
+**Deprecated In Release: v17.05.0**
+
+**Disabled by default in release: v17.09**
+
+Docker 17.05.0 added an optional `--detach=false` flag to make the
+`docker service create` and `docker service update` commands work synchronously.
+This option will be enabled by default in Docker 17.09, at which point the
+`--detach` flag can be used to restore the previous (asynchronous) behavior.
+
+### `-g` and `--graph` flags on `dockerd`
+
+**Deprecated In Release: v17.05.0**
+
+The `-g` or `--graph` flag for the `dockerd` or `docker daemon` command was
+used to indicate the directory in which to store persistent data and resource
+configuration. It has been replaced with the more descriptive `--data-root`
+flag.
+
+These flags were added before Docker 1.0, so they will not be _removed_, only
+_hidden_, to discourage their use.
+
+### Top-level network properties in NetworkSettings
+
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+When inspecting a container, `NetworkSettings` contains top-level information
+about the default ("bridge") network:
+
+`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`,
+`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`.
+
+These properties are deprecated in favor of the per-network properties in
+`NetworkSettings.Networks`. They were already "deprecated" in
+Docker 1.9, but kept around for backward compatibility.
+
+Refer to [#17538](https://github.com/docker/docker/pull/17538) for further
+information.
+
+### `filter` param for `/images/json` endpoint
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+The `filter` param, which filters the list of images by reference (name or name:tag), is now implemented as a regular filter named `reference`.
+
+### `repository:shortid` image references
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+The `repository:shortid` syntax for referencing images is rarely used, collides with tag references, and can be confused with digest references.
+
+### `docker daemon` subcommand
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+The daemon has been moved to a separate binary (`dockerd`), which should be used instead.
+
+### Duplicate keys with conflicting values in engine labels
+**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)**
+
+**Target For Removal In Release: v17.12**
+
+Duplicate keys with conflicting values have been deprecated. A warning is displayed
+in the output, and an error will be returned in the future.
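+
+For example, a daemon invocation along the following lines (the label key and
+values here are made up for illustration) currently only produces a warning,
+and is expected to be rejected outright in a future release:
+
+```bash
+# Duplicate engine label key with conflicting values -- deprecated:
+dockerd --label com.example.environment=production \
+        --label com.example.environment=staging
+```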
+ +### `MAINTAINER` in Dockerfile +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +`MAINTAINER` was an early, very limited form of `LABEL`, which should be used instead. + +### API calls without a version +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +API versions should be supplied to all API calls to ensure compatibility with +future Engine versions. Instead of just requesting, for example, the URL +`/containers/json`, you must now request `/v1.25/containers/json`. + +### Backing filesystem without `d_type` support for overlay/overlay2 +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v17.12** + +The overlay and overlay2 storage drivers do not work as expected if the backing +filesystem does not support `d_type`. For example, XFS does not support `d_type` +if it is formatted with the `ftype=0` option. + +Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for +further information. + +### Three-argument form of `docker import` +**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` has been deprecated since November 2013 and is no longer supported. + +### `-h` shorthand for `--help` + +**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Target For Removal In Release: v17.09** + +The shorthand (`-h`) is less common than `--help` on Linux and cannot be used +on all subcommands (because it conflicts with, e.g., `-h` / `--hostname` on +`docker create`). For this reason, the `-h` shorthand was not printed in the +"usage" output of subcommands, nor documented, and is now marked "deprecated". + +### `-e` and `--email` flags on `docker login` +**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** + +**Target For Removal In Release: v17.06** + +The `docker login` command is removing the ability to automatically register for an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and will be deprecated. + +### Separator (`:`) of `--security-opt` flag on `docker run` +**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** + +**Target For Removal In Release: v17.06** + +The `--security-opt` flag no longer uses the colon separator (`:`) to divide keys and values; it uses the equals sign (`=`) for consistency with other similar flags, like `--storage-opt`. + +### `/containers/(id or name)/copy` endpoint + +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`. + +### Ambiguous event fields in API +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a richer structure. +See the events API documentation for the new format.
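+ +For reference, one way to see the richer event structure from the command line is the `--format` flag on `docker events` (added in 1.10); the template fields below follow the new format, and the filter value is only an example: + +```bash +{% raw %} +docker events --filter 'type=container' --format '{{.Type}} {{.Action}} {{.Actor.ID}}' +{% endraw %} +```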
+ +### `-f` flag on `docker tag` +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is no longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use. + +### HostConfig at API container start +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Passing a `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of +defining it at container creation (`POST /containers/create`). + +### `--before` and `--since` flags on `docker ps` + +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The `docker ps --before` and `docker ps --since` options are deprecated. +Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. + +### `--automated` and `--stars` flags on `docker search` + +**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Target For Removal In Release: v17.09** + +The `docker search --automated` and `docker search --stars` options are deprecated. +Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead. + +### Driver Specific Log Tags +**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Log tags are now generated in a standard way across different logging drivers. +As a result, the driver-specific log tag options `syslog-tag`, `gelf-tag` and +`fluentd-tag` have been deprecated in favor of the generic `tag` option. + +```bash +{% raw %} +docker run --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" +{% endraw %} +``` + +### LXC built-in exec driver +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The built-in LXC execution driver, the `lxc-conf` flag, and the corresponding API fields have been removed. + +### Old Command Line Options +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand: + + docker daemon -H ...
+ +The following single-dash (`-opt`) variants of certain command line options +are deprecated and replaced with double-dash options (`--opt`): + + docker attach -nostdin + docker attach -sig-proxy + docker build -no-cache + docker build -rm + docker commit -author + docker commit -run + docker events -since + docker history -notrunc + docker images -notrunc + docker inspect -format + docker ps -beforeId + docker ps -notrunc + docker ps -sinceId + docker rm -link + docker run -cidfile + docker run -dns + docker run -entrypoint + docker run -expose + docker run -link + docker run -lxc-conf + docker run -n + docker run -privileged + docker run -volumes-from + docker search -notrunc + docker search -stars + docker search -t + docker search -trusted + docker tag -force + +The following double-dash options are deprecated and have no replacement: + + docker run --cpuset + docker run --networking + docker ps --since-id + docker ps --before-id + docker search --trusted + +**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The single-dash form (`-help`) was removed in favor of the double-dash `--help` option: + + docker -help + docker [COMMAND] -help + +### `--run` flag on docker commit + +**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** + +**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +The `--run` flag of `docker commit` (and its short version `-run`) was deprecated in favor +of the `--changes` flag, which allows passing `Dockerfile` commands. + + +### Interacting with V1 registries + +**Disabled By Default In Release: v17.06** + +**Target For Removal In Release: v17.12** + +Docker 1.9 added a flag (`--disable-legacy-registry=false`) which, when set to `true`, prevents the +docker daemon from performing `pull`, `push`, and `login` operations against v1 +registries. Although interaction with v1 registries is still enabled by default, this signals the intent to deprecate +the v1 protocol. + +Support for the v1 protocol to the public registry was removed in 1.13. Any +mirror configurations using v1 should be updated to use a +[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/). + +### Docker Content Trust ENV passphrase variables name change +**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Since 1.9, the Docker Content Trust Offline key has been renamed to the Root key and the Tagging key has been renamed to the Repository key. Due to this renaming, the corresponding environment variables have also changed: + +- DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE is now named DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE +- DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE is now named DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE + +### `--api-enable-cors` flag on dockerd + +**Deprecated In Release: [v1.6.0](https://github.com/docker/docker/releases/tag/v1.6.0)** + +**Target For Removal In Release: v17.09** + +The `--api-enable-cors` flag has been deprecated since v1.6.0. Use the +`--api-cors-header` flag instead.
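+ +As a minimal sketch of the replacement, the old boolean flag roughly corresponds to allowing all origins with the new flag (the value is whatever your deployment actually needs): + +```bash +dockerd --api-cors-header="*" +```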
diff --git a/docs/extend/EBS_volume.md b/docs/extend/EBS_volume.md new file mode 100644 index 0000000000..8c64efa164 --- /dev/null +++ b/docs/extend/EBS_volume.md @@ -0,0 +1,164 @@ +--- +description: Volume plugin for Amazon EBS +keywords: "API, Usage, plugins, documentation, developer, amazon, ebs, rexray, volume" +title: Volume plugin for Amazon EBS +--- + + + +# A proof-of-concept Rexray plugin + +In this example, a simple Rexray plugin will be created for the purposes of using +it on an Amazon EC2 instance with EBS. It is not meant to be a complete Rexray plugin. + +The example source is available at [https://github.com/tiborvass/rexray-plugin](https://github.com/tiborvass/rexray-plugin). + +To learn more about Rexray: [https://github.com/codedellemc/rexray](https://github.com/codedellemc/rexray) + +## 1. Make a Docker image + +The following is the Dockerfile used to containerize rexray. + +```Dockerfile +FROM debian:jessie +RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates +RUN wget https://dl.bintray.com/emccode/rexray/stable/0.6.4/rexray-Linux-x86_64-0.6.4.tar.gz -O rexray.tar.gz && tar -xvzf rexray.tar.gz -C /usr/bin && rm rexray.tar.gz +RUN mkdir -p /run/docker/plugins /var/lib/libstorage/volumes +ENTRYPOINT ["rexray"] +CMD ["--help"] +``` + +To build it you can run `image=$(cat Dockerfile | docker build -q -)` and `$image` +will reference the containerized rexray image. + +## 2. Extract rootfs + +```sh +$ TMPDIR=/tmp/rexray # for the purpose of this example +$ # create container without running it, to extract the rootfs from image +$ docker create --name rexray "$image" +$ # save the rootfs to a tar archive +$ docker export -o $TMPDIR/rexray.tar rexray +$ # extract rootfs from tar archive to a rootfs folder +$ ( mkdir -p $TMPDIR/rootfs; cd $TMPDIR/rootfs; tar xf ../rexray.tar ) +``` + +## 3. Add plugin configuration + +We have to put the following JSON to `$TMPDIR/config.json`: + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A proof-of-concept EBS plugin (using rexray) for Docker", + "Documentation": "https://github.com/tiborvass/rexray-plugin", + "Entrypoint": [ + "/usr/bin/rexray", "service", "start", "-f" + ], + "Env": [ + { + "Description": "", + "Name": "REXRAY_SERVICE", + "Settable": [ + "value" + ], + "Value": "ebs" + }, + { + "Description": "", + "Name": "EBS_ACCESSKEY", + "Settable": [ + "value" + ], + "Value": "" + }, + { + "Description": "", + "Name": "EBS_SECRETKEY", + "Settable": [ + "value" + ], + "Value": "" + } + ], + "Interface": { + "Socket": "rexray.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "AllowAllDevices": true, + "Capabilities": ["CAP_SYS_ADMIN"], + "Devices": null + }, + "Mounts": [ + { + "Source": "/dev", + "Destination": "/dev", + "Type": "bind", + "Options": ["rbind"] + } + ], + "Network": { + "Type": "host" + }, + "PropagatedMount": "/var/lib/libstorage/volumes", + "User": {}, + "WorkDir": "" +} +``` + +Please note a couple of points: +- `PropagatedMount` is needed so that the docker daemon can see mounts done by the +rexray plugin from within the container, otherwise the docker daemon is not able +to mount a docker volume. +- The rexray plugin needs dynamic access to host devices. For that reason, we +have to give it access to all devices under `/dev` and set `AllowAllDevices` to +true for proper access. 
+- The user of this simple plugin can change only three settings: `REXRAY_SERVICE`, +`EBS_ACCESSKEY` and `EBS_SECRETKEY`. This is because of the reduced scope of this +plugin. Ideally, other rexray parameters could also be set. + +## 4. Create plugin + +`docker plugin create tiborvass/rexray-plugin "$TMPDIR"` will create the plugin. + +```sh +$ docker plugin ls +ID NAME DESCRIPTION ENABLED +2475a4bd0ca5 tiborvass/rexray-plugin:latest A rexray volume plugin for Docker false +``` + +## 5. Test plugin + +```sh +$ docker plugin set tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY +$ docker plugin enable tiborvass/rexray-plugin +$ docker volume create -d tiborvass/rexray-plugin my-ebs-volume +$ docker volume ls +DRIVER VOLUME NAME +tiborvass/rexray-plugin:latest my-ebs-volume +$ docker run --rm -v my-ebs-volume:/volume busybox sh -c 'echo bye > /volume/hi' +$ docker run --rm -v my-ebs-volume:/volume busybox cat /volume/hi +bye +``` + +## 6. Push plugin + +First, ensure you are logged in with `docker login`. Then run +`docker plugin push tiborvass/rexray-plugin` to push it to a registry like a regular docker +image, making it available for others to install via +`docker plugin install tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY`. diff --git a/docs/extend/config.md b/docs/extend/config.md new file mode 100644 index 0000000000..bb6c7f2ceb --- /dev/null +++ b/docs/extend/config.md @@ -0,0 +1,238 @@ +--- +title: "Plugin config" +description: "How to develop and use a plugin with the managed plugin system" +keywords: "API, Usage, plugins, documentation, developer" +--- + + + + +# Plugin Config Version 1 of Plugin V2 + +This document outlines the format of the V0 plugin configuration. The plugin +config described herein was introduced in the Docker daemon in the [v1.12.0 +release](https://github.com/docker/docker/commit/f37117045c5398fd3dca8016ea8ca0cb47e7312b). + +Plugin configs describe the various constituents of a docker plugin. Plugin +configs can be serialized to JSON format with the following media types: + +Config Type | Media Type +------------- | ------------- +config | "application/vnd.docker.plugin.v1+json" + + +## *Config* Field Descriptions + +Config provides the base accessible fields for working with V0 plugin format + in the registry. + +- **`description`** *string* + + description of the plugin + +- **`documentation`** *string* + + link to the documentation about the plugin + +- **`interface`** *PluginInterface* + + interface implemented by the plugins, struct consisting of the following fields + + - **`types`** *string array* + + types indicate what interface(s) the plugin currently implements. + + currently supported: + + - **docker.volumedriver/1.0** + + - **docker.networkdriver/1.0** + + - **docker.ipamdriver/1.0** + + - **docker.authz/1.0** + + - **docker.logdriver/1.0** + + - **docker.metricscollector/1.0** + + - **`socket`** *string* + + socket is the name of the socket the engine should use to communicate with the plugins. + the socket will be created in `/run/docker/plugins`. + + +- **`entrypoint`** *string array* + + entrypoint of the plugin, see [`ENTRYPOINT`](../reference/builder.md#entrypoint) + +- **`workdir`** *string* + + workdir of the plugin, see [`WORKDIR`](../reference/builder.md#workdir) + +- **`network`** *PluginNetwork* + + network of the plugin, struct consisting of the following fields + + - **`type`** *string* + + network type.
+ + currently supported: + + - **bridge** + - **host** + - **none** + +- **`mounts`** *PluginMount array* + + mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts) + + - **`name`** *string* + + name of the mount. + + - **`description`** *string* + + description of the mount. + + - **`source`** *string* + + source of the mount. + + - **`destination`** *string* + + destination of the mount. + + - **`type`** *string* + + mount type. + + - **`options`** *string array* + + options of the mount. + +- **`ipchost`** *boolean* + Access to host ipc namespace. +- **`pidhost`** *boolean* + Access to host pid namespace. + +- **`propagatedMount`** *string* + + path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins. + This path will be bind-mounted outisde of the plugin rootfs so it's contents + are preserved on upgrade. + +- **`env`** *PluginEnv array* + + env of the plugin, struct consisting of the following fields + + - **`name`** *string* + + name of the env. + + - **`description`** *string* + + description of the env. + + - **`value`** *string* + + value of the env. + +- **`args`** *PluginArgs* + + args of the plugin, struct consisting of the following fields + + - **`name`** *string* + + name of the args. + + - **`description`** *string* + + description of the args. + + - **`value`** *string array* + + values of the args. + +- **`linux`** *PluginLinux* + + - **`capabilities`** *string array* + + capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security) + + - **`allowAllDevices`** *boolean* + + If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host. + + - **`devices`** *PluginDevice array* + + device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices) + + - **`name`** *string* + + name of the device. + + - **`description`** *string* + + description of the device. + + - **`path`** *string* + + path of the device. 
+ +## Example Config + +*Example showing the 'tiborvass/sample-volume-plugin' plugin config.* + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A sample volume plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Entrypoint": [ + "/usr/bin/sample-volume-plugin", + "/data" + ], + "Env": [ + { + "Description": "", + "Name": "DEBUG", + "Settable": [ + "value" + ], + "Value": "0" + } + ], + "Interface": { + "Socket": "plugin.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "Capabilities": null, + "AllowAllDevices": false, + "Devices": null + }, + "Mounts": null, + "Network": { + "Type": "" + }, + "PropagatedMount": "/data", + "User": {}, + "Workdir": "" +} +``` diff --git a/docs/extend/images/authz_additional_info.png b/docs/extend/images/authz_additional_info.png new file mode 100644 index 0000000000..1a6a6d01d2 Binary files /dev/null and b/docs/extend/images/authz_additional_info.png differ diff --git a/docs/extend/images/authz_allow.png b/docs/extend/images/authz_allow.png new file mode 100644 index 0000000000..f42108040b Binary files /dev/null and b/docs/extend/images/authz_allow.png differ diff --git a/docs/extend/images/authz_chunked.png b/docs/extend/images/authz_chunked.png new file mode 100644 index 0000000000..5bde2c71f9 Binary files /dev/null and b/docs/extend/images/authz_chunked.png differ diff --git a/docs/extend/images/authz_connection_hijack.png b/docs/extend/images/authz_connection_hijack.png new file mode 100644 index 0000000000..f13a2987b2 Binary files /dev/null and b/docs/extend/images/authz_connection_hijack.png differ diff --git a/docs/extend/images/authz_deny.png b/docs/extend/images/authz_deny.png new file mode 100644 index 0000000000..fa4a48584a Binary files /dev/null and b/docs/extend/images/authz_deny.png differ diff --git a/docs/extend/index.md b/docs/extend/index.md new file mode 100644 index 0000000000..2fe2d83687 --- /dev/null +++ b/docs/extend/index.md @@ -0,0 +1,262 @@ +--- +description: Develop and use a plugin with the managed plugin system +keywords: "API, Usage, plugins, documentation, developer" +title: Managed plugin system +--- + + + +# Docker Engine managed plugin system + +* [Installing and using a plugin](index.md#installing-and-using-a-plugin) +* [Developing a plugin](index.md#developing-a-plugin) +* [Debugging plugins](index.md#debugging-plugins) + +Docker Engine's plugin system allows you to install, start, stop, and remove +plugins using Docker Engine. + +For information about the legacy plugin system available in Docker Engine 1.12 +and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md). + +> **Note**: Docker Engine managed plugins are currently not supported +on Windows daemons. + +## Installing and using a plugin + +Plugins are distributed as Docker images and can be hosted on Docker Hub or on +a private registry. + +To install a plugin, use the `docker plugin install` command, which pulls the +plugin from Docker Hub or your private registry, prompts you to grant +permissions or capabilities if necessary, and enables the plugin. + +To check the status of installed plugins, use the `docker plugin ls` command. +Plugins that start successfully are listed as enabled in the output. + +After a plugin is installed, you can use it as an option for another Docker +operation, such as creating a volume. 
+ +In the following example, you install the `sshfs` plugin, verify that it is +enabled, and use it to create a volume. + +> **Note**: This example is intended for instructional purposes only. Once the volume is created, your SSH password to the remote host will be exposed as plaintext when inspecting the volume. You should delete the volume as soon as you are done with the example. + +1. Install the `sshfs` plugin. + + ```bash + $ docker plugin install vieux/sshfs + + Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - capabilities: [CAP_SYS_ADMIN] + Do you grant the above permissions? [y/N] y + + vieux/sshfs + ``` + + The plugin requests 2 privileges: + + - It needs access to the `host` network. + - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run + the `mount` command. + +2. Check that the plugin is enabled in the output of `docker plugin ls`. + + ```bash + $ docker plugin ls + + ID NAME TAG DESCRIPTION ENABLED + 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true + ``` + +3. Create a volume using the plugin. + This example mounts the `/remote` directory on host `1.2.3.4` into a + volume named `sshvolume`. + + This volume can now be mounted into containers. + + ```bash + $ docker volume create \ + -d vieux/sshfs \ + --name sshvolume \ + -o sshcmd=user@1.2.3.4:/remote \ + -o password=$(cat file_containing_password_for_remote_host) + + sshvolume + ``` +4. Verify that the volume was created successfully. + + ```bash + $ docker volume ls + + DRIVER NAME + vieux/sshfs sshvolume + ``` + +5. Start a container that uses the volume `sshvolume`. + + ```bash + $ docker run --rm -v sshvolume:/data busybox ls /data + + + ``` + +6. Remove the volume `sshvolume` + ```bash + docker volume rm sshvolume + + sshvolume + ``` +To disable a plugin, use the `docker plugin disable` command. To completely +remove it, use the `docker plugin remove` command. For other available +commands and options, see the +[command line reference](../reference/commandline/index.md). + + +## Developing a plugin + +#### The rootfs directory +The `rootfs` directory represents the root filesystem of the plugin. In this +example, it was created from a Dockerfile: + +>**Note:** The `/run/docker/plugins` directory is mandatory inside of the +plugin's filesystem for docker to communicate with the plugin. + +```bash +$ git clone https://github.com/vieux/docker-volume-sshfs +$ cd docker-volume-sshfs +$ docker build -t rootfsimage . +$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created +$ sudo mkdir -p myplugin/rootfs +$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs +$ docker rm -vf "$id" +$ docker rmi rootfsimage +``` + +#### The config.json file + +The `config.json` file describes the plugin. See the [plugins config reference](config.md). + +Consider the following `config.json` file. + +```json +{ + "description": "sshFS plugin for Docker", + "documentation": "https://docs.docker.com/engine/extend/plugins/", + "entrypoint": ["/docker-volume-sshfs"], + "network": { + "type": "host" + }, + "interface" : { + "types": ["docker.volumedriver/1.0"], + "socket": "sshfs.sock" + }, + "linux": { + "capabilities": ["CAP_SYS_ADMIN"] + } +} +``` + +This plugin is a volume driver. It requires a `host` network and the +`CAP_SYS_ADMIN` capability. It depends upon the `/docker-volume-sshfs` +entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate +with Docker Engine. This plugin has no runtime parameters. 
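+ +Putting the two pieces together, the plugin data directory handed to `docker plugin create` in the next step is expected to contain the `config.json` file next to the `rootfs` directory. A quick sketch of that layout, reusing the `myplugin` directory from the rootfs example above: + +```bash +$ ls myplugin +config.json  rootfs +```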
+ +#### Creating the plugin + +A new plugin can be created by running +`docker plugin create ./path/to/plugin/data` where the plugin +data contains a plugin configuration file `config.json` and a root filesystem +in subdirectory `rootfs`. + +After that the plugin `` will show up in `docker plugin ls`. +Plugins can be pushed to remote registries with +`docker plugin push `. + + +## Debugging plugins + +Stdout of a plugin is redirected to dockerd logs. Such entries have a +`plugin=` suffix. Here are a few examples of commands for pluginID +`f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62` and their +corresponding log entries in the docker daemon logs. + +```bash +$ docker plugin install tiborvass/sample-volume-plugins + +INFO[0036] Starting... Found 0 volumes on startup plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +``` + +```bash +$ docker volume create -d tiborvass/sample-volume-plugins samplevol + +INFO[0193] Create Called... Ensuring directory /data/samplevol exists on host... plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0193] open /var/lib/docker/plugin-data/local-persist.json: no such file or directory plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0193] Created volume samplevol with mountpoint /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0193] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +``` + +```bash +$ docker run -v samplevol:/tmp busybox sh + +INFO[0421] Get Called... Found samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0421] Mount Called... Mounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0421] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +INFO[0421] Unmount Called... Unmounted samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 +``` + +#### Using docker-runc to obtain logfiles and shell into the plugin. + +`docker-runc`, the default docker container runtime can be used for debugging +plugins. This is specifically useful to collect plugin logs if they are +redirected to a file. + +```bash +$ docker-runc list +ID PID STATUS BUNDLE CREATED +f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2679 running /run/docker/libcontainerd/f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2017-02-06T21:53:03.031537592Z +r +``` + +```bash +$ docker-runc exec f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 cat /var/log/plugin.log +``` + +If the plugin has a built-in shell, then exec into the plugin can be done as +follows: +```bash +$ docker-runc exec -t f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 sh +``` + +#### Using curl to debug plugin socket issues. + +To verify if the plugin API socket that the docker daemon communicates with +is responsive, use curl. In this example, we will make API calls from the +docker host to volume and network plugins using curl 7.47.0 to ensure that +the plugin is listening on the said socket. For a well functioning plugin, +these basic requests should work. 
Note that plugin sockets are available on the host under `/var/run/docker/plugins/` + + +```bash +curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List + +{"Mountpoint":"","Err":"","Volumes":[{"Name":"myvol1","Mountpoint":"/data/myvol1"},{"Name":"myvol2","Mountpoint":"/data/myvol2"}],"Volume":null} +``` + +```bash +curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities + +{"Scope":"local"} +``` +When using curl 7.5 and above, the URL should be of the form +`http://hostname/APICall`, where `hostname` is the valid hostname where the +plugin is installed and `APICall` is the call to the plugin API. + +For example, `http://localhost/VolumeDriver.List` diff --git a/docs/extend/legacy_plugins.md b/docs/extend/legacy_plugins.md new file mode 100644 index 0000000000..68bba59f46 --- /dev/null +++ b/docs/extend/legacy_plugins.md @@ -0,0 +1,101 @@ +--- +redirect_from: +- "/engine/extend/plugins/" +title: "Use Docker Engine plugins" +description: "How to add additional functionality to Docker with plugins extensions" +keywords: "Examples, Usage, plugins, docker, documentation, user guide" +--- + + + +This document describes the Docker Engine plugins generally available in Docker +Engine. To view information on plugins managed by Docker, +refer to [Docker Engine plugin system](index.md). + +You can extend the capabilities of the Docker Engine by loading third-party +plugins. This page explains the types of plugins and provides links to several +volume and network plugins for Docker. + +## Types of plugins + +Plugins extend Docker's functionality. They come in specific types. For +example, a [volume plugin](plugins_volume.md) might enable Docker +volumes to persist across multiple Docker hosts and a +[network plugin](plugins_network.md) might provide network plumbing. + +Currently Docker supports authorization, volume and network driver plugins. In the future it +will support additional plugin types. + +## Installing a plugin + +Follow the instructions in the plugin's documentation. + +## Finding a plugin + +The sections below provide an inexhaustive overview of available plugins. + + + +### Network plugins + +Plugin | Description +----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards. +[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well. 
+[Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. + +### Volume plugins + +Plugin | Description +----------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +[Azure File Storage plugin](https://github.com/Azure/azurefile-dockervolumedriver) | Lets you mount Microsoft [Azure File Storage](https://azure.microsoft.com/blog/azure-file-storage-now-generally-available/) shares to Docker containers as volumes using the SMB 3.0 protocol. [Learn more](https://azure.microsoft.com/blog/persistent-docker-volumes-with-azure-file-storage/). +[BeeGFS Volume Plugin](https://github.com/RedCoolBeans/docker-volume-beegfs) | An open source volume plugin to create persistent volumes in a BeeGFS parallel file system. +[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS. +[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS. +[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore. +[DigitalOcean Block Storage plugin](https://github.com/omallo/docker-volume-plugin-dostorage) | Integrates DigitalOcean's [block storage solution](https://www.digitalocean.com/products/storage/) into the Docker ecosystem by automatically attaching a given block storage volume to a DigitalOcean droplet and making the contents of the volume available to Docker containers running on that droplet. +[DRBD plugin](https://www.drbd.org/en/supported-projects/docker) | A volume plugin that provides highly available storage replicated by [DRBD](https://www.drbd.org). Data written to the docker volume is replicated in a cluster of DRBD nodes. +[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines. +[Fuxi Volume Plugin](https://github.com/openstack/fuxi) | A volume plugin that is developed as part of the OpenStack Kuryr project and implements the Docker volume plugin API by utilizing Cinder, the OpenStack block storage service. 
+[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks). +[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volumes management for Docker using GlusterFS. +[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3. +[HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays. +[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. +[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository. +[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`. +[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future. +[Netshare plugin](https://github.com/ContainX/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems. +[Nimble Storage Volume Plugin](https://connect.nimblestorage.com/community/app-integration/docker)| A volume plug-in that integrates with Nimble Storage Unified Flash Fabric arrays. The plug-in abstracts array volume capabilities to the Docker administrator to allow self-provisioning of secure multi-tenant volumes and clones. +[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. +[Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler. +[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. +[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. 
+[Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices. +[VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments. + +### Authorization plugins + + Plugin | Description +------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + [Twistlock AuthZ Broker](https://github.com/twistlock/authz) | A basic extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if Docker daemon is started with the --tlsverify flag (username is extracted from the certificate common name). + [HBM plugin](https://github.com/kassisol/hbm) | An authorization plugin that prevents from executing commands with certains parameters. + +## Troubleshooting a plugin + +If you are having problems with Docker after loading a plugin, ask the authors +of the plugin for help. The Docker team may not be able to assist you. + +## Writing a plugin + +If you are interested in writing a plugin for Docker, or seeing how they work +under the hood, see the [docker plugins reference](plugin_api.md). diff --git a/docs/extend/plugin_api.md b/docs/extend/plugin_api.md new file mode 100644 index 0000000000..693b77a2f3 --- /dev/null +++ b/docs/extend/plugin_api.md @@ -0,0 +1,196 @@ +--- +title: "Plugins API" +description: "How to write Docker plugins extensions " +keywords: "API, Usage, plugins, documentation, developer" +--- + + + +# Docker Plugin API + +Docker plugins are out-of-process extensions which add capabilities to the +Docker Engine. + +This document describes the Docker Engine plugin API. To view information on +plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md). + +This page is intended for people who want to develop their own Docker plugin. +If you just want to learn about or use Docker plugins, look +[here](legacy_plugins.md). + +## What plugins are + +A plugin is a process running on the same or a different host as the docker daemon, +which registers itself by placing a file on the same docker host in one of the plugin +directories described in [Plugin discovery](#plugin-discovery). + +Plugins have human-readable names, which are short, lowercase strings. For +example, `flocker` or `weave`. + +Plugins can run inside or outside containers. Currently running them outside +containers is recommended. + +## Plugin discovery + +Docker discovers plugins by looking for them in the plugin directory whenever a +user or container tries to use one by name. + +There are three types of files which can be put in the plugin directory. + +* `.sock` files are UNIX domain sockets. +* `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`. +* `.json` files are text files containing a full json specification for the plugin. + +Plugins with UNIX domain socket files must run on the same docker host, whereas +plugins with spec or json files can run on a different host if a remote URL is specified. 
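+ +For example, a hypothetical plugin listening on a remote TCP address could be registered with a one-line `.spec` file (the plugin name and address below are made up; the directories Docker scans are listed next): + +```bash +echo "tcp://plugin-host.example.com:8080" > /etc/docker/plugins/myplugin.spec +```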
+ +UNIX domain socket files must be located under `/run/docker/plugins`, whereas +spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`. + +The name of the file (excluding the extension) determines the plugin name. + +For example, the `flocker` plugin might create a UNIX socket at +`/run/docker/plugins/flocker.sock`. + +You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. +For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only +mount `/run/docker/plugins/flocker` inside the `flocker` container. + +Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under +`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as +soon as it finds the first plugin definition with the given name. + +### JSON specification + +This is the JSON format for a plugin: + +```json +{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "InsecureSkipVerify": false, + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +} +``` + +The `TLSConfig` field is optional and TLS will only be verified if this configuration is present. + +## Plugin lifecycle + +Plugins should be started before Docker, and stopped after Docker. For +example, when packaging a plugin for a platform which supports `systemd`, you +might use [`systemd` dependencies]( +http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to +manage startup and shutdown order. + +When upgrading a plugin, you should first stop the Docker daemon, upgrade the +plugin, then start Docker again. + +## Plugin activation + +When a plugin is first referred to -- either by a user referring to it by name +(e.g. `docker run --volume-driver=foo`) or a container already configured to +use a plugin being started -- Docker looks for the named plugin in the plugin +directory and activates it with a handshake. See Handshake API below. + +Plugins are *not* activated automatically at Docker daemon startup. Rather, +they are activated only lazily, or on-demand, when they are needed. + +## Systemd socket activation + +Plugins may also be socket activated by `systemd`. The official [Plugins helpers](https://github.com/docker/go-plugins-helpers) +natively supports socket activation. In order for a plugin to be socket activated it needs +a `service` file and a `socket` file. + +The `service` file (for example `/lib/systemd/system/your-plugin.service`): + +``` +[Unit] +Description=Your plugin +Before=docker.service +After=network.target your-plugin.socket +Requires=your-plugin.socket docker.service + +[Service] +ExecStart=/usr/lib/docker/your-plugin + +[Install] +WantedBy=multi-user.target +``` +The `socket` file (for example `/lib/systemd/system/your-plugin.socket`): + +``` +[Unit] +Description=Your plugin + +[Socket] +ListenStream=/run/docker/plugins/your-plugin.sock + +[Install] +WantedBy=sockets.target +``` + +This will allow plugins to be actually started when the Docker daemon connects to +the sockets they're listening on (for instance the first time the daemon uses them +or if one of the plugin goes down accidentally). + +## API design + +The Plugin API is RPC-style JSON over HTTP, much like webhooks. + +Requests flow *from* the Docker daemon *to* the plugin. 
So the plugin needs to +implement an HTTP server and bind this to the UNIX socket mentioned in the +"plugin discovery" section. + +All requests are HTTP `POST` requests. + +The API is versioned via an Accept header, which currently is always set to +`application/vnd.docker.plugins.v1+json`. + +## Handshake API + +Plugins are activated via the following "handshake" API call. + +### /Plugin.Activate + +**Request:** empty body + +**Response:** +``` +{ + "Implements": ["VolumeDriver"] +} +``` + +Responds with a list of Docker subsystems which this plugin implements. +After activation, the plugin will then be sent events from this subsystem. + +Possible values are: + +* [`authz`](plugins_authorization.md) +* [`NetworkDriver`](plugins_network.md) +* [`VolumeDriver`](plugins_volume.md) + + +## Plugin retries + +Attempts to call a method on a plugin are retried with an exponential backoff +for up to 30 seconds. This may help when packaging plugins as containers, since +it gives plugin containers a chance to start up before failing any user +containers which depend on them. + +## Plugins helpers + +To ease plugins development, we're providing an `sdk` for each kind of plugins +currently supported by Docker at [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers). diff --git a/docs/extend/plugins_authorization.md b/docs/extend/plugins_authorization.md new file mode 100644 index 0000000000..ac1837f754 --- /dev/null +++ b/docs/extend/plugins_authorization.md @@ -0,0 +1,260 @@ +--- +title: "Access authorization plugin" +description: "How to create authorization plugins to manage access control to your Docker daemon." +keywords: "security, authorization, authentication, docker, documentation, plugin, extend" +redirect_from: +- "/engine/extend/authorization/" +--- + + + +# Create an authorization plugin + +This document describes the Docker Engine plugins generally available in Docker +Engine. To view information on plugins managed by Docker Engine, +refer to [Docker Engine plugin system](index.md). + +Docker's out-of-the-box authorization model is all or nothing. Any user with +permission to access the Docker daemon can run any Docker client command. The +same is true for callers using Docker's Engine API to contact the daemon. If you +require greater access control, you can create authorization plugins and add +them to your Docker daemon configuration. Using an authorization plugin, a +Docker administrator can configure granular access policies for managing access +to Docker daemon. + +Anyone with the appropriate skills can develop an authorization plugin. These +skills, at their most basic, are knowledge of Docker, understanding of REST, and +sound programming knowledge. This document describes the architecture, state, +and methods information available to an authorization plugin developer. + +## Basic principles + +Docker's [plugin infrastructure](plugin_api.md) enables +extending Docker by loading, removing and communicating with +third-party components using a generic API. The access authorization subsystem +was built using this mechanism. + +Using this subsystem, you don't need to rebuild the Docker daemon to add an +authorization plugin. You can add a plugin to an installed Docker daemon. You do +need to restart the Docker daemon to add a new plugin. + +An authorization plugin approves or denies requests to the Docker daemon based +on both the current authentication context and the command context. 
The +authentication context contains all user details and the authentication method. +The command context contains all the relevant request data. + +Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). +Each plugin must reside within directories described under the +[Plugin discovery](plugin_api.md#plugin-discovery) section. + +**Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication +respectively. + +## Default user authorization mechanism + +If TLS is enabled in the [Docker daemon](https://docs.docker.com/engine/security/https/), the default user authorization flow extracts the user details from the certificate subject name. +That is, the `User` field is set to the client certificate subject common name, and the `AuthenticationMethod` field is set to `TLS`. + +## Basic architecture + +You are responsible for registering your plugin as part of the Docker daemon +startup. You can install multiple plugins and chain them together. This chain +can be ordered. Each request to the daemon passes in order through the chain. +Only when all the plugins grant access to the resource, is the access granted. + +When an HTTP request is made to the Docker daemon through the CLI or via the +Engine API, the authentication subsystem passes the request to the installed +authentication plugin(s). The request contains the user (caller) and command +context. The plugin is responsible for deciding whether to allow or deny the +request. + +The sequence diagrams below depict an allow and deny authorization flow: + +![Authorization Allow flow](images/authz_allow.png) + +![Authorization Deny flow](images/authz_deny.png) + +Each request sent to the plugin includes the authenticated user, the HTTP +headers, and the request/response body. Only the user name and the +authentication method used are passed to the plugin. Most importantly, no user +credentials or tokens are passed. Finally, not all request/response bodies +are sent to the authorization plugin. Only those request/response bodies where +the `Content-Type` is either `text/*` or `application/json` are sent. + +For commands that can potentially hijack the HTTP connection (`HTTP +Upgrade`), such as `exec`, the authorization plugin is only called for the +initial HTTP requests. Once the plugin approves the command, authorization is +not applied to the rest of the flow. Specifically, the streaming data is not +passed to the authorization plugins. For commands that return chunked HTTP +response, such as `logs` and `events`, only the HTTP request is sent to the +authorization plugins. + +During request/response processing, some authorization flows might +need to do additional queries to the Docker daemon. To complete such flows, +plugins can call the daemon API similar to a regular user. To enable these +additional queries, the plugin must provide the means for an administrator to +configure proper authentication and security policies. + +## Docker client flows + +To enable and configure the authorization plugin, the plugin developer must +support the Docker client interactions detailed in this section. + +### Setting up Docker daemon + +Enable the authorization plugin with a dedicated command line flag in the +`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID` +value. This value can be the plugin’s socket or a path to a specification file. +Authorization plugins can be loaded without restarting the daemon. 
Refer +to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information. + +```bash +$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +Docker's authorization subsystem supports multiple `--authorization-plugin` parameters. + +### Calling authorized command (allow) + +```bash +$ docker pull centos +... +f1b10cd84249: Pull complete +... +``` + +### Calling unauthorized command (deny) + +```bash +$ docker pull centos +... +docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed. +``` + +### Error from plugins + +```bash +$ docker pull centos +... +docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?. +``` + +## API schema and implementation + +In addition to Docker's standard plugin registration method, each plugin +should implement the following two methods: + +* `/AuthZPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request. + +* `/AuthZPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client. + +#### /AuthZPlugin.AuthZReq + +**Request**: + +```json +{ + "User": "The user identification", + "UserAuthNMethod": "The authentication method used", + "RequestMethod": "The HTTP method", + "RequestURI": "The HTTP request URI", + "RequestBody": "Byte array containing the raw HTTP request body", + "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string " +} +``` + +**Response**: + +```json +{ + "Allow": "Determined whether the user is allowed or not", + "Msg": "The authorization message", + "Err": "The error message if things go wrong" +} +``` +#### /AuthZPlugin.AuthZRes + +**Request**: + +```json +{ + "User": "The user identification", + "UserAuthNMethod": "The authentication method used", + "RequestMethod": "The HTTP method", + "RequestURI": "The HTTP request URI", + "RequestBody": "Byte array containing the raw HTTP request body", + "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string", + "ResponseBody": "Byte array containing the raw HTTP response body", + "ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string", + "ResponseStatusCode":"Response status code" +} +``` + +**Response**: + +```json +{ + "Allow": "Determined whether the user is allowed or not", + "Msg": "The authorization message", + "Err": "The error message if things go wrong" +} +``` + +### Request authorization + +Each plugin must support two request authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. 
+ +#### Daemon -> Plugin + +Name | Type | Description +-----------------------|-------------------|------------------------------------------------------- +User | string | The user identification +Authentication method | string | The authentication method used +Request method | enum | The HTTP method (GET/DELETE/POST) +Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) +Request headers | map[string]string | Request headers as key value pairs (without the authorization header) +Request body | []byte | Raw request body + + +#### Plugin -> Daemon + +Name | Type | Description +--------|--------|---------------------------------------------------------------------------------- +Allow | bool | Boolean value indicating whether the request is allowed or denied +Msg | string | Authorization message (will be returned to the client in case the access is denied) +Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) + +### Response authorization + +The plugin must support two authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. + +#### Daemon -> Plugin + + +Name | Type | Description +----------------------- |------------------ |---------------------------------------------------- +User | string | The user identification +Authentication method | string | The authentication method used +Request method | string | The HTTP method (GET/DELETE/POST) +Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) +Request headers | map[string]string | Request headers as key value pairs (without the authorization header) +Request body | []byte | Raw request body +Response status code | int | Status code from the docker daemon +Response headers | map[string]string | Response headers as key value pairs +Response body | []byte | Raw docker daemon response body + + +#### Plugin -> Daemon + +Name | Type | Description +--------|--------|---------------------------------------------------------------------------------- +Allow | bool | Boolean value indicating whether the response is allowed or denied +Msg | string | Authorization message (will be returned to the client in case the access is denied) +Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) diff --git a/docs/extend/plugins_graphdriver.md b/docs/extend/plugins_graphdriver.md new file mode 100644 index 0000000000..c134b1ebc4 --- /dev/null +++ b/docs/extend/plugins_graphdriver.md @@ -0,0 +1,403 @@ +--- +title: "Graphdriver plugins" +description: "How to manage image and container filesystems with external plugins" +keywords: "Examples, Usage, storage, image, docker, data, graph, plugin, api" +advisory: experimental +--- + + + + +## Changelog + +### 1.13.0 + +- Support v2 plugins + +# Docker graph driver plugins + +Docker graph driver plugins enable admins to use an external/out-of-process +graph driver for use with Docker engine. This is an alternative to using the +built-in storage drivers, such as aufs/overlay/devicemapper/btrfs. + +You need to install and enable the plugin and then restart the Docker daemon +before using the plugin. 
See the following example for the correct ordering +of steps. + +``` +$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver + +$ pkill dockerd +$ dockerd --experimental -s cpuguy83/docker-overlay2-graphdriver-plugin +``` + +# Write a graph driver plugin + +See the [plugin documentation](https://docs.docker.com/engine/extend/) for detailed information +on the underlying plugin protocol. + + +## Graph Driver plugin protocol + +If a plugin registers itself as a `GraphDriver` when activated, then it is +expected to provide the rootfs for containers as well as image layer storage. + +### /GraphDriver.Init + +**Request**: +```json +{ + "Home": "/graph/home/path", + "Opts": [], + "UIDMaps": [], + "GIDMaps": [] +} +``` + +Initialize the graph driver plugin with a home directory and array of options. +These are passed through from the user, but the plugin is not required to parse +or honor them. + +The request also includes a list of UID and GID mappings, structed as follows: +```json +{ + "ContainerID": 0, + "HostID": 0, + "Size": 0 +} +``` + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + + +### /GraphDriver.Capabilities + +**Request**: +```json +{} +``` + +Get behavioral characteristics of the graph driver. If a plugin does not handle +this request, the engine will use default values for all capabilities. + +**Response**: +```json +{ + "ReproducesExactDiffs": false, +} +``` + +Respond with values of capabilities: + +* **ReproducesExactDiffs** Defaults to false. Flags that this driver is capable +of reproducing exactly equivalent diffs for read-only filesystem layers. + + +### /GraphDriver.Create + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142", + "MountLabel": "", + "StorageOpt": {} +} +``` + +Create a new, empty, read-only filesystem layer with the specified +`ID`, `Parent` and `MountLabel`. If `Parent` is an empty string, there is no +parent layer. `StorageOpt` is map of strings which indicate storage options. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.CreateReadWrite + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142", + "MountLabel": "", + "StorageOpt": {} +} +``` + +Similar to `/GraphDriver.Create` but creates a read-write filesystem layer. + +### /GraphDriver.Remove + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Remove the filesystem layer with this given `ID`. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Get + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "MountLabel": "" +} +``` + +Get the mountpoint for the layered filesystem referred to by the given `ID`. + +**Response**: +```json +{ + "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Err": "" +} +``` + +Respond with the absolute path to the mounted layered filesystem. +Respond with a non-empty string error if an error occurred. 
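
For illustration, a minimal sketch of a `/GraphDriver.Get` handler is shown below. It only demonstrates the JSON request/response shape described above; the directory layout, helper names, and the use of a local TCP listener are assumptions made to keep the sketch self-contained. A real plugin would perform an actual mount and serve the endpoint on its plugin socket.

```go
// Sketch of a /GraphDriver.Get handler (illustrative only).
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"path/filepath"
)

type getRequest struct {
	ID         string
	MountLabel string
}

type getResponse struct {
	Dir string `json:"Dir"`
	Err string `json:"Err"`
}

// handleGet reports where the layer with the requested ID would be mounted
// under the driver's home directory. A real driver would mount the layered
// filesystem here before responding.
func handleGet(home string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var req getRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			json.NewEncoder(w).Encode(getResponse{Err: err.Error()})
			return
		}
		json.NewEncoder(w).Encode(getResponse{Dir: filepath.Join(home, req.ID)})
	}
}

func main() {
	// A plugin would normally serve this on its plugin socket; a local TCP
	// listener is used here only to keep the example runnable on its own.
	http.HandleFunc("/GraphDriver.Get", handleGet("/var/mygraph"))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```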
+ +### /GraphDriver.Put + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Release the system resources for the specified `ID`, such as unmounting the +filesystem layer. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Exists + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Determine if a filesystem layer with the specified `ID` exists. + +**Response**: +```json +{ + "Exists": true +} +``` + +Respond with a boolean for whether or not the filesystem layer with the specified +`ID` exists. + +### /GraphDriver.Status + +**Request**: +```json +{} +``` + +Get low-level diagnostic information about the graph driver. + +**Response**: +```json +{ + "Status": [[]] +} +``` + +Respond with a 2-D array with key/value pairs for the underlying status +information. + + +### /GraphDriver.GetMetadata + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187" +} +``` + +Get low-level diagnostic information about the layered filesystem with the +with the specified `ID` + +**Response**: +```json +{ + "Metadata": {}, + "Err": "" +} +``` + +Respond with a set of key/value pairs containing the low-level diagnostic +information about the layered filesystem. +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.Cleanup + +**Request**: +```json +{} +``` + +Perform necessary tasks to release resources help by the plugin, such as +unmounting all the layered file systems. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a non-empty string error if an error occurred. + + +### /GraphDriver.Diff + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Get an archive of the changes between the filesystem layers specified by the `ID` +and `Parent`. `Parent` may be an empty string, in which case there is no parent. + +**Response**: +``` +{% raw %} +{{ TAR STREAM }} +{% endraw %} +``` + +### /GraphDriver.Changes + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Get a list of changes between the filesystem layers specified by the `ID` and +`Parent`. If `Parent` is an empty string, there is no parent. + +**Response**: +```json +{ + "Changes": [{}], + "Err": "" +} +``` + +Respond with a list of changes. The structure of a change is: +```json + "Path": "/some/path", + "Kind": 0, +``` + +Where the `Path` is the filesystem path within the layered filesystem that is +changed and `Kind` is an integer specifying the type of change that occurred: + +- 0 - Modified +- 1 - Added +- 2 - Deleted + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.ApplyDiff + +**Request**: +``` +{% raw %} +{{ TAR STREAM }} +{% endraw %} +``` + +Extract the changeset from the given diff into the layer with the specified `ID` +and `Parent` + +**Query Parameters**: + +- id (required)- the `ID` of the new filesystem layer to extract the diff to +- parent (required)- the `Parent` of the given `ID` + +**Response**: +```json +{ + "Size": 512366, + "Err": "" +} +``` + +Respond with the size of the new layer in bytes. 
+Respond with a non-empty string error if an error occurred. + +### /GraphDriver.DiffSize + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Calculate the changes between the specified `ID` + +**Response**: +```json +{ + "Size": 512366, + "Err": "" +} +``` + +Respond with the size changes between the specified `ID` and `Parent` +Respond with a non-empty string error if an error occurred. diff --git a/docs/extend/plugins_logging.md b/docs/extend/plugins_logging.md new file mode 100644 index 0000000000..8f687b4a01 --- /dev/null +++ b/docs/extend/plugins_logging.md @@ -0,0 +1,220 @@ +--- +title: "Docker log driver plugins" +description: "Log driver plugins." +keywords: "Examples, Usage, plugins, docker, documentation, user guide, logging" +--- + + + +# Logging driver plugins + +This document describes logging driver plugins for Docker. + +Logging drivers enables users to forward container logs to another service for +processing. Docker includes several logging drivers as built-ins, however can +never hope to support all use-cases with built-in drivers. Plugins allow Docker +to support a wide range of logging services without requiring to embed client +libraries for these services in the main Docker codebase. See the +[plugin documentation](legacy_plugins.md) for more information. + +## Create a logging plugin + +The main interface for logging plugins uses the same JSON+HTTP RPC protocol used +by other plugin types. See the +[example](https://github.com/cpuguy83/docker-log-driver-test) plugin for a +reference implementation of a logging plugin. The example wraps the built-in +`jsonfilelog` log driver. + +## LogDriver protocol + +Logging plugins must register as a `LogDriver` during plugin activation. Once +activated users can specify the plugin as a log driver. + +There are two HTTP endpoints that logging plugins must implement: + +### `/LogDriver.StartLogging` + +Signals to the plugin that a container is starting that the plugin should start +receiving logs for. + +Logs will be streamed over the defined file in the request. On Linux this file +is a FIFO. Logging plugins are not currently supported on Windows. + +**Request**: +```json +{ + "File": "/path/to/file/stream", + "Info": { + "ContainerID": "123456" + } +} +``` + +`File` is the path to the log stream that needs to be consumed. Each call to +`StartLogging` should provide a different file path, even if it's a container +that the plugin has already received logs for prior. The file is created by +docker with a randomly generated name. + +`Info` is details about the container that's being logged. This is fairly +free-form, but is defined by the following struct definition: + +```go +type Info struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string + DaemonName string +} +``` + + +`ContainerID` will always be supplied with this struct, but other fields may be +empty or missing. + +**Response** +```json +{ + "Err": "" +} +``` + +If an error occurred during this request, add an error message to the `Err` field +in the response. If no error then you can either send an empty response (`{}`) +or an empty value for the `Err` field. 
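
As an illustration, a minimal `StartLogging` handler might look like the sketch below. The struct and function names are assumptions; only the JSON fields shown above are defined by the protocol. The consumer here simply drains the stream; a real plugin would decode the length-prefixed protobuf entries described further down and forward them to its backend.

```go
// Sketch of a /LogDriver.StartLogging handler (illustrative only).
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"os"
)

type startLoggingRequest struct {
	File string
	Info struct {
		ContainerID string
	}
}

type response struct {
	Err string `json:"Err"`
}

func handleStartLogging(w http.ResponseWriter, r *http.Request) {
	var req startLoggingRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		json.NewEncoder(w).Encode(response{Err: err.Error()})
		return
	}
	// Consume the stream in the background so the response is not delayed
	// and the container's stdio streams never block.
	go consume(req.Info.ContainerID, req.File)
	json.NewEncoder(w).Encode(response{})
}

// consume opens the file (a FIFO on Linux) created by Docker and drains it.
// A real plugin would decode each length-prefixed protobuf entry here.
func consume(containerID, path string) {
	f, err := os.Open(path)
	if err != nil {
		log.Printf("open log stream %s for %s: %v", path, containerID, err)
		return
	}
	defer f.Close()
	buf := make([]byte, 32*1024)
	for {
		if _, err := f.Read(buf); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/LogDriver.StartLogging", handleStartLogging)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```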
+ +The driver should at this point be consuming log messages from the passed in file. +If messages are unconsumed, it may cause the container to block while trying to +write to its stdio streams. + +Log stream messages are encoded as protocol buffers. The protobuf definitions are +in the +[docker repository](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/entry.proto). + +Since protocol buffers are not self-delimited you must decode them from the stream +using the following stream format: + +``` +[size][message] +``` + +Where `size` is a 4-byte big endian binary encoded uint32. `size` in this case +defines the size of the next message. `message` is the actual log entry. + +A reference golang implementation of a stream encoder/decoder can be found +[here](https://github.com/docker/docker/blob/master/api/types/plugins/logdriver/io.go) + +### `/LogDriver.StopLogging` + +Signals to the plugin to stop collecting logs from the defined file. +Once a response is received, the file will be removed by Docker. You must make +sure to collect all logs on the stream before responding to this request or risk +losing log data. + +Requests on this endpoint does not mean that the container has been removed +only that it has stopped. + +**Request**: +```json +{ + "File": "/path/to/file/stream" +} +``` + +**Response**: +```json +{ + "Err": "" +} +``` + +If an error occurred during this request, add an error message to the `Err` field +in the response. If no error then you can either send an empty response (`{}`) +or an empty value for the `Err` field. + +## Optional endpoints + +Logging plugins can implement two extra logging endpoints: + +### `/LogDriver.Capabilities` + +Defines the capabilities of the log driver. You must implement this endpoint for +Docker to be able to take advantage of any of the defined capabilities. + +**Request**: +```json +{} +``` + +**Response**: +```json +{ + "ReadLogs": true +} +``` + +Supported capabilities: + +- `ReadLogs` - this tells Docker that the plugin is capable of reading back logs +to clients. Plugins that report that they support `ReadLogs` must implement the +`/LogDriver.ReadLogs` endpoint + +### `/LogDriver.ReadLogs` + +Reads back logs to the client. This is used when `docker logs ` is +called. + +In order for Docker to use this endpoint, the plugin must specify as much when +`/LogDriver.Capabilities` is called. + + +**Request**: +```json +{ + "ReadConfig": {}, + "Info": { + "ContainerID": "123456" + } +} +``` + +`ReadConfig` is the list of options for reading, it is defined with the following +golang struct: + +```go +type ReadConfig struct { + Since time.Time + Tail int + Follow bool +} +``` + +- `Since` defines the oldest log that should be sent. +- `Tail` defines the number of lines to read (e.g. like the command `tail -n 10`) +- `Follow` signals that the client wants to stay attached to receive new log messages +as they come in once the existing logs have been read. + +`Info` is the same type defined in `/LogDriver.StartLogging`. It should be used +to determine what set of logs to read. + +**Response**: +``` +{{ log stream }} +``` + +The response should be the encoded log message using the same format as the +messages that the plugin consumed from Docker. diff --git a/docs/extend/plugins_metrics.md b/docs/extend/plugins_metrics.md new file mode 100644 index 0000000000..a86c7f22d2 --- /dev/null +++ b/docs/extend/plugins_metrics.md @@ -0,0 +1,85 @@ +--- +title: "Docker metrics collector plugins" +description: "Metrics plugins." 
+keywords: "Examples, Usage, plugins, docker, documentation, user guide, metrics" +--- + + + +# Metrics Collector Plugins + +Docker exposes internal metrics based on the prometheus format. Metrics plugins +enable accessing these metrics in a consistent way by providing a Unix +socket at a predefined path where the plugin can scrape the metrics. + +> **Note**: that while the plugin interface for metrics is non-experimental, the naming +of the metrics and metric labels is still considered experimental and may change +in a future version. + +## Creating a metrics plugin + +You must currently set `PropagatedMount` in the plugin `config.json` to +`/run/docker`. This allows the plugin to receive updated mounts +(the bind-mounted socket) from Docker after the plugin is already configured. + +## MetricsCollector protocol + +Metrics plugins must register as implementing the`MetricsCollector` interface +in `config.json`. + +On Unix platforms, the socket is located at `/run/docker/metrics.sock` in the +plugin's rootfs. + +`MetricsCollector` must implement two endpoints: + +### `MetricsCollector.StartMetrics` + +Signals to the plugin that the metrics socket is now available for scraping + +**Request** +```json +{} +``` + +The request has no playload. + +**Response** +```json +{ + "Err": "" +} +``` + +If an error occurred during this request, add an error message to the `Err` field +in the response. If no error then you can either send an empty response (`{}`) +or an empty value for the `Err` field. Errors will only be logged. + +### `MetricsCollector.StopMetrics` + +Signals to the plugin that the metrics socket is no longer available. +This may happen when the daemon is shutting down. + +**Request** +```json +{} +``` + +The request has no playload. + +**Response** +```json +{ + "Err": "" +} +``` + +If an error occurred during this request, add an error message to the `Err` field +in the response. If no error then you can either send an empty response (`{}`) +or an empty value for the `Err` field. Errors will only be logged. diff --git a/docs/extend/plugins_network.md b/docs/extend/plugins_network.md new file mode 100644 index 0000000000..a974862fa6 --- /dev/null +++ b/docs/extend/plugins_network.md @@ -0,0 +1,77 @@ +--- +title: "Docker network driver plugins" +description: "Network driver plugins." +keywords: "Examples, Usage, plugins, docker, documentation, user guide" +--- + + + +# Engine network driver plugins + +This document describes Docker Engine network driver plugins generally +available in Docker Engine. To view information on plugins +managed by Docker Engine, refer to [Docker Engine plugin system](index.md). + +Docker Engine network plugins enable Engine deployments to be extended to +support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN +or something completely different. Network driver plugins are supported via the +LibNetwork project. Each plugin is implemented as a "remote driver" for +LibNetwork, which shares plugin infrastructure with Engine. Effectively, network +driver plugins are activated in the same way as other plugins, and use the same +kind of protocol. + +## Network driver plugins and swarm mode + +Docker 1.12 adds support for cluster management and orchestration called +[swarm mode](https://docs.docker.com/engine/swarm/). Docker Engine running in swarm mode currently +only supports the built-in overlay driver for networking. Therefore existing +networking plugins will not work in swarm mode. 
+ +When you run Docker Engine outside of swarm mode, all networking plugins that +worked in Docker 1.11 will continue to function normally. They do not require +any modification. + +## Using network driver plugins + +The means of installing and running a network driver plugin depend on the +particular plugin. So, be sure to install your plugin according to the +instructions obtained from the plugin developer. + +Once running however, network driver plugins are used just like the built-in +network drivers: by being mentioned as a driver in network-oriented Docker +commands. For example, + + $ docker network create --driver weave mynet + +Some network driver plugins are listed in [plugins](legacy_plugins.md) + +The `mynet` network is now owned by `weave`, so subsequent commands +referring to that network will be sent to the plugin, + + $ docker run --network=mynet busybox top + + +## Write a network plugin + +Network plugins implement the [Docker plugin +API](plugin_api.md) and the network plugin protocol + +## Network plugin protocol + +The network driver protocol, in addition to the plugin activation call, is +documented as part of libnetwork: +[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md). + +# Related Information + +To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`. + +- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/) +- The [LibNetwork](https://github.com/docker/libnetwork) project diff --git a/docs/extend/plugins_services.md b/docs/extend/plugins_services.md new file mode 100644 index 0000000000..79e344f9ce --- /dev/null +++ b/docs/extend/plugins_services.md @@ -0,0 +1,186 @@ +--- +description: Using services with plugins +keywords: "API, Usage, plugins, documentation, developer" +title: Plugins and Services +--- + + + +# Using Volume and Network plugins in Docker services + +In swarm mode, it is possible to create a service that allows for attaching +to networks or mounting volumes that are backed by plugins. Swarm schedules +services based on plugin availability on a node. + + +### Volume plugins + +In this example, a volume plugin is installed on a swarm worker and a volume +is created using the plugin. In the manager, a service is created with the +relevant mount options. It can be observed that the service is scheduled to +run on the worker node with the said volume plugin and volume. Note that, +node1 is the manager and node2 is the worker. + +1. Prepare manager. In node 1: + + ```bash + $ docker swarm init + Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager. + ``` + +2. Join swarm, install plugin and create volume on worker. In node 2: + + ```bash + $ docker swarm join \ + --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \ + 192.168.99.100:2377 + ``` + + ```bash + $ docker plugin install tiborvass/sample-volume-plugin + latest: Pulling from tiborvass/sample-volume-plugin + eb9c16fbdc53: Download complete + Digest: sha256:00b42de88f3a3e0342e7b35fa62394b0a9ceb54d37f4c50be5d3167899994639 + Status: Downloaded newer image for tiborvass/sample-volume-plugin:latest + Installed plugin tiborvass/sample-volume-plugin + ``` + + ```bash + $ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol + ``` + +3. Create a service using the plugin and volume. 
In node1: + + ```bash + $ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top + + $ docker service ls + z1sj8bb8jnfn my-service replicated 1/1 busybox:latest + ``` + docker service ls shows service 1 instance of service running. + +4. Observe the task getting scheduled in node 2: + + ```bash + {% raw %} + $ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}' + 83fc1e842599 Up 2 days my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top" + {% endraw %} + ``` + +### Network plugins + +In this example, a global scope network plugin is installed on both the +swarm manager and worker. A service is created with replicated instances +using the installed plugin. We will observe how the availability of the +plugin determines network creation and container scheduling. + +Note that node1 is the manager and node2 is the worker. + + +1. Install a global scoped network plugin on both manager and worker. On node1 + and node2: + + ```bash + $ docker plugin install bboreham/weave2 + Plugin "bboreham/weave2" is requesting the following privileges: + - network: [host] + - capabilities: [CAP_SYS_ADMIN CAP_NET_ADMIN] + Do you grant the above permissions? [y/N] y + latest: Pulling from bboreham/weave2 + 7718f575adf7: Download complete + Digest: sha256:2780330cc15644b60809637ee8bd68b4c85c893d973cb17f2981aabfadfb6d72 + Status: Downloaded newer image for bboreham/weave2:latest + Installed plugin bboreham/weave2 + ``` + +2. Create a network using plugin on manager. On node1: + + ```bash + $ docker network create --driver=bboreham/weave2:latest globalnet + + $ docker network ls + NETWORK ID NAME DRIVER SCOPE + qlj7ueteg6ly globalnet bboreham/weave2:latest swarm + ``` + +3. Create a service on the manager and have replicas set to 8. Observe that +containers get scheduled on both manager and worker. 
+ + On node 1: + + ```bash + $ docker service create --network globalnet --name myservice --replicas=8 mrjana/simpleweb simpleweb +w90drnfzw85nygbie9kb89vpa + ``` + + ```bash + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 87520965206a mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.4.ytdzpktmwor82zjxkh118uf1v + 15e24de0f7aa mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.2.kh7a9n3iauq759q9mtxyfs9hp + c8c8f0144cdc mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.6.sjhpj5gr3xt33e3u2jycoj195 + 2e8e4b2c5c08 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.8.2z29zowsghx66u2velublwmrh + ``` + + On node 2: + + ```bash + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 53c0ae7c1dae mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.7.x44tvvdm3iwkt9kif35f7ykz1 + 9b56c627fee0 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.1.x7n1rm6lltw5gja3ueikze57q + d4f5927ba52c mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up 1 second myservice.5.i97bfo9uc6oe42lymafs9rz6k + 478c0d395bd7 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.3.yr7nkffa48lff1vrl2r1m1ucs + ``` + +4. Scale down the number of instances. On node1: + + ```bash + $ docker service scale myservice=0 + myservice scaled to 0 + ``` + +5. Disable and uninstall the plugin on the worker. On node2: + + ```bash + $ docker plugin rm -f bboreham/weave2 + bboreham/weave2 + ``` + +6. Scale up the number of instances again. Observe that all containers are +scheduled on the master and not on the worker, because the plugin is not available on the worker anymore. 
+ + On node 1: + + ```bash + $ docker service scale myservice=8 + myservice scaled to 8 + ``` + + ```bash + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + cf4b0ec2415e mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.3.r7p5o208jmlzpcbm2ytl3q6n1 + 57c64a6a2b88 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.4.dwoezsbb02ccstkhlqjy2xe7h + 3ac68cc4e7b8 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 35 seconds myservice.5.zx4ezdrm2nwxzkrwnxthv0284 + 006c3cb318fc mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.8.q0e3umt19y3h3gzo1ty336k5r + dd2ffebde435 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.7.a77y3u22prjipnrjg7vzpv3ba + a86c74d8b84b mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.6.z9nbn14bagitwol1biveeygl7 + 2846a7850ba0 mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 37 seconds myservice.2.ypufz2eh9fyhppgb89g8wtj76 + e2ec01efcd8a mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 38 seconds myservice.1.8w7c4ttzr6zcb9sjsqyhwp3yl + ``` + + On node 2: + + ```bash + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + ``` diff --git a/docs/extend/plugins_volume.md b/docs/extend/plugins_volume.md new file mode 100644 index 0000000000..807ab5a486 --- /dev/null +++ b/docs/extend/plugins_volume.md @@ -0,0 +1,360 @@ +--- +title: "Volume plugins" +description: "How to manage data with external volume plugins" +keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api" +--- + + + +# Write a volume plugin + +Docker Engine volume plugins enable Engine deployments to be integrated with +external storage systems such as Amazon EBS, and enable data volumes to persist +beyond the lifetime of a single Docker host. See the +[plugin documentation](legacy_plugins.md) for more information. + +## Changelog + +### 1.13.0 + +- If used as part of the v2 plugin architecture, mountpoints that are part of + paths returned by the plugin must be mounted under the directory specified by + `PropagatedMount` in the plugin configuration + ([#26398](https://github.com/docker/docker/pull/26398)) + +### 1.12.0 + +- Add `Status` field to `VolumeDriver.Get` response + ([#21006](https://github.com/docker/docker/pull/21006#)) +- Add `VolumeDriver.Capabilities` to get capabilities of the volume driver + ([#22077](https://github.com/docker/docker/pull/22077)) + +### 1.10.0 + +- Add `VolumeDriver.Get` which gets the details about the volume + ([#16534](https://github.com/docker/docker/pull/16534)) +- Add `VolumeDriver.List` which lists all volumes owned by the driver + ([#16534](https://github.com/docker/docker/pull/16534)) + +### 1.8.0 + +- Initial support for volume driver plugins + ([#14659](https://github.com/docker/docker/pull/14659)) + +## Command-line changes + +To give a container access to a volume, use the `--volume` and `--volume-driver` +flags on the `docker container run` command. 
The `--volume` (or `-v`) flag +accepts a volume name and path on the host, and the `--volume-driver` flag +accepts a driver type. + +```bash +$ docker volume create --driver=flocker volumename + +$ docker container run -it --volume volumename:/data busybox sh +``` + +### `--volume` + +The `--volume` (or `-v`) flag takes a value that is in the format +`:`. The two parts of the value are +separated by a colon (`:`) character. + +- The volume name is a human-readable name for the volume, and cannot begin with + a `/` character. It is referred to as `volume_name` in the rest of this topic. +- The `Mountpoint` is the path on the host (v1) or in the plugin (v2) where the + volume has been made available. + +### `volumedriver` + +Specifying a `volumedriver` in conjunction with a `volumename` allows you to +use plugins such as [Flocker](https://github.com/ScatterHQ/flocker) to manage +volumes external to a single host, such as those on EBS. + +## Create a VolumeDriver + +The container creation endpoint (`/containers/create`) accepts a `VolumeDriver` +field of type `string` allowing to specify the name of the driver. If not +specified, it defaults to `"local"` (the default driver for local volumes). + +## Volume plugin protocol + +If a plugin registers itself as a `VolumeDriver` when activated, it must +provide the Docker Daemon with writeable paths on the host filesystem. The Docker +daemon provides these paths to containers to consume. The Docker daemon makes +the volumes available by bind-mounting the provided paths into the containers. + +> **Note**: Volume plugins should *not* write data to the `/var/lib/docker/` +> directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/` +> directory is reserved for Docker. + +### `/VolumeDriver.Create` + +**Request**: +```json +{ + "Name": "volume_name", + "Opts": {} +} +``` + +Instruct the plugin that the user wants to create a volume, given a user +specified volume name. The plugin does not need to actually manifest the +volume on the filesystem yet (until `Mount` is called). +`Opts` is a map of driver specific options passed through from the user request. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + +### `/VolumeDriver.Remove` + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Delete the specified volume from disk. This request is issued when a user +invokes `docker rm -v` to remove volumes associated with a container. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + +### `/VolumeDriver.Mount` + +**Request**: +```json +{ + "Name": "volume_name", + "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" +} +``` + +Docker requires the plugin to provide a volume, given a user specified volume +name. `Mount` is called once per container start. If the same `volume_name` is requested +more than once, the plugin may need to keep track of each new mount request and provision +at the first mount request and deprovision at the last corresponding unmount request. + +`ID` is a unique ID for the caller that is requesting the mount. + +**Response**: + +- **v1**: + + ```json + { + "Mountpoint": "/path/to/directory/on/host", + "Err": "" + } + ``` + +- **v2**: + + ```json + { + "Mountpoint": "/path/under/PropagatedMount", + "Err": "" + } + ``` + +`Mountpoint` is the path on the host (v1) or in the plugin (v2) where the volume +has been made available. + +`Err` is either empty or contains an error string. 
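
To illustrate the bookkeeping suggested above, the sketch below shows a `Mount` handler that reference-counts callers per volume. All names, the base directory, and the absence of real provisioning logic are assumptions made for brevity; only the JSON shape matches the protocol.

```go
// Sketch of a /VolumeDriver.Mount handler that tracks mount requests by ID
// (illustrative only; no storage is actually provisioned).
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"path/filepath"
	"sync"
)

type mountRequest struct {
	Name string
	ID   string
}

type mountResponse struct {
	Mountpoint string `json:"Mountpoint"`
	Err        string `json:"Err"`
}

type driver struct {
	mu   sync.Mutex
	refs map[string]map[string]bool // volume name -> set of caller IDs
	base string                     // e.g. the PropagatedMount directory for a v2 plugin
}

func (d *driver) mount(w http.ResponseWriter, r *http.Request) {
	var req mountRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		json.NewEncoder(w).Encode(mountResponse{Err: err.Error()})
		return
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.refs[req.Name] == nil {
		// First mount request for this volume: a real driver would
		// provision or attach the backing storage at this point.
		d.refs[req.Name] = map[string]bool{}
	}
	d.refs[req.Name][req.ID] = true
	json.NewEncoder(w).Encode(mountResponse{Mountpoint: filepath.Join(d.base, req.Name)})
}

func main() {
	d := &driver{refs: map[string]map[string]bool{}, base: "/mnt/volumes"}
	http.HandleFunc("/VolumeDriver.Mount", d.mount)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```

The matching `Unmount` handler would remove the caller's `ID` from the set and deprovision only when the set becomes empty.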
+ +### `/VolumeDriver.Path` + +**Request**: + +```json +{ + "Name": "volume_name" +} +``` + +Request the path to the volume with the given `volume_name`. + +**Response**: + +- **v1**: + + ```json + { + "Mountpoin": "/path/to/directory/on/host", + "Err": "" + } + ``` + +- **v2**: + + ```json + { + "Mountpoint": "/path/under/PropagatedMount", + "Err": "" + } + ``` + +Respond with the path on the host (v1) or inside the plugin (v2) where the +volume has been made available, and/or a string error if an error occurred. + +`Mountpoint` is optional. However, the plugin may be queried again later if one +is not provided. + +### `/VolumeDriver.Unmount` + +**Request**: +```json +{ + "Name": "volume_name", + "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" +} +``` + +Docker is no longer using the named volume. `Unmount` is called once per +container stop. Plugin may deduce that it is safe to deprovision the volume at +this point. + +`ID` is a unique ID for the caller that is requesting the mount. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + + +### `/VolumeDriver.Get` + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Get info about `volume_name`. + + +**Response**: + +- **v1**: + + ```json + { + "Volume": { + "Name": "volume_name", + "Mountpoint": "/path/to/directory/on/host", + "Status": {} + }, + "Err": "" + } + ``` + +- **v2**: + + ```json + { + "Volume": { + "Name": "volume_name", + "Mountpoint": "/path/under/PropagatedMount", + "Status": {} + }, + "Err": "" + } + ``` + +Respond with a string error if an error occurred. `Mountpoint` and `Status` are +optional. + + +### /VolumeDriver.List + +**Request**: +```json +{} +``` + +Get the list of volumes registered with the plugin. + +**Response**: + +- **v1**: + + ```json + { + "Volumes": [ + { + "Name": "volume_name", + "Mountpoint": "/path/to/directory/on/host" + } + ], + "Err": "" + } + ``` + +- **v2**: + + ```json + { + "Volumes": [ + { + "Name": "volume_name", + "Mountpoint": "/path/under/PropagatedMount" + } + ], + "Err": "" + } + ``` + + +Respond with a string error if an error occurred. `Mountpoint` is optional. + +### /VolumeDriver.Capabilities + +**Request**: +```json +{} +``` + +Get the list of capabilities the driver supports. + +The driver is not required to implement `Capabilities`. If it is not +implemented, the default values are used. + +**Response**: +```json +{ + "Capabilities": { + "Scope": "global" + } +} +``` + +Supported scopes are `global` and `local`. Any other value in `Scope` will be +ignored, and `local` is used. `Scope` allows cluster managers to handle the +volume in different ways. For instance, a scope of `global`, signals to the +cluster manager that it only needs to create the volume once instead of on each +Docker host. More capabilities may be added in the future. diff --git a/docs/reference/builder.md b/docs/reference/builder.md new file mode 100644 index 0000000000..2571511b64 --- /dev/null +++ b/docs/reference/builder.md @@ -0,0 +1,1849 @@ +--- +title: "Dockerfile reference" +description: "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image." +keywords: "builder, docker, Dockerfile, automation, image creation" +redirect_from: +- /reference/builder/ +--- + + + +# Dockerfile reference + +Docker can build images automatically by reading the instructions from a +`Dockerfile`. 
A `Dockerfile` is a text document that contains all the commands a +user could call on the command line to assemble an image. Using `docker build` +users can create an automated build that executes several command-line +instructions in succession. + +This page describes the commands you can use in a `Dockerfile`. When you are +done reading this page, refer to the [`Dockerfile` Best +Practices](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for a tip-oriented guide. + +## Usage + +The [`docker build`](commandline/build.md) command builds an image from +a `Dockerfile` and a *context*. The build's context is the files at a specified +location `PATH` or `URL`. The `PATH` is a directory on your local filesystem. +The `URL` is a Git repository location. + +A context is processed recursively. So, a `PATH` includes any subdirectories and +the `URL` includes the repository and its submodules. A simple build command +that uses the current directory as context: + + $ docker build . + Sending build context to Docker daemon 6.51 MB + ... + +The build is run by the Docker daemon, not by the CLI. The first thing a build +process does is send the entire context (recursively) to the daemon. In most +cases, it's best to start with an empty directory as context and keep your +Dockerfile in that directory. Add only the files needed for building the +Dockerfile. + +>**Warning**: Do not use your root directory, `/`, as the `PATH` as it causes +>the build to transfer the entire contents of your hard drive to the Docker +>daemon. + +To use a file in the build context, the `Dockerfile` refers to the file specified +in an instruction, for example, a `COPY` instruction. To increase the build's +performance, exclude files and directories by adding a `.dockerignore` file to +the context directory. For information about how to [create a `.dockerignore` +file](#dockerignore-file) see the documentation on this page. + +Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root +of the context. You use the `-f` flag with `docker build` to point to a Dockerfile +anywhere in your file system. + + $ docker build -f /path/to/a/Dockerfile . + +You can specify a repository and tag at which to save the new image if +the build succeeds: + + $ docker build -t shykes/myapp . + +To tag the image into multiple repositories after the build, +add multiple `-t` parameters when you run the `build` command: + + $ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest . + +Before the Docker daemon runs the instructions in the `Dockerfile`, it performs +a preliminary validation of the `Dockerfile` and returns an error if the syntax is incorrect: + + $ docker build -t test/myapp . + Sending build context to Docker daemon 2.048 kB + Error response from daemon: Unknown instruction: RUNCMD + +The Docker daemon runs the instructions in the `Dockerfile` one-by-one, +committing the result of each instruction +to a new image if necessary, before finally outputting the ID of your +new image. The Docker daemon will automatically clean up the context you +sent. + +Note that each instruction is run independently, and causes a new image +to be created - so `RUN cd /tmp` will not have any effect on the next +instructions. + +Whenever possible, Docker will re-use the intermediate images (cache), +to accelerate the `docker build` process significantly. This is indicated by +the `Using cache` message in the console output. 
+(For more information, see the [Build cache section](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache)) in the +`Dockerfile` best practices guide: + + $ docker build -t svendowideit/ambassador . + Sending build context to Docker daemon 15.36 kB + Step 1/4 : FROM alpine:3.2 + ---> 31f630c65071 + Step 2/4 : MAINTAINER SvenDowideit@home.org.au + ---> Using cache + ---> 2a1c91448f5f + Step 3/4 : RUN apk update && apk add socat && rm -r /var/cache/ + ---> Using cache + ---> 21ed6e7fbb73 + Step 4/4 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh + ---> Using cache + ---> 7ea8aef582cc + Successfully built 7ea8aef582cc + +Build cache is only used from images that have a local parent chain. This means +that these images were created by previous builds or the whole chain of images +was loaded with `docker load`. If you wish to use build cache of a specific +image you can specify it with `--cache-from` option. Images specified with +`--cache-from` do not need to have a parent chain and may be pulled from other +registries. + +When you're done with your build, you're ready to look into [*Pushing a +repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). + +## Format + +Here is the format of the `Dockerfile`: + +```Dockerfile +# Comment +INSTRUCTION arguments +``` + +The instruction is not case-sensitive. However, convention is for them to +be UPPERCASE to distinguish them from arguments more easily. + + +Docker runs instructions in a `Dockerfile` in order. A `Dockerfile` **must +start with a \`FROM\` instruction**. The `FROM` instruction specifies the [*Base +Image*](glossary.md#base-image) from which you are building. `FROM` may only be +proceeded by one or more `ARG` instructions, which declare arguments that are used +in `FROM` lines in the `Dockerfile`. + +Docker treats lines that *begin* with `#` as a comment, unless the line is +a valid [parser directive](#parser-directives). A `#` marker anywhere +else in a line is treated as an argument. This allows statements like: + +```Dockerfile +# Comment +RUN echo 'we are running some # of cool things' +``` + +Line continuation characters are not supported in comments. + +## Parser directives + +Parser directives are optional, and affect the way in which subsequent lines +in a `Dockerfile` are handled. Parser directives do not add layers to the build, +and will not be shown as a build step. Parser directives are written as a +special type of comment in the form `# directive=value`. A single directive +may only be used once. + +Once a comment, empty line or builder instruction has been processed, Docker +no longer looks for parser directives. Instead it treats anything formatted +as a parser directive as a comment and does not attempt to validate if it might +be a parser directive. Therefore, all parser directives must be at the very +top of a `Dockerfile`. + +Parser directives are not case-sensitive. However, convention is for them to +be lowercase. Convention is also to include a blank line following any +parser directives. Line continuation characters are not supported in parser +directives. 
+ +Due to these rules, the following examples are all invalid: + +Invalid due to line continuation: + +```Dockerfile +# direc \ +tive=value +``` + +Invalid due to appearing twice: + +```Dockerfile +# directive=value1 +# directive=value2 + +FROM ImageName +``` + +Treated as a comment due to appearing after a builder instruction: + +```Dockerfile +FROM ImageName +# directive=value +``` + +Treated as a comment due to appearing after a comment which is not a parser +directive: + +```Dockerfile +# About my dockerfile +# directive=value +FROM ImageName +``` + +The unknown directive is treated as a comment due to not being recognized. In +addition, the known directive is treated as a comment due to appearing after +a comment which is not a parser directive. + +```Dockerfile +# unknowndirective=value +# knowndirective=value +``` + +Non line-breaking whitespace is permitted in a parser directive. Hence, the +following lines are all treated identically: + +```Dockerfile +#directive=value +# directive =value +# directive= value +# directive = value +# dIrEcTiVe=value +``` + +The following parser directive is supported: + +* `escape` + +## escape + + # escape=\ (backslash) + +Or + + # escape=` (backtick) + +The `escape` directive sets the character used to escape characters in a +`Dockerfile`. If not specified, the default escape character is `\`. + +The escape character is used both to escape characters in a line, and to +escape a newline. This allows a `Dockerfile` instruction to +span multiple lines. Note that regardless of whether the `escape` parser +directive is included in a `Dockerfile`, *escaping is not performed in +a `RUN` command, except at the end of a line.* + +Setting the escape character to `` ` `` is especially useful on +`Windows`, where `\` is the directory path separator. `` ` `` is consistent +with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx). + +Consider the following example which would fail in a non-obvious way on +`Windows`. The second `\` at the end of the second line would be interpreted as an +escape for the newline, instead of a target of the escape from the first `\`. +Similarly, the `\` at the end of the third line would, assuming it was actually +handled as an instruction, cause it be treated as a line continuation. The result +of this dockerfile is that second and third lines are considered a single +instruction: + +```Dockerfile +FROM microsoft/nanoserver +COPY testfile.txt c:\\ +RUN dir c:\ +``` + +Results in: + + PS C:\John> docker build -t cmd . + Sending build context to Docker daemon 3.072 kB + Step 1/2 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/2 : COPY testfile.txt c:\RUN dir c: + GetFileAttributesEx c:RUN: The system cannot find the file specified. + PS C:\John> + +One solution to the above would be to use `/` as the target of both the `COPY` +instruction, and `dir`. However, this syntax is, at best, confusing as it is not +natural for paths on `Windows`, and at worst, error prone as not all commands on +`Windows` support `/` as the path separator. + +By adding the `escape` parser directive, the following `Dockerfile` succeeds as +expected with the use of natural platform semantics for file paths on `Windows`: + + # escape=` + + FROM microsoft/nanoserver + COPY testfile.txt c:\ + RUN dir c:\ + +Results in: + + PS C:\John> docker build -t succeeds --no-cache=true . 
+ Sending build context to Docker daemon 3.072 kB + Step 1/3 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/3 : COPY testfile.txt c:\ + ---> 96655de338de + Removing intermediate container 4db9acbb1682 + Step 3/3 : RUN dir c:\ + ---> Running in a2c157f842f5 + Volume in drive C has no label. + Volume Serial Number is 7E6D-E0F7 + + Directory of c:\ + + 10/05/2016 05:04 PM 1,894 License.txt + 10/05/2016 02:22 PM Program Files + 10/05/2016 02:14 PM Program Files (x86) + 10/28/2016 11:18 AM 62 testfile.txt + 10/28/2016 11:20 AM Users + 10/28/2016 11:20 AM Windows + 2 File(s) 1,956 bytes + 4 Dir(s) 21,259,096,064 bytes free + ---> 01c7f3bef04f + Removing intermediate container a2c157f842f5 + Successfully built 01c7f3bef04f + PS C:\John> + +## Environment replacement + +Environment variables (declared with [the `ENV` statement](#env)) can also be +used in certain instructions as variables to be interpreted by the +`Dockerfile`. Escapes are also handled for including variable-like syntax +into a statement literally. + +Environment variables are notated in the `Dockerfile` either with +`$variable_name` or `${variable_name}`. They are treated equivalently and the +brace syntax is typically used to address issues with variable names with no +whitespace, like `${foo}_bar`. + +The `${variable_name}` syntax also supports a few of the standard `bash` +modifiers as specified below: + +* `${variable:-word}` indicates that if `variable` is set then the result + will be that value. If `variable` is not set then `word` will be the result. +* `${variable:+word}` indicates that if `variable` is set then `word` will be + the result, otherwise the result is the empty string. + +In all cases, `word` can be any string, including additional environment +variables. + +Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, +for example, will translate to `$foo` and `${foo}` literals respectively. + +Example (parsed representation is displayed after the `#`): + + FROM busybox + ENV foo /bar + WORKDIR ${foo} # WORKDIR /bar + ADD . $foo # ADD . /bar + COPY \$foo /quux # COPY $foo /quux + +Environment variables are supported by the following list of instructions in +the `Dockerfile`: + +* `ADD` +* `COPY` +* `ENV` +* `EXPOSE` +* `FROM` +* `LABEL` +* `STOPSIGNAL` +* `USER` +* `VOLUME` +* `WORKDIR` + +as well as: + +* `ONBUILD` (when combined with one of the supported instructions above) + +> **Note**: +> prior to 1.4, `ONBUILD` instructions did **NOT** support environment +> variable, even when combined with any of the instructions listed above. + +Environment variable substitution will use the same value for each variable +throughout the entire instruction. In other words, in this example: + + ENV abc=hello + ENV abc=bye def=$abc + ENV ghi=$abc + +will result in `def` having a value of `hello`, not `bye`. However, +`ghi` will have a value of `bye` because it is not part of the same instruction +that set `abc` to `bye`. + +## .dockerignore file + +Before the docker CLI sends the context to the docker daemon, it looks +for a file named `.dockerignore` in the root directory of the context. +If this file exists, the CLI modifies the context to exclude files and +directories that match patterns in it. This helps to avoid +unnecessarily sending large or sensitive files and directories to the +daemon and potentially adding them to images using `ADD` or `COPY`. + +The CLI interprets the `.dockerignore` file as a newline-separated +list of patterns similar to the file globs of Unix shells. 
For the +purposes of matching, the root of the context is considered to be both +the working and the root directory. For example, the patterns +`/foo/bar` and `foo/bar` both exclude a file or directory named `bar` +in the `foo` subdirectory of `PATH` or in the root of the git +repository located at `URL`. Neither excludes anything else. + +If a line in `.dockerignore` file starts with `#` in column 1, then this line is +considered as a comment and is ignored before interpreted by the CLI. + +Here is an example `.dockerignore` file: + +``` +# comment +*/temp* +*/*/temp* +temp? +``` + +This file causes the following build behavior: + +| Rule | Behavior | +|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `# comment` | Ignored. | +| `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. | +| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | +| `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. + + +Matching is done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A +preprocessing step removes leading and trailing whitespace and +eliminates `.` and `..` elements using Go's +[filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines +that are blank after preprocessing are ignored. + +Beyond Go's filepath.Match rules, Docker also supports a special +wildcard string `**` that matches any number of directories (including +zero). For example, `**/*.go` will exclude all files that end with `.go` +that are found in all directories, including the root of the build context. + +Lines starting with `!` (exclamation mark) can be used to make exceptions +to exclusions. The following is an example `.dockerignore` file that +uses this mechanism: + +``` + *.md + !README.md +``` + +All markdown files *except* `README.md` are excluded from the context. + +The placement of `!` exception rules influences the behavior: the last +line of the `.dockerignore` that matches a particular file determines +whether it is included or excluded. Consider the following example: + +``` + *.md + !README*.md + README-secret.md +``` + +No markdown files are included in the context except README files other than +`README-secret.md`. + +Now consider this example: + +``` + *.md + README-secret.md + !README*.md +``` + +All of the README files are included. The middle line has no effect because +`!README*.md` matches `README-secret.md` and comes last. + +You can even use the `.dockerignore` file to exclude the `Dockerfile` +and `.dockerignore` files. These files are still sent to the daemon +because it needs them to do its job. But the `ADD` and `COPY` instructions +do not copy them to the image. + +Finally, you may want to specify which files to include in the +context, rather than which to exclude. To achieve this, specify `*` as +the first pattern, followed by one or more `!` exception patterns. + +**Note**: For historical reasons, the pattern `.` is ignored. 
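
Because the per-pattern syntax above builds on Go's `filepath.Match`, a quick way to check how a single pattern behaves is to exercise it directly, as in the small program below. This only covers the base rules; the `**` wildcard and `!` exceptions are Docker-specific extensions and are not handled by `filepath.Match` itself.

```go
// Try .dockerignore-style patterns against example paths using the same
// base matching rules (Go's filepath.Match) described above.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	paths := []string{"somedir/temporary.txt", "somedir/temp", "tempa"}
	patterns := []string{"*/temp*", "temp?"}
	for _, pattern := range patterns {
		for _, path := range paths {
			matched, err := filepath.Match(pattern, path)
			fmt.Printf("%-10q %-26q matched=%v err=%v\n", pattern, path, matched, err)
		}
	}
}
```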
+ +## FROM + + FROM [AS ] + +Or + + FROM [:] [AS ] + +Or + + FROM [@] [AS ] + +The `FROM` instruction initializes a new build stage and sets the +[*Base Image*](glossary.md#base-image) for subsequent instructions. As such, a +valid `Dockerfile` must start with a `FROM` instruction. The image can be +any valid image – it is especially easy to start by **pulling an image** from +the [*Public Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/). + +- `ARG` is the only instruction that may proceed `FROM` in the `Dockerfile`. + See [Understand how ARG and FROM interact](#understand-how-arg-and-from-interact). + +- `FROM` can appear multiple times within a single `Dockerfile` to + create multiple images or use one build stage as a dependency for another. + Simply make a note of the last image ID output by the commit before each new + `FROM` instruction. Each `FROM` instruction clears any state created by previous + instructions. + +- Optionally a name can be given to a new build stage by adding `AS name` to the + `FROM` instruction. The name can be used in subsequent `FROM` and + `COPY --from=` instructions to refer to the image built in this stage. + +- The `tag` or `digest` values are optional. If you omit either of them, the + builder assumes a `latest` tag by default. The builder returns an error if it + cannot find the `tag` value. + +### Understand how ARG and FROM interact + +`FROM` instructions support variables that are declared by any `ARG` +instructions that occur before the first `FROM`. + +```Dockerfile +ARG CODE_VERSION=latest +FROM base:${CODE_VERSION} +CMD /code/run-app + +FROM extras:${CODE_VERSION} +CMD /code/run-extras +``` + +To use the default value of an `ARG` declared before the first `FROM` use an +`ARG` instruction without a value: + +```Dockerfile +ARG SETTINGS=default + +FROM busybox +ARG SETTINGS + +``` + +## RUN + +RUN has 2 forms: + +- `RUN ` (*shell* form, the command is run in a shell, which by +default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows) +- `RUN ["executable", "param1", "param2"]` (*exec* form) + +The `RUN` instruction will execute any commands in a new layer on top of the +current image and commit the results. The resulting committed image will be +used for the next step in the `Dockerfile`. + +Layering `RUN` instructions and generating commits conforms to the core +concepts of Docker where commits are cheap and containers can be created from +any point in an image's history, much like source control. + +The *exec* form makes it possible to avoid shell string munging, and to `RUN` +commands using a base image that does not contain the specified shell executable. + +The default shell for the *shell* form can be changed using the `SHELL` +command. + +In the *shell* form you can use a `\` (backslash) to continue a single +RUN instruction onto the next line. For example, consider these two lines: + +``` +RUN /bin/bash -c 'source $HOME/.bashrc; \ +echo $HOME' +``` +Together they are equivalent to this single line: + +``` +RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME' +``` + +> **Note**: +> To use a different shell, other than '/bin/sh', use the *exec* form +> passing in the desired shell. For example, +> `RUN ["/bin/bash", "-c", "echo hello"]` + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. 
+> This means that normal shell processing does not happen. For example, +> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. +> +> **Note**: +> In the *JSON* form, it is necessary to escape backslashes. This is +> particularly relevant on Windows where the backslash is the path separator. +> The following line would otherwise be treated as *shell* form due to not +> being valid JSON, and fail in an unexpected way: +> `RUN ["c:\windows\system32\tasklist.exe"]` +> The correct syntax for this example is: +> `RUN ["c:\\windows\\system32\\tasklist.exe"]` + +The cache for `RUN` instructions isn't invalidated automatically during +the next build. The cache for an instruction like +`RUN apt-get dist-upgrade -y` will be reused during the next build. The +cache for `RUN` instructions can be invalidated by using the `--no-cache` +flag, for example `docker build --no-cache`. + +See the [`Dockerfile` Best Practices +guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. + +The cache for `RUN` instructions can be invalidated by `ADD` instructions. See +[below](#add) for details. + +### Known issues (RUN) + +- [Issue 783](https://github.com/docker/docker/issues/783) is about file + permissions problems that can occur when using the AUFS file system. You + might notice it during an attempt to `rm` a file, for example. + + For systems that have recent aufs version (i.e., `dirperm1` mount option can + be set), docker will attempt to fix the issue automatically by mounting + the layers with `dirperm1` option. More details on `dirperm1` option can be + found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs) + + If your system doesn't have support for `dirperm1`, the issue describes a workaround. + +## CMD + +The `CMD` instruction has three forms: + +- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form) +- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) +- `CMD command param1 param2` (*shell* form) + +There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD` +then only the last `CMD` will take effect. + +**The main purpose of a `CMD` is to provide defaults for an executing +container.** These defaults can include an executable, or they can omit +the executable, in which case you must specify an `ENTRYPOINT` +instruction as well. + +> **Note**: +> If `CMD` is used to provide default arguments for the `ENTRYPOINT` +> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified +> with the JSON array format. + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`. 
+> When using the exec form and executing a shell directly, as in the case for
+> the shell form, it is the shell that is doing the environment variable
+> expansion, not docker.
+
+When used in the shell or exec formats, the `CMD` instruction sets the command
+to be executed when running the image.
+
+If you use the *shell* form of the `CMD`, then the `<command>` will execute in
+`/bin/sh -c`:
+
+    FROM ubuntu
+    CMD echo "This is a test." | wc -
+
+If you want to **run your** `<command>` **without a shell** then you must
+express the command as a JSON array and give the full path to the executable.
+**This array form is the preferred format of `CMD`.** Any additional parameters
+must be individually expressed as strings in the array:
+
+    FROM ubuntu
+    CMD ["/usr/bin/wc","--help"]
+
+If you would like your container to run the same executable every time, then
+you should consider using `ENTRYPOINT` in combination with `CMD`. See
+[*ENTRYPOINT*](#entrypoint).
+
+If the user specifies arguments to `docker run` then they will override the
+default specified in `CMD`.
+
+> **Note**:
+> Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
+> the result; `CMD` does not execute anything at build time, but specifies
+> the intended command for the image.
+
+## LABEL
+
+    LABEL <key>=<value> <key>=<value> <key>=<value> ...
+
+The `LABEL` instruction adds metadata to an image. A `LABEL` is a
+key-value pair. To include spaces within a `LABEL` value, use quotes and
+backslashes as you would in command-line parsing. A few usage examples:
+
+    LABEL "com.example.vendor"="ACME Incorporated"
+    LABEL com.example.label-with-value="foo"
+    LABEL version="1.0"
+    LABEL description="This text illustrates \
+    that label-values can span multiple lines."
+
+An image can have more than one label. To specify multiple labels,
+Docker recommends combining labels into a single `LABEL` instruction where
+possible. Each `LABEL` instruction produces a new layer which can result in an
+inefficient image if you use many labels. This example results in a single image
+layer.
+
+    LABEL multi.label1="value1" multi.label2="value2" other="value3"
+
+The above can also be written as:
+
+    LABEL multi.label1="value1" \
+          multi.label2="value2" \
+          other="value3"
+
+Labels are additive, including `LABEL`s in `FROM` images. If Docker
+encounters a label/key that already exists, the new value overrides any previous
+labels with identical keys.
+
+To view an image's labels, use the `docker inspect` command.
+
+    "Labels": {
+        "com.example.vendor": "ACME Incorporated",
+        "com.example.label-with-value": "foo",
+        "version": "1.0",
+        "description": "This text illustrates that label-values can span multiple lines.",
+        "multi.label1": "value1",
+        "multi.label2": "value2",
+        "other": "value3"
+    },
+
+## MAINTAINER (deprecated)
+
+    MAINTAINER <name>
+
+The `MAINTAINER` instruction sets the *Author* field of the generated images.
+The `LABEL` instruction is a much more flexible version of this and you should use
+it instead, as it enables setting any metadata you require, and can be viewed
+easily, for example with `docker inspect`. To set a label corresponding to the
+`MAINTAINER` field you could use:
+
+    LABEL maintainer="SvenDowideit@home.org.au"
+
+This will then be visible from `docker inspect` with the other labels.
+
+## EXPOSE
+
+    EXPOSE <port> [<port>...]
+
+The `EXPOSE` instruction informs Docker that the container listens on the
+specified network ports at runtime. `EXPOSE` does not make the ports of the
+container accessible to the host.
To do that, you must use either the `-p` flag
+to publish a range of ports or the `-P` flag to publish all of the exposed
+ports. You can expose one port number and publish it externally under another
+number.
+
+To set up port redirection on the host system, see [using the -P
+flag](run.md#expose-incoming-ports). The Docker network feature supports
+creating networks without the need to expose ports within the network, for
+detailed information see the [overview of this
+feature](https://docs.docker.com/engine/userguide/networking/).
+
+## ENV
+
+    ENV <key> <value>
+    ENV <key>=<value> ...
+
+The `ENV` instruction sets the environment variable `<key>` to the value
+`<value>`. This value will be in the environment of all "descendant"
+`Dockerfile` commands and can be [replaced inline](#environment-replacement) in
+many as well.
+
+The `ENV` instruction has two forms. The first form, `ENV <key> <value>`,
+will set a single variable to a value. The entire string after the first
+space will be treated as the `<value>` - including characters such as
+spaces and quotes.
+
+The second form, `ENV <key>=<value> ...`, allows for multiple variables to
+be set at one time. Notice that the second form uses the equals sign (=)
+in the syntax, while the first form does not. Like command line parsing,
+quotes and backslashes can be used to include spaces within values.
+
+For example:
+
+    ENV myName="John Doe" myDog=Rex\ The\ Dog \
+        myCat=fluffy
+
+and
+
+    ENV myName John Doe
+    ENV myDog Rex The Dog
+    ENV myCat fluffy
+
+will yield the same net results in the final image, but the first form
+is preferred because it produces a single cache layer.
+
+The environment variables set using `ENV` will persist when a container is run
+from the resulting image. You can view the values using `docker inspect`, and
+change them using `docker run --env <key>=<value>`.
+
+> **Note**:
+> Environment persistence can cause unexpected side effects. For example,
+> setting `ENV DEBIAN_FRONTEND noninteractive` may confuse apt-get
+> users on a Debian-based image. To set a value for a single command, use
+> `RUN <key>=<value> <command>`.
+
+## ADD
+
+ADD has two forms:
+
+- `ADD <src>... <dest>`
+- `ADD ["<src>",... "<dest>"]` (this form is required for paths containing
+whitespace)
+
+The `ADD` instruction copies new files, directories or remote file URLs from `<src>`
+and adds them to the filesystem of the image at the path `<dest>`.
+
+Multiple `<src>` resources may be specified but if they are files or
+directories then they must be relative to the source directory that is
+being built (the context of the build).
+
+Each `<src>` may contain wildcards and matching will be done using Go's
+[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:
+
+    ADD hom* /mydir/        # adds all files starting with "hom"
+    ADD hom?.txt /mydir/    # ? is replaced with any single character, e.g., "home.txt"
+
+The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
+the source will be copied inside the destination container.
+
+    ADD test relativeDir/          # adds "test" to `WORKDIR`/relativeDir/
+    ADD test /absoluteDir/         # adds "test" to /absoluteDir/
+
+When adding files or directories that contain special characters (such as `[`
+and `]`), you need to escape those paths following the Golang rules to prevent
+them from being treated as a matching pattern. For example, to add a file
+named `arr[0].txt`, use the following:
+
+    ADD arr[[]0].txt /mydir/    # copy a file named "arr[0].txt" to /mydir/
+
+
+All new files and directories are created with a UID and GID of 0.
+
+In the case where `<src>` is a remote file URL, the destination will
+have permissions of 600.
If the remote file being retrieved has an HTTP
+`Last-Modified` header, the timestamp from that header will be used
+to set the `mtime` on the destination file. However, like any other file
+processed during an `ADD`, `mtime` will not be included in the determination
+of whether or not the file has changed and the cache should be updated.
+
+> **Note**:
+> If you build by passing a `Dockerfile` through STDIN (`docker
+> build - < somefile`), there is no build context, so the `Dockerfile`
+> can only contain a URL based `ADD` instruction. You can also pass a
+> compressed archive through STDIN (`docker build - < archive.tar.gz`);
+> the `Dockerfile` at the root of the archive and the rest of the
+> archive will be used as the context of the build.
+
+> **Note**:
+> If your URL files are protected using authentication, you
+> will need to use `RUN wget`, `RUN curl` or use another tool from
+> within the container as the `ADD` instruction does not support
+> authentication.
+
+> **Note**:
+> The first encountered `ADD` instruction will invalidate the cache for all
+> following instructions from the Dockerfile if the contents of `<src>` have
+> changed. This includes invalidating the cache for `RUN` instructions.
+> See the [`Dockerfile` Best Practices
+guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information.
+
+
+`ADD` obeys the following rules:
+
+- The `<src>` path must be inside the *context* of the build;
+  you cannot `ADD ../something /something`, because the first step of a
+  `docker build` is to send the context directory (and subdirectories) to the
+  docker daemon.
+
+- If `<src>` is a URL and `<dest>` does not end with a trailing slash, then a
+  file is downloaded from the URL and copied to `<dest>`.
+
+- If `<src>` is a URL and `<dest>` does end with a trailing slash, then the
+  filename is inferred from the URL and the file is downloaded to
+  `<dest>/<filename>`. For instance, `ADD http://example.com/foobar /` would
+  create the file `/foobar`. The URL must have a nontrivial path so that an
+  appropriate filename can be discovered in this case (`http://example.com`
+  will not work).
+
+- If `<src>` is a directory, the entire contents of the directory are copied,
+  including filesystem metadata.
+
+> **Note**:
+> The directory itself is not copied, just its contents.
+
+- If `<src>` is a *local* tar archive in a recognized compression format
+  (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources
+  from *remote* URLs are **not** decompressed. When a directory is copied or
+  unpacked, it has the same behavior as `tar -x`, the result is the union of:
+
+    1. Whatever existed at the destination path and
+    2. The contents of the source tree, with conflicts resolved in favor
+       of "2." on a file-by-file basis.
+
+  > **Note**:
+  > Whether a file is identified as a recognized compression format or not
+  > is done solely based on the contents of the file, not the name of the file.
+  > For example, if an empty file happens to end with `.tar.gz` this will not
+  > be recognized as a compressed file and **will not** generate any kind of
+  > decompression error message, rather the file will simply be copied to the
+  > destination.
+
+- If `<src>` is any other kind of file, it is copied individually along with
+  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
+  will be considered a directory and the contents of `<src>` will be written
+  at `<dest>/base(<src>)`.
+
+- If multiple `<src>` resources are specified, either directly or due to the
+  use of a wildcard, then `<dest>` must be a directory, and it must end with
+  a slash `/`.
+
+- If `<dest>` does not end with a trailing slash, it will be considered a
+  regular file and the contents of `<src>` will be written at `<dest>`.
+
+- If `<dest>` doesn't exist, it is created along with all missing directories
+  in its path.
+
+## COPY
+
+COPY has two forms:
+
+- `COPY <src>... <dest>`
+- `COPY ["<src>",... "<dest>"]` (this form is required for paths containing
+whitespace)
+
+The `COPY` instruction copies new files or directories from `<src>`
+and adds them to the filesystem of the container at the path `<dest>`.
+
+Multiple `<src>` resources may be specified but they must be relative
+to the source directory that is being built (the context of the build).
+
+Each `<src>` may contain wildcards and matching will be done using Go's
+[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:
+
+    COPY hom* /mydir/        # adds all files starting with "hom"
+    COPY hom?.txt /mydir/    # ? is replaced with any single character, e.g., "home.txt"
+
+The `<dest>` is an absolute path, or a path relative to `WORKDIR`, into which
+the source will be copied inside the destination container.
+
+    COPY test relativeDir/   # adds "test" to `WORKDIR`/relativeDir/
+    COPY test /absoluteDir/  # adds "test" to /absoluteDir/
+
+
+When copying files or directories that contain special characters (such as `[`
+and `]`), you need to escape those paths following the Golang rules to prevent
+them from being treated as a matching pattern. For example, to copy a file
+named `arr[0].txt`, use the following:
+
+    COPY arr[[]0].txt /mydir/    # copy a file named "arr[0].txt" to /mydir/
+
+All new files and directories are created with a UID and GID of 0.
+
+> **Note**:
+> If you build using STDIN (`docker build - < somefile`), there is no
+> build context, so `COPY` can't be used.
+
+Optionally `COPY` accepts a flag `--from=<name|index>` that can be used to set
+the source location to a previous build stage (created with `FROM .. AS <name>`)
+that will be used instead of a build context sent by the user. The flag also
+accepts a numeric index assigned for all previous build stages started with the
+`FROM` instruction. If a build stage with the specified name can't be found, an
+image with the same name is used instead.
+
+`COPY` obeys the following rules:
+
+- The `<src>` path must be inside the *context* of the build;
+  you cannot `COPY ../something /something`, because the first step of a
+  `docker build` is to send the context directory (and subdirectories) to the
+  docker daemon.
+
+- If `<src>` is a directory, the entire contents of the directory are copied,
+  including filesystem metadata.
+
+> **Note**:
+> The directory itself is not copied, just its contents.
+
+- If `<src>` is any other kind of file, it is copied individually along with
+  its metadata. In this case, if `<dest>` ends with a trailing slash `/`, it
+  will be considered a directory and the contents of `<src>` will be written
+  at `<dest>/base(<src>)`.
+
+- If multiple `<src>` resources are specified, either directly or due to the
+  use of a wildcard, then `<dest>` must be a directory, and it must end with
+  a slash `/`.
+
+- If `<dest>` does not end with a trailing slash, it will be considered a
+  regular file and the contents of `<src>` will be written at `<dest>`.
+
+- If `<dest>` doesn't exist, it is created along with all missing directories
+  in its path.
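+
+To illustrate the `--from` flag described above, here is a minimal multi-stage
+sketch (the image tags, file paths, and the `builder` stage name are
+illustrative, not part of the specification): the second stage copies a single
+artifact out of the first stage instead of out of the build context.
+
+```Dockerfile
+FROM golang:1.8 AS builder
+WORKDIR /go/src/app
+COPY . .
+# Build a static binary so the result runs on the smaller base image below
+RUN CGO_ENABLED=0 go build -o /bin/app .
+
+FROM alpine:3.6
+# Copy only the compiled binary from the stage named "builder" above
+COPY --from=builder /bin/app /usr/local/bin/app
+CMD ["app"]
+```
+
+Here `--from=builder` refers to the first stage by name; `--from=0` would
+refer to the same stage by its numeric index.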
+
+## ENTRYPOINT
+
+ENTRYPOINT has two forms:
+
+- `ENTRYPOINT ["executable", "param1", "param2"]`
+  (*exec* form, preferred)
+- `ENTRYPOINT command param1 param2`
+  (*shell* form)
+
+An `ENTRYPOINT` allows you to configure a container that will run as an executable.
+
+For example, the following will start nginx with its default content, listening
+on port 80:
+
+    docker run -i -t --rm -p 80:80 nginx
+
+Command line arguments to `docker run <image>` will be appended after all
+elements in an *exec* form `ENTRYPOINT`, and will override all elements specified
+using `CMD`.
+This allows arguments to be passed to the entry point, i.e., `docker run <image> -d`
+will pass the `-d` argument to the entry point.
+You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint`
+flag.
+
+The *shell* form prevents any `CMD` or `run` command line arguments from being
+used, but has the disadvantage that your `ENTRYPOINT` will be started as a
+subcommand of `/bin/sh -c`, which does not pass signals.
+This means that the executable will not be the container's `PID 1` - and
+will _not_ receive Unix signals - so your executable will not receive a
+`SIGTERM` from `docker stop <container>`.
+
+Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect.
+
+### Exec form ENTRYPOINT example
+
+You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands
+and arguments and then use either form of `CMD` to set additional defaults that
+are more likely to be changed.
+
+    FROM ubuntu
+    ENTRYPOINT ["top", "-b"]
+    CMD ["-c"]
+
+When you run the container, you can see that `top` is the only process:
+
+    $ docker run -it --rm --name test top -H
+    top - 08:25:00 up  7:27,  0 users,  load average: 0.00, 0.01, 0.05
+    Threads:   1 total,   1 running,   0 sleeping,   0 stopped,   0 zombie
+    %Cpu(s):  0.1 us,  0.1 sy,  0.0 ni, 99.7 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
+    KiB Mem:   2056668 total,  1616832 used,   439836 free,    99352 buffers
+    KiB Swap:  1441840 total,        0 used,  1441840 free.  1324440 cached Mem
+
+      PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
+        1 root      20   0   19744   2336   2080 R   0.0  0.1   0:00.04 top
+
+To examine the result further, you can use `docker exec`:
+
+    $ docker exec -it test ps aux
+    USER       PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+    root         1  2.6  0.1  19752  2352 ?        Ss+  08:24   0:00 top -b -H
+    root         7  0.0  0.1  15572  2164 ?        R+   08:25   0:00 ps aux
+
+And you can gracefully request `top` to shut down using `docker stop test`.
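+
+Because the `-c` in `CMD` is only a default, any arguments given on the
+`docker run` command line replace it and are appended to the *exec* form
+`ENTRYPOINT`. As a minimal sketch (assuming the image from the example above
+is available as `top`, as in the example's `docker run` command), passing
+`-H -n 1` runs `top -b -H -n 1` inside the container, which prints a single
+iteration and exits:
+
+```bash
+$ docker run -it --rm --name test-args top -H -n 1
+```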
+ +The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the +foreground (i.e., as `PID 1`): + +``` +FROM debian:stable +RUN apt-get update && apt-get install -y --force-yes apache2 +EXPOSE 80 443 +VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] +ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] +``` + +If you need to write a starter script for a single executable, you can ensure that +the final executable receives the Unix signals by using `exec` and `gosu` +commands: + +```bash +#!/usr/bin/env bash +set -e + +if [ "$1" = 'postgres' ]; then + chown -R postgres "$PGDATA" + + if [ -z "$(ls -A "$PGDATA")" ]; then + gosu postgres initdb + fi + + exec gosu postgres "$@" +fi + +exec "$@" +``` + +Lastly, if you need to do some extra cleanup (or communicate with other containers) +on shutdown, or are co-ordinating more than one executable, you may need to ensure +that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then +does some more work: + +``` +#!/bin/sh +# Note: I've written this using sh so it works in the busybox container too + +# USE the trap if you need to also do manual cleanup after the service is stopped, +# or need to start multiple services in the one container +trap "echo TRAPed signal" HUP INT QUIT TERM + +# start service in background here +/usr/sbin/apachectl start + +echo "[hit enter key to exit] or run 'docker stop '" +read + +# stop service and clean up here +echo "stopping apache" +/usr/sbin/apachectl stop + +echo "exited $0" +``` + +If you run this image with `docker run -it --rm -p 80:80 --name test apache`, +you can then examine the container's processes with `docker exec`, or `docker top`, +and then ask the script to stop Apache: + +```bash +$ docker exec -it test ps aux +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.1 0.0 4448 692 ? Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 +root 19 0.0 0.2 71304 4440 ? Ss 00:42 0:00 /usr/sbin/apache2 -k start +www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux +$ docker top test +PID USER COMMAND +10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 +10054 root /usr/sbin/apache2 -k start +10055 33 /usr/sbin/apache2 -k start +10056 33 /usr/sbin/apache2 -k start +$ /usr/bin/time docker stop test +test +real 0m 0.27s +user 0m 0.03s +sys 0m 0.03s +``` + +> **Note:** you can override the `ENTRYPOINT` setting using `--entrypoint`, +> but this can only set the binary to *exec* (no `sh -c` will be used). + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. + +### Shell form ENTRYPOINT example + +You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. 
+This form will use shell processing to substitute shell environment variables, +and will ignore any `CMD` or `docker run` command line arguments. +To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable +correctly, you need to remember to start it with `exec`: + + FROM ubuntu + ENTRYPOINT exec top -b + +When you run this image, you'll see the single `PID 1` process: + + $ docker run -it --rm --name test top + Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached + CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq + Load average: 0.08 0.03 0.05 2/98 6 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root R 3164 0% 0% top -b + +Which will exit cleanly on `docker stop`: + + $ /usr/bin/time docker stop test + test + real 0m 0.20s + user 0m 0.02s + sys 0m 0.04s + +If you forget to add `exec` to the beginning of your `ENTRYPOINT`: + + FROM ubuntu + ENTRYPOINT top -b + CMD --ignored-param1 + +You can then run it (giving it a name for the next step): + + $ docker run -it --name test top --ignored-param2 + Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached + CPU: 9% usr 2% sys 0% nic 88% idle 0% io 0% irq 0% sirq + Load average: 0.01 0.02 0.05 2/101 7 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root S 3168 0% 0% /bin/sh -c top -b cmd cmd2 + 7 1 root R 3164 0% 0% top -b + +You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. + +If you then run `docker stop test`, the container will not exit cleanly - the +`stop` command will be forced to send a `SIGKILL` after the timeout: + + $ docker exec -it test ps aux + PID USER COMMAND + 1 root /bin/sh -c top -b cmd cmd2 + 7 root top -b + 8 root ps aux + $ /usr/bin/time docker stop test + test + real 0m 10.19s + user 0m 0.04s + sys 0m 0.03s + +### Understand how CMD and ENTRYPOINT interact + +Both `CMD` and `ENTRYPOINT` instructions define what command gets executed when running a container. +There are few rules that describe their co-operation. + +1. Dockerfile should specify at least one of `CMD` or `ENTRYPOINT` commands. + +2. `ENTRYPOINT` should be defined when using the container as an executable. + +3. `CMD` should be used as a way of defining default arguments for an `ENTRYPOINT` command +or for executing an ad-hoc command in a container. + +4. `CMD` will be overridden when running the container with alternative arguments. + +The table below shows what command is executed for different `ENTRYPOINT` / `CMD` combinations: + +| | No ENTRYPOINT | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"] | +|--------------------------------|----------------------------|--------------------------------|------------------------------------------------| +| **No CMD** | *error, not allowed* | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry | +| **CMD ["exec_cmd", "p1_cmd"]** | exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd | +| **CMD ["p1_cmd", "p2_cmd"]** | p1_cmd p2_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd | +| **CMD exec_cmd p1_cmd** | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd | + +## VOLUME + + VOLUME ["/data"] + +The `VOLUME` instruction creates a mount point with the specified name +and marks it as holding externally mounted volumes from native host or other +containers. 
The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain
+string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log
+/var/db`. For more information/examples and mounting instructions via the
+Docker client, refer to
+[*Share Directories via Volumes*](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume)
+documentation.
+
+The `docker run` command initializes the newly created volume with any data
+that exists at the specified location within the base image. For example,
+consider the following Dockerfile snippet:
+
+    FROM ubuntu
+    RUN mkdir /myvol
+    RUN echo "hello world" > /myvol/greeting
+    VOLUME /myvol
+
+This Dockerfile results in an image that causes `docker run` to
+create a new mount point at `/myvol` and copy the `greeting` file
+into the newly created volume.
+
+> **Note**:
+> When using Windows-based containers, the destination of a volume inside the
+> container must be one of: a non-existing or empty directory; or a drive other
+> than C:.
+
+> **Note**:
+> If any build steps change the data within the volume after it has been
+> declared, those changes will be discarded.
+
+> **Note**:
+> The list is parsed as a JSON array, which means that
+> you must use double-quotes (") around words not single-quotes (').
+
+## USER
+
+    USER daemon
+
+The `USER` instruction sets the user name or UID to use when running the image
+and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the
+`Dockerfile`.
+
+## WORKDIR
+
+    WORKDIR /path/to/workdir
+
+The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`,
+`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`.
+If the `WORKDIR` doesn't exist, it will be created even if it's not used in any
+subsequent `Dockerfile` instruction.
+
+It can be used multiple times in the one `Dockerfile`. If a relative path
+is provided, it will be relative to the path of the previous `WORKDIR`
+instruction. For example:
+
+    WORKDIR /a
+    WORKDIR b
+    WORKDIR c
+    RUN pwd
+
+The output of the final `pwd` command in this `Dockerfile` would be
+`/a/b/c`.
+
+The `WORKDIR` instruction can resolve environment variables previously set using
+`ENV`. You can only use environment variables explicitly set in the `Dockerfile`.
+For example:
+
+    ENV DIRPATH /path
+    WORKDIR $DIRPATH/$DIRNAME
+    RUN pwd
+
+The output of the final `pwd` command in this `Dockerfile` would be
+`/path/$DIRNAME`.
+
+## ARG
+
+    ARG <name>[=<default value>]
+
+The `ARG` instruction defines a variable that users can pass at build-time to
+the builder with the `docker build` command using the `--build-arg <varname>=<value>`
+flag. If a user specifies a build argument that was not
+defined in the Dockerfile, the build outputs a warning.
+
+```
+[Warning] One or more build-args [foo] were not consumed.
+```
+
+The Dockerfile author can define a single variable by specifying `ARG` once or many
+variables by specifying `ARG` more than once. For example, a valid Dockerfile:
+
+```
+FROM busybox
+ARG user1
+ARG buildno
+...
+```
+
+A Dockerfile author may optionally specify a default value for an `ARG` instruction:
+
+```
+FROM busybox
+ARG user1=someuser
+ARG buildno=1
+...
+```
+
+If an `ARG` value has a default and if there is no value passed at build-time, the
+builder uses the default.
+
+An `ARG` variable definition comes into effect from the line on which it is
+defined in the `Dockerfile`, not from the argument's use on the command-line or
+elsewhere.
For example, consider this Dockerfile: + +``` +1 FROM busybox +2 USER ${user:-some_user} +3 ARG user +4 USER $user +... +``` +A user builds this file by calling: + +``` +$ docker build --build-arg user=what_user . +``` + +The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the +subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is +defined and the `what_user` value was passed on the command line. Prior to its definition by an +`ARG` instruction, any use of a variable results in an empty string. + +> **Warning:** It is not recommended to use build-time variables for +> passing secrets like github keys, user credentials etc. Build-time variable +> values are visible to any user of the image with the `docker history` command. + +You can use an `ARG` or an `ENV` instruction to specify variables that are +available to the `RUN` instruction. Environment variables defined using the +`ENV` instruction always override an `ARG` instruction of the same name. Consider +this Dockerfile with an `ENV` and `ARG` instruction. + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER v1.0.0 +4 RUN echo $CONT_IMG_VER +``` +Then, assume this image is built with this command: + +``` +$ docker build --build-arg CONT_IMG_VER=v2.0.1 . +``` + +In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting +passed by the user:`v2.0.1` This behavior is similar to a shell +script where a locally scoped variable overrides the variables passed as +arguments or inherited from environment, from its point of definition. + +Using the example above but a different `ENV` specification you can create more +useful interactions between `ARG` and `ENV` instructions: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} +4 RUN echo $CONT_IMG_VER +``` + +Unlike an `ARG` instruction, `ENV` values are always persisted in the built +image. Consider a docker build without the `--build-arg` flag: + +``` +$ docker build . +``` + +Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but +its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. + +The variable expansion technique in this example allows you to pass arguments +from the command line and persist them in the final image by leveraging the +`ENV` instruction. Variable expansion is only supported for [a limited set of +Dockerfile instructions.](#environment-replacement) + +Docker has a set of predefined `ARG` variables that you can use without a +corresponding `ARG` instruction in the Dockerfile. + +* `HTTP_PROXY` +* `http_proxy` +* `HTTPS_PROXY` +* `https_proxy` +* `FTP_PROXY` +* `ftp_proxy` +* `NO_PROXY` +* `no_proxy` + +To use these, simply pass them on the command line using the flag: + +``` +--build-arg = +``` + +By default, these pre-defined variables are excluded from the output of +`docker history`. Excluding them reduces the risk of accidentally leaking +sensitive authentication information in an `HTTP_PROXY` variable. + +For example, consider building the following Dockerfile using +`--build-arg HTTP_PROXY=http://user:pass@proxy.lon.example.com` + +``` Dockerfile +FROM ubuntu +RUN echo "Hello World" +``` + +In this case, the value of the `HTTP_PROXY` variable is not available in the +`docker history` and is not cached. If you were to change location, and your +proxy server changed to `http://user:pass@proxy.sfo.example.com`, a subsequent +build does not result in a cache miss. 
+ +If you need to override this behaviour then you may do so by adding an `ARG` +statement in the Dockerfile as follows: + +``` Dockerfile +FROM ubuntu +ARG HTTP_PROXY +RUN echo "Hello World" +``` + +When building this Dockerfile, the `HTTP_PROXY` is preserved in the +`docker history`, and changing its value invalidates the build cache. + +### Impact on build caching + +`ARG` variables are not persisted into the built image as `ENV` variables are. +However, `ARG` variables do impact the build cache in similar ways. If a +Dockerfile defines an `ARG` variable whose value is different from a previous +build, then a "cache miss" occurs upon its first usage, not its definition. In +particular, all `RUN` instructions following an `ARG` instruction use the `ARG` +variable implicitly (as an environment variable), thus can cause a cache miss. +All predefined `ARG` variables are exempt from caching unless there is a +matching `ARG` statement in the `Dockerfile`. + +For example, consider these two Dockerfile: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 RUN echo $CONT_IMG_VER +``` + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 RUN echo hello +``` + +If you specify `--build-arg CONT_IMG_VER=` on the command line, in both +cases, the specification on line 2 does not cause a cache miss; line 3 does +cause a cache miss.`ARG CONT_IMG_VER` causes the RUN line to be identified +as the same as running `CONT_IMG_VER=` echo hello, so if the `` +changes, we get a cache miss. + +Consider another example under the same command line: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER $CONT_IMG_VER +4 RUN echo $CONT_IMG_VER +``` +In this example, the cache miss occurs on line 3. The miss happens because +the variable's value in the `ENV` references the `ARG` variable and that +variable is changed through the command line. In this example, the `ENV` +command causes the image to include the value. + +If an `ENV` instruction overrides an `ARG` instruction of the same name, like +this Dockerfile: + +``` +1 FROM ubuntu +2 ARG CONT_IMG_VER +3 ENV CONT_IMG_VER hello +4 RUN echo $CONT_IMG_VER +``` + +Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a +constant (`hello`). As a result, the environment variables and values used on +the `RUN` (line 4) doesn't change between builds. + +## ONBUILD + + ONBUILD [INSTRUCTION] + +The `ONBUILD` instruction adds to the image a *trigger* instruction to +be executed at a later time, when the image is used as the base for +another build. The trigger will be executed in the context of the +downstream build, as if it had been inserted immediately after the +`FROM` instruction in the downstream `Dockerfile`. + +Any build instruction can be registered as a trigger. + +This is useful if you are building an image which will be used as a base +to build other images, for example an application build environment or a +daemon which may be customized with user-specific configuration. + +For example, if your image is a reusable Python application builder, it +will require application source code to be added in a particular +directory, and it might require a build script to be called *after* +that. You can't just call `ADD` and `RUN` now, because you don't yet +have access to the application source code, and it will be different for +each application build. 
You could simply provide application developers +with a boilerplate `Dockerfile` to copy-paste into their application, but +that is inefficient, error-prone and difficult to update because it +mixes with application-specific code. + +The solution is to use `ONBUILD` to register advance instructions to +run later, during the next build stage. + +Here's how it works: + +1. When it encounters an `ONBUILD` instruction, the builder adds a + trigger to the metadata of the image being built. The instruction + does not otherwise affect the current build. +2. At the end of the build, a list of all triggers is stored in the + image manifest, under the key `OnBuild`. They can be inspected with + the `docker inspect` command. +3. Later the image may be used as a base for a new build, using the + `FROM` instruction. As part of processing the `FROM` instruction, + the downstream builder looks for `ONBUILD` triggers, and executes + them in the same order they were registered. If any of the triggers + fail, the `FROM` instruction is aborted which in turn causes the + build to fail. If all triggers succeed, the `FROM` instruction + completes and the build continues as usual. +4. Triggers are cleared from the final image after being executed. In + other words they are not inherited by "grand-children" builds. + +For example you might add something like this: + + [...] + ONBUILD ADD . /app/src + ONBUILD RUN /usr/local/bin/python-build --dir /app/src + [...] + +> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. + +> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. + +## STOPSIGNAL + + STOPSIGNAL signal + +The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit. +This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, +or a signal name in the format SIGNAME, for instance SIGKILL. + +## HEALTHCHECK + +The `HEALTHCHECK` instruction has two forms: + +* `HEALTHCHECK [OPTIONS] CMD command` (check container health by running a command inside the container) +* `HEALTHCHECK NONE` (disable any healthcheck inherited from the base image) + +The `HEALTHCHECK` instruction tells Docker how to test a container to check that +it is still working. This can detect cases such as a web server that is stuck in +an infinite loop and unable to handle new connections, even though the server +process is still running. + +When a container has a healthcheck specified, it has a _health status_ in +addition to its normal status. This status is initially `starting`. Whenever a +health check passes, it becomes `healthy` (whatever state it was previously in). +After a certain number of consecutive failures, it becomes `unhealthy`. + +The options that can appear before `CMD` are: + +* `--interval=DURATION` (default: `30s`) +* `--timeout=DURATION` (default: `30s`) +* `--start-period=DURATION` (default: `0s`) +* `--retries=N` (default: `3`) + +The health check will first run **interval** seconds after the container is +started, and then again **interval** seconds after each previous check completes. + +If a single run of the check takes longer than **timeout** seconds then the check +is considered to have failed. + +It takes **retries** consecutive failures of the health check for the container +to be considered `unhealthy`. + +**start period** provides initialization time for containers that need time to bootstrap. 
+Probe failure during that period will not be counted towards the maximum number of retries. +However, if a health check succeeds during the start period, the container is considered +started and all consecutive failures will be counted towards the maximum number of retries. + +There can only be one `HEALTHCHECK` instruction in a Dockerfile. If you list +more than one then only the last `HEALTHCHECK` will take effect. + +The command after the `CMD` keyword can be either a shell command (e.g. `HEALTHCHECK +CMD /bin/check-running`) or an _exec_ array (as with other Dockerfile commands; +see e.g. `ENTRYPOINT` for details). + +The command's exit status indicates the health status of the container. +The possible values are: + +- 0: success - the container is healthy and ready for use +- 1: unhealthy - the container is not working correctly +- 2: reserved - do not use this exit code + +For example, to check every five minutes or so that a web-server is able to +serve the site's main page within three seconds: + + HEALTHCHECK --interval=5m --timeout=3s \ + CMD curl -f http://localhost/ || exit 1 + +To help debug failing probes, any output text (UTF-8 encoded) that the command writes +on stdout or stderr will be stored in the health status and can be queried with +`docker inspect`. Such output should be kept short (only the first 4096 bytes +are stored currently). + +When the health status of a container changes, a `health_status` event is +generated with the new status. + +The `HEALTHCHECK` feature was added in Docker 1.12. + + +## SHELL + + SHELL ["executable", "parameters"] + +The `SHELL` instruction allows the default shell used for the *shell* form of +commands to be overridden. The default shell on Linux is `["/bin/sh", "-c"]`, and on +Windows is `["cmd", "/S", "/C"]`. The `SHELL` instruction *must* be written in JSON +form in a Dockerfile. + +The `SHELL` instruction is particularly useful on Windows where there are +two commonly used and quite different native shells: `cmd` and `powershell`, as +well as alternate shells available including `sh`. + +The `SHELL` instruction can appear multiple times. Each `SHELL` instruction overrides +all previous `SHELL` instructions, and affects all subsequent instructions. For example: + + FROM microsoft/windowsservercore + + # Executed as cmd /S /C echo default + RUN echo default + + # Executed as cmd /S /C powershell -command Write-Host default + RUN powershell -command Write-Host default + + # Executed as powershell -command Write-Host hello + SHELL ["powershell", "-command"] + RUN Write-Host hello + + # Executed as cmd /S /C echo hello + SHELL ["cmd", "/S"", "/C"] + RUN echo hello + +The following instructions can be affected by the `SHELL` instruction when the +*shell* form of them is used in a Dockerfile: `RUN`, `CMD` and `ENTRYPOINT`. + +The following example is a common pattern found on Windows which can be +streamlined by using the `SHELL` instruction: + + ... + RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" + ... + +The command invoked by docker will be: + + cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" + +This is inefficient for two reasons. First, there is an un-necessary cmd.exe command +processor (aka shell) being invoked. Second, each `RUN` instruction in the *shell* +form requires an extra `powershell -command` prefixing the command. + +To make this more efficient, one of two mechanisms can be employed. One is to +use the JSON form of the RUN command such as: + + ... 
+ RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""] + ... + +While the JSON form is unambiguous and does not use the un-necessary cmd.exe, +it does require more verbosity through double-quoting and escaping. The alternate +mechanism is to use the `SHELL` instruction and the *shell* form, +making a more natural syntax for Windows users, especially when combined with +the `escape` parser directive: + + # escape=` + + FROM microsoft/nanoserver + SHELL ["powershell","-command"] + RUN New-Item -ItemType Directory C:\Example + ADD Execute-MyCmdlet.ps1 c:\example\ + RUN c:\example\Execute-MyCmdlet -sample 'hello world' + +Resulting in: + + PS E:\docker\build\shell> docker build -t shell . + Sending build context to Docker daemon 4.096 kB + Step 1/5 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/5 : SHELL powershell -command + ---> Running in 6fcdb6855ae2 + ---> 6331462d4300 + Removing intermediate container 6fcdb6855ae2 + Step 3/5 : RUN New-Item -ItemType Directory C:\Example + ---> Running in d0eef8386e97 + + + Directory: C:\ + + + Mode LastWriteTime Length Name + ---- ------------- ------ ---- + d----- 10/28/2016 11:26 AM Example + + + ---> 3f2fbf1395d9 + Removing intermediate container d0eef8386e97 + Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\ + ---> a955b2621c31 + Removing intermediate container b825593d39fc + Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world' + ---> Running in be6d8e63fe75 + hello world + ---> 8e559e9bf424 + Removing intermediate container be6d8e63fe75 + Successfully built 8e559e9bf424 + PS E:\docker\build\shell> + +The `SHELL` instruction could also be used to modify the way in which +a shell operates. For example, using `SHELL cmd /S /C /V:ON|OFF` on Windows, delayed +environment variable expansion semantics could be modified. + +The `SHELL` instruction can also be used on Linux should an alternate shell be +required such as `zsh`, `csh`, `tcsh` and others. + +The `SHELL` feature was added in Docker 1.12. + +## Dockerfile examples + +Below you can see some examples of Dockerfile syntax. If you're interested in +something more realistic, take a look at the list of [Dockerization examples](https://docs.docker.com/engine/examples/). + +``` +# Nginx +# +# VERSION 0.0.1 + +FROM ubuntu +LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" +RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server +``` + +``` +# Firefox over VNC +# +# VERSION 0.3 + +FROM ubuntu + +# Install vnc, xvfb in order to create a 'fake' display and firefox +RUN apt-get update && apt-get install -y x11vnc xvfb firefox +RUN mkdir ~/.vnc +# Setup a password +RUN x11vnc -storepasswd 1234 ~/.vnc/passwd +# Autostart firefox (might not be the best way, but it does the trick) +RUN bash -c 'echo "firefox" >> /.bashrc' + +EXPOSE 5900 +CMD ["x11vnc", "-forever", "-usepw", "-create"] +``` + +``` +# Multiple images example +# +# VERSION 0.1 + +FROM ubuntu +RUN echo foo > bar +# Will output something like ===> 907ad6c2736f + +FROM ubuntu +RUN echo moo > oink +# Will output something like ===> 695d7793cbe4 + +# You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with +# /oink. 
+``` diff --git a/docs/reference/commandline/attach.md b/docs/reference/commandline/attach.md new file mode 100644 index 0000000000..4153331bad --- /dev/null +++ b/docs/reference/commandline/attach.md @@ -0,0 +1,160 @@ +--- +title: "attach" +description: "The attach command description and usage" +keywords: "attach, running, container" +--- + + + +# attach + +```markdown +Usage: docker attach [OPTIONS] CONTAINER + +Attach local standard input, output, and error streams to a running container + +Options: + --detach-keys string Override the key sequence for detaching a container + --help Print usage + --no-stdin Do not attach STDIN + --sig-proxy Proxy all received signals to the process (default true) +``` + +## Description + +Use `docker attach` to attach your terminal's standard input, output, and error +(or any combination of the three) to a running container using the container's +ID or name. This allows you to view its ongoing output or to control it +interactively, as though the commands were running directly in your terminal. + +> **Note:** +> The `attach` command will display the output of the `ENTRYPOINT/CMD` process. This +> can appear as if the attach command is hung when in fact the process may simply +> not be interacting with the terminal at that time. + +You can attach to the same contained process multiple times simultaneously, +even as a different user with the appropriate permissions. + +To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the +container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to +the container. You can detach from a container and leave it running using the + `CTRL-p CTRL-q` key sequence. + +> **Note:** +> A process running as PID 1 inside a container is treated specially by +> Linux: it ignores any signal with the default action. So, the process +> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do +> so. + +It is forbidden to redirect the standard input of a `docker attach` command +while attaching to a tty-enabled container (i.e.: launched with `-t`). + +While a client is connected to container's stdio using `docker attach`, Docker +uses a ~1MB memory buffer to maximize the throughput of the application. If +this buffer is filled, the speed of the API connection will start to have an +effect on the process output writing speed. This is similar to other +applications like SSH. Because of this, it is not recommended to run +performance critical applications that generate a lot of output in the +foreground over a slow client connection. Instead, users should use the +`docker logs` command to get access to the logs. + +### Override the detach sequence + +If you want, you can configure an override the Docker key sequence for detach. +This is useful if the Docker default sequence conflicts with key sequence you +use for other applications. There are two ways to define your own detach key +sequence, as a per-container override or as a configuration property on your +entire configuration. + +To override the sequence for an individual container, use the +`--detach-keys=""` flag with the `docker attach` command. The format of +the `` is either a letter [a-Z], or the `ctrl-` combined with any of +the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key +sequences. 
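+
+For example (a minimal sketch; the container name is illustrative), the
+override is passed directly to `docker attach`, after which pressing `CTRL-x`
+detaches from the container instead of the default `CTRL-p CTRL-q` sequence:
+
+```bash
+$ docker attach --detach-keys="ctrl-x" my-container
+```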
To configure a different configuration default key sequence for all +containers, see [**Configuration file** section](cli.md#configuration-files). + +## Examples + +### Attach to and detach from a running container + +```bash +$ docker run -d --name topdemo ubuntu /usr/bin/top -b + +$ docker attach topdemo + +top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 +Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie +Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st +Mem: 373572k total, 355560k used, 18012k free, 27872k buffers +Swap: 786428k total, 0k used, 786428k free, 221740k cached + +PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + + + top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355780k used, 17792k free, 27880k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top +^C$ + +$ echo $? +0 +$ docker ps -a | grep topdemo + +7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo +``` + +### Get the exit code of the container's command + +And in this second example, you can see the exit code returned by the `bash` +process is returned by the `docker attach` command to its caller too: + +```bash + $ docker run --name test -d -it debian + + 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab + + $ docker attach test + + root@f38c87f2a42d:/# exit 13 + + exit + + $ echo $? 
+ + 13 + + $ docker ps -a | grep test + + 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test +``` diff --git a/docs/reference/commandline/build.md b/docs/reference/commandline/build.md new file mode 100644 index 0000000000..9f587372c6 --- /dev/null +++ b/docs/reference/commandline/build.md @@ -0,0 +1,581 @@ +--- +title: "build" +description: "The build command description and usage" +keywords: "build, docker, image" +--- + + + +# build + +```markdown +Usage: docker build [OPTIONS] PATH | URL | - + +Build an image from a Dockerfile + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + --build-arg value Set build-time variables (default []) + --cache-from value Images to consider as cache sources (default []) + --cgroup-parent string Optional parent cgroup for the container + --compress Compress the build context using gzip + --cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --disable-content-trust Skip image verification (default true) + -f, --file string Name of the Dockerfile (Default is 'PATH/Dockerfile') + --force-rm Always remove intermediate containers + --help Print usage + --iidfile string Write the image ID to the file + --isolation string Container isolation technology + --label value Set metadata for an image (default []) + -m, --memory string Memory limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --network string Set the networking mode for the RUN instructions during build + 'bridge': use default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-cache Do not use cache when building the image + --pull Always attempt to pull a newer version of the image + -q, --quiet Suppress the build output and print image ID on success + --rm Remove intermediate containers after a successful build (default true) + --security-opt value Security Options (default []) + --shm-size bytes Size of /dev/shm + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --squash Squash newly built layers into a single new layer (**Experimental Only**) + -t, --tag value Name and optionally a tag in the 'name:tag' format (default []) + --target string Set the target build stage to build. + --ulimit value Ulimit options (default []) +``` + +## Description + +Builds Docker images from a Dockerfile and a "context". A build's context is +the files located in the specified `PATH` or `URL`. The build process can refer +to any of the files in the context. For example, your build can use an +[*ADD*](../builder.md#add) instruction to reference a file in the +context. + +The `URL` parameter can refer to three kinds of resources: Git repositories, +pre-packaged tarball contexts and plain text files. + +### Git repositories + +When the `URL` parameter points to the location of a Git repository, the +repository acts as the build context. The system recursively fetches the +repository and its submodules. The commit history is not preserved. 
A +repository is first pulled into a temporary directory on your local host. After +the that succeeds, the directory is sent to the Docker daemon as the context. +Local copy gives you the ability to access private repositories using local +user credentials, VPN's, and so forth. + +> **Note:** +> If the `URL` parameter contains a fragment the system will recursively clone +> the repository and its submodules using a `git clone --recursive` command. + +Git URLs accept context configuration in their fragment section, separated by a +colon `:`. The first part represents the reference that Git will check out, +this can be either a branch, a tag, or a remote reference. The second part +represents a subdirectory inside the repository that will be used as a build +context. + +For example, run this command to use a directory called `docker` in the branch +`container`: + +```bash +$ docker build https://github.com/docker/rootfs.git#container:docker +``` + +The following table represents all the valid suffixes with their build +contexts: + +Build Syntax Suffix | Commit Used | Build Context Used +--------------------------------|-----------------------|------------------- +`myrepo.git` | `refs/heads/master` | `/` +`myrepo.git#mytag` | `refs/tags/mytag` | `/` +`myrepo.git#mybranch` | `refs/heads/mybranch` | `/` +`myrepo.git#pull/42/head` | `refs/pull/42/head` | `/` +`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` +`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` +`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` +`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` + + +### Tarball contexts + +If you pass an URL to a remote tarball, the URL itself is sent to the daemon: + +```bash +$ docker build http://server/context.tar.gz +``` + +The download operation will be performed on the host the Docker daemon is +running on, which is not necessarily the same host from which the build command +is being issued. The Docker daemon will fetch `context.tar.gz` and use it as the +build context. Tarball contexts must be tar archives conforming to the standard +`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2', +'gzip' or 'identity' (no compression) formats. + +### Text files + +Instead of specifying a context, you can pass a single `Dockerfile` in the +`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`: + +```bash +$ docker build - < Dockerfile +``` + +With Powershell on Windows, you can run: + +```powershell +Get-Content Dockerfile | docker build - +``` + +If you use `STDIN` or specify a `URL` pointing to a plain text file, the system +places the contents into a file called `Dockerfile`, and any `-f`, `--file` +option is ignored. In this scenario, there is no context. + +By default the `docker build` command will look for a `Dockerfile` at the root +of the build context. The `-f`, `--file`, option lets you specify the path to +an alternative file to use instead. This is useful in cases where the same set +of files are used for multiple builds. The path must be to a file within the +build context. If a relative path is specified then it is interpreted as +relative to the root of the context. + +In most cases, it's best to put each Dockerfile in an empty directory. Then, +add to that directory only the files needed for building the Dockerfile. To +increase the build's performance, you can exclude files and directories by +adding a `.dockerignore` file to that directory as well. 
For information on +creating one, see the [.dockerignore file](../builder.md#dockerignore-file). + +If the Docker client loses connection to the daemon, the build is canceled. +This happens if you interrupt the Docker client with `CTRL-c` or if the Docker +client is killed for any reason. If the build initiated a pull which is still +running at the time the build is cancelled, the pull is cancelled as well. + +## Return code + +On a successful build, a return code of success `0` will be returned. When the +build fails, a non-zero failure code will be returned. + +There should be informational output of the reason for failure output to +`STDERR`: + +```bash +$ docker build -t fail . + +Sending build context to Docker daemon 2.048 kB +Sending build context to Docker daemon +Step 1/3 : FROM busybox + ---> 4986bf8c1536 +Step 2/3 : RUN exit 13 + ---> Running in e26670ec7a0a +INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 +$ echo $? +1 +``` + +See also: + +[*Dockerfile Reference*](../builder.md). + +## Examples + +### Build with PATH + +```bash +$ docker build . + +Uploading context 10240 bytes +Step 1/3 : FROM busybox +Pulling repository busybox + ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ +Step 2/3 : RUN ls -lh / + ---> Running in 9c9e81692ae9 +total 24 +drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin +drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev +drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib +lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib +dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc +lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin +dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys +drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr + ---> b35f4035db3f +Step 3/3 : CMD echo Hello world + ---> Running in 02071fceb21b + ---> f52f38b7823e +Successfully built f52f38b7823e +Removing intermediate container 9c9e81692ae9 +Removing intermediate container 02071fceb21b +``` + +This example specifies that the `PATH` is `.`, and so all the files in the +local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies +where to find the files for the "context" of the build on the Docker daemon. +Remember that the daemon could be running on a remote machine and that no +parsing of the Dockerfile happens at the client side (where you're running +`docker build`). That means that *all* the files at `PATH` get sent, not just +the ones listed to [*ADD*](../builder.md#add) in the Dockerfile. + +The transfer of context from the local machine to the Docker daemon is what the +`docker` client means when you see the "Sending build context" message. + +If you wish to keep the intermediate containers after the build is complete, +you must use `--rm=false`. This does not affect the build cache. + +### Build with URL + +```bash +$ docker build github.com/creack/docker-firefox +``` + +This will clone the GitHub repository and use the cloned repository as context. +The Dockerfile at the root of the repository is used as Dockerfile. You can +specify an arbitrary Git repository by using the `git://` or `git@` scheme. 
+ +```bash +$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz + +Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B +Step 1/3 : FROM busybox + ---> 8c2e06607696 +Step 2/3 : ADD ctx/container.cfg / + ---> e7829950cee3 +Removing intermediate container b35224abf821 +Step 3/3 : CMD /bin/ls + ---> Running in fbc63d321d73 + ---> 3286931702ad +Removing intermediate container fbc63d321d73 +Successfully built 377c409b35e4 +``` + +This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which +downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` +parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used +to build the image. Any `ADD` commands in that `Dockerfile` that refers to local +paths must be relative to the root of the contents inside `ctx.tar.gz`. In the +example above, the tarball contains a directory `ctx/`, so the `ADD +ctx/container.cfg /` operation works as expected. + +### Build with - + +```bash +$ docker build - < Dockerfile +``` + +This will read a Dockerfile from `STDIN` without context. Due to the lack of a +context, no contents of any local directory will be sent to the Docker daemon. +Since there is no context, a Dockerfile `ADD` only works if it refers to a +remote URL. + +```bash +$ docker build - < context.tar.gz +``` + +This will build an image for a compressed context read from `STDIN`. Supported +formats are: bzip2, gzip and xz. + +### Use a .dockerignore file + +```bash +$ docker build . + +Uploading context 18.829 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +$ echo ".git" > .dockerignore +$ docker build . +Uploading context 6.76 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +``` + +This example shows the use of the `.dockerignore` file to exclude the `.git` +directory from the context. Its effect can be seen in the changed size of the +uploaded context. The builder reference contains detailed information on +[creating a .dockerignore file](../builder.md#dockerignore-file) + +### Tag an image (-t) + +```bash +$ docker build -t vieux/apache:2.0 . +``` + +This will build like the previous example, but it will then tag the resulting +image. The repository name will be `vieux/apache` and the tag will be `2.0`. +[Read more about valid tags](tag.md). + +You can apply multiple tags to an image. For example, you can apply the `latest` +tag to a newly built image and add another tag that references a specific +version. +For example, to tag an image both as `whenry/fedora-jboss:latest` and +`whenry/fedora-jboss:v2.1`, use the following: + +```bash +$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . +``` + +### Specify a Dockerfile (-f) + +```bash +$ docker build -f Dockerfile.debug . +``` + +This will use a file called `Dockerfile.debug` for the build instructions +instead of `Dockerfile`. + +```bash +$ curl example.com/remote/Dockerfile | docker build -f - . +``` + +The above command will use the current directory as the build context and read +a Dockerfile from stdin. + +```bash +$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . +$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . 
+``` + +The above commands will build the current build context (as specified by the +`.`) twice, once using a debug version of a `Dockerfile` and once using a +production version. + +```bash +$ cd /home/me/myapp/some/dir/really/deep +$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp +$ docker build -f ../../../../dockerfiles/debug /home/me/myapp +``` + +These two `docker build` commands do the exact same thing. They both use the +contents of the `debug` file instead of looking for a `Dockerfile` and will use +`/home/me/myapp` as the root of the build context. Note that `debug` is in the +directory structure of the build context, regardless of how you refer to it on +the command line. + +> **Note:** +> `docker build` will return a `no such file or directory` error if the +> file or directory does not exist in the uploaded context. This may +> happen if there is no context, or if you specify a file that is +> elsewhere on the Host system. The context is limited to the current +> directory (and its children) for security reasons, and to ensure +> repeatable builds on remote Docker hosts. This is also the reason why +> `ADD ../file` will not work. + +### Use a custom parent cgroup (--cgroup-parent) + +When `docker build` is run with the `--cgroup-parent` option the containers +used in the build will be run with the [corresponding `docker run` +flag](../run.md#specifying-custom-cgroups). + +### Set ulimits in container (--ulimit) + +Using the `--ulimit` option with `docker build` will cause each build step's +container to be started using those [`--ulimit` +flag values](./run.md#set-ulimits-in-container-ulimit). + +### Set build-time variables (--build-arg) + +You can use `ENV` instructions in a Dockerfile to define variable +values. These values persist in the built image. However, often +persistence is not what you want. Users want to specify variables differently +depending on which host they build an image on. + +A good example is `http_proxy` or source versions for pulling intermediate +files. The `ARG` instruction lets Dockerfile authors define values that users +can set at build-time using the `--build-arg` flag: + +```bash +$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 . +``` + +This flag allows you to pass the build-time variables that are +accessed like regular environment variables in the `RUN` instruction of the +Dockerfile. Also, these values don't persist in the intermediate or final images +like `ENV` values do. + +Using this flag will not alter the output you see when the `ARG` lines from the +Dockerfile are echoed during the build process. + +For detailed information on using `ARG` and `ENV` instructions, see the +[Dockerfile reference](../builder.md). + +### Optional security options (--security-opt) + +This flag is only supported on a daemon running on Windows, and only supports +the `credentialspec` option. The `credentialspec` must be in the format +`file://spec.txt` or `registry://keyname`. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. 
On Microsoft Windows, you can specify these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt`. If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + +### Add entries to container hosts file (--add-host) + +You can add other hosts into a container's `/etc/hosts` file by using one or +more `--add-host` flags. This example adds a static address for a host named +`docker`: + + $ docker build --add-host=docker:10.180.0.1 . + +### Specify the target build stage (--target) + +When building a Dockerfile with multiple build stages, `--target` can be used to +specify an intermediate build stage by name as the final stage for the resulting +image. Commands after the target stage will be skipped. + +```Dockerfile +FROM debian AS build-env +... + +FROM alpine AS production-env +... +``` + +```bash +$ docker build -t mybuildimage --target build-env . +``` + +### Squash an image's layers (--squash) **Experimental Only** + +#### Overview + +Once the image is built, squash the new layers into a new image with a single +new layer. Squashing does not destroy any existing image; rather, it creates a new +image with the content of the squashed layers. This effectively makes it look +like all `Dockerfile` commands were created with a single layer. The build +cache is preserved with this method. + +**Note**: using this option means the new image will not be able to take +advantage of layer sharing with other images and may use significantly more +space. + +**Note**: using this option you may see significantly more space used due to +storing two copies of the image, one for the build cache with all the cache +layers intact, and one for the squashed version. + +#### Prerequisites + +The example on this page uses experimental mode in Docker 1.13. + +Experimental mode can be enabled by using the `--experimental` flag when starting the Docker daemon or by setting `experimental: true` in the `daemon.json` configuration file. + +By default, experimental mode is disabled. To see the current configuration, use the `docker version` command. + +```none + +Server: + Version: 1.13.1 + API version: 1.26 (minimum version 1.12) + Go version: go1.7.5 + Git commit: 092cba3 + Built: Wed Feb 8 06:35:24 2017 + OS/Arch: linux/amd64 + Experimental: false + + [...] + +``` + +To enable experimental mode, restart the Docker daemon with the experimental flag enabled. + +#### Enable Docker experimental + +Experimental features are included in the standard Docker binaries as of version 1.13.0. To enable them, start the Docker daemon with the `--experimental` flag. You can also enable the daemon flag via `/etc/docker/daemon.json`, e.g.
+ +``` + +{ + "experimental": true +} + +``` +Then make sure the experimental flag is enabled: + +```bash + +$ docker version -f '{{.Server.Experimental}}' +true + +``` + +#### Build an image with `--squash` argument + +The following is an example of a `docker build` with the `--squash` argument, using this `Dockerfile`: + +```Dockerfile + +FROM busybox +RUN echo hello > /hello +RUN echo world >> /hello +RUN touch remove_me /remove_me +ENV HELLO world +RUN rm /remove_me + +``` +An image named `test` is built with the `--squash` argument. + +```bash + +$ docker build --squash -t test . + +[...] + +``` + +If everything is right, the history will look like this: + +```bash +$ docker history test + +IMAGE CREATED CREATED BY SIZE COMMENT +4e10cb5b4cac 3 seconds ago 12 B merge sha256:88a7b0112a41826885df0e7072698006ee8f621c6ab99fca7fe9151d7b599702 to sha256:47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb +<missing> 5 minutes ago /bin/sh -c rm /remove_me 0 B +<missing> 5 minutes ago /bin/sh -c #(nop) ENV HELLO=world 0 B +<missing> 5 minutes ago /bin/sh -c touch remove_me /remove_me 0 B +<missing> 5 minutes ago /bin/sh -c echo world >> /hello 0 B +<missing> 6 minutes ago /bin/sh -c echo hello > /hello 0 B +<missing> 7 weeks ago /bin/sh -c #(nop) CMD ["sh"] 0 B +<missing> 7 weeks ago /bin/sh -c #(nop) ADD file:47ca6e777c36a4cfff 1.113 MB + +``` +Note that the names of all the original layers now show as `<missing>`, and that there is a new layer with the COMMENT `merge`. + +To test the image, check that `/remove_me` is gone, that `/hello` contains `hello\nworld`, and that the value of the `HELLO` environment variable is `world`. diff --git a/docs/reference/commandline/cli.md b/docs/reference/commandline/cli.md new file mode 100644 index 0000000000..4d1cd2a638 --- /dev/null +++ b/docs/reference/commandline/cli.md @@ -0,0 +1,317 @@ +--- +title: "Use the Docker command line" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +--- + + + +# docker + +To list available commands, either run `docker` with no parameters +or execute `docker help`: + +```bash +$ docker +Usage: docker [OPTIONS] COMMAND [ARG...] + docker [ --help | -v | --version ] + +A self-sufficient runtime for containers. + +Options: + --config string Location of client config files (default "/root/.docker") + -D, --debug Enable debug mode + --help Print usage + -H, --host value Daemon socket(s) to connect to (default []) + -l, --log-level string Set the logging level ("debug"|"info"|"warn"|"error"|"fatal") (default "info") + --tls Use TLS; implied by --tlsverify + --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") + --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") + --tlskey string Path to TLS key file (default "/root/.docker/key.pem") + --tlsverify Use TLS and verify the remote + -v, --version Print version information and quit + +Commands: + attach Attach to a running container + # […] +``` + +## Description + +Depending on your Docker system configuration, you may be required to preface +each `docker` command with `sudo`. To avoid having to use `sudo` with the +`docker` command, your system administrator can create a Unix group called +`docker` and add users to it. + +For more information about installing Docker or `sudo` configuration, refer to +the [installation](https://docs.docker.com/engine/installation/) instructions for your operating system.
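+As a minimal sketch of that group setup (assuming a Linux host and an example
+user named `alice`; your operating system's installation instructions are the
+authoritative reference):
+
+```bash
+# Create the docker group (it may already exist) and add a user to it.
+$ sudo groupadd docker
+$ sudo usermod -aG docker alice
+# The user must log out and back in for the new group membership to take effect.
+```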
+ +### Environment variables + +For easy reference, the following list of environment variables is supported +by the `docker` command line: + +* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`) +* `DOCKER_CONFIG` The location of your client configuration files. +* `DOCKER_CERT_PATH` The location of your authentication keys. +* `DOCKER_DRIVER` The graph driver to use. +* `DOCKER_HOST` Daemon socket to connect to. +* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is + unsuitable for Docker. +* `DOCKER_RAMDISK` If set this will disable 'pivot_root'. +* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote. +* `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images. + Equates to `--disable-content-trust=false` for build, create, pull, push, run. +* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults + to the same URL as the registry. +* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm` and + `docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are + printed. This may become the default in a future release, at which point this environment variable is removed. +* `DOCKER_TMPDIR` Location for temporary Docker files. + +Because Docker is developed using Go, you can also use any environment +variables used by the Go runtime. In particular, you may find these useful: + +* `HTTP_PROXY` +* `HTTPS_PROXY` +* `NO_PROXY` + +These Go environment variables are case-insensitive. See the +[Go specification](http://golang.org/pkg/net/http/) for details on these +variables. + +### Configuration files + +By default, the Docker command line stores its configuration files in a +directory called `.docker` within your `$HOME` directory. However, you can +specify a different location via the `DOCKER_CONFIG` environment variable +or the `--config` command line option. If both are specified, then the +`--config` option overrides the `DOCKER_CONFIG` environment variable. +For example: + + docker --config ~/testconfigs/ ps + +This instructs Docker to use the configuration files in your `~/testconfigs/` +directory when running the `ps` command. + +Docker manages most of the files in the configuration directory +and you should not modify them. However, you *can modify* the +`config.json` file to control certain aspects of how the `docker` +command behaves. + +Currently, you can modify the `docker` command behavior using environment +variables or command-line options. You can also use options within +`config.json` to modify some of the same behavior. When using these +mechanisms, you must keep in mind the order of precedence among them. Command +line options override environment variables, and environment variables override +properties you specify in a `config.json` file. + +The `config.json` file stores a JSON encoding of several properties: + +The property `HttpHeaders` specifies a set of headers to include in all messages +sent from the Docker client to the daemon. Docker does not try to interpret or +understand these headers; it simply puts them into the messages. Docker does +not allow these headers to change any headers it sets for itself. + +The property `psFormat` specifies the default format for `docker ps` output. +When the `--format` flag is not provided with the `docker ps` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format.
For a list of supported formatting +directives, see the +[**Formatting** section in the `docker ps` documentation](ps.md) + +The property `imagesFormat` specifies the default format for `docker images` output. +When the `--format` flag is not provided with the `docker images` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see the [**Formatting** section in the `docker images` documentation](images.md) + +The property `pluginsFormat` specifies the default format for `docker plugin ls` output. +When the `--format` flag is not provided with the `docker plugin ls` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see the [**Formatting** section in the `docker plugin ls` documentation](plugin_ls.md) + +The property `servicesFormat` specifies the default format for `docker +service ls` output. When the `--format` flag is not provided with the +`docker service ls` command, Docker's client uses this property. If this +property is not set, the client falls back to the default json format. For a +list of supported formatting directives, see the +[**Formatting** section in the `docker service ls` documentation](service_ls.md) + +The property `serviceInspectFormat` specifies the default format for `docker +service inspect` output. When the `--format` flag is not provided with the +`docker service inspect` command, Docker's client uses this property. If this +property is not set, the client falls back to the default json format. For a +list of supported formatting directives, see the +[**Formatting** section in the `docker service inspect` documentation](service_inspect.md) + +The property `statsFormat` specifies the default format for `docker +stats` output. When the `--format` flag is not provided with the +`docker stats` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. For a list of supported formatting directives, see +[**Formatting** section in the `docker stats` documentation](stats.md) + +The property `secretFormat` specifies the default format for `docker +secret ls` output. When the `--format` flag is not provided with the +`docker secret ls` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. For a list of supported formatting directives, see +[**Formatting** section in the `docker secret ls` documentation](secret_ls.md) + + +The property `nodesFormat` specifies the default format for `docker node ls` output. +When the `--format` flag is not provided with the `docker node ls` command, +Docker's client uses the value of `nodesFormat`. If the value of `nodesFormat` is not set, +the client uses the default table format. For a list of supported formatting +directives, see the [**Formatting** section in the `docker node ls` documentation](node_ls.md) + +The property `configFormat` specifies the default format for `docker +config ls` output. When the `--format` flag is not provided with the +`docker config ls` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. 
For a list of supported formatting directives, see +[**Formatting** section in the `docker config ls` documentation](config_ls.md) + +The property `credsStore` specifies an external binary to serve as the default +credential store. When this property is set, `docker login` will attempt to +store credentials in the binary specified by `docker-credential-<value>` which +is visible on `$PATH`. If this property is not set, credentials will be stored +in the `auths` property of the config. For more information, see the +[**Credentials store** section in the `docker login` documentation](login.md#credentials-store) + +The property `credHelpers` specifies a set of credential helpers to use +preferentially over `credsStore` or `auths` when storing and retrieving +credentials for specific registries. If this property is set, the binary +`docker-credential-<value>` will be used when storing or retrieving credentials +for a specific registry. For more information, see the +[**Credential helpers** section in the `docker login` documentation](login.md#credential-helpers) + +Once attached to a container, users detach from it and leave it running using +the `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable +using the `detachKeys` property. Specify a `<sequence>` value for the +property. The format of the `<sequence>` is a comma-separated list of either +a letter [a-Z], or `ctrl-` combined with any of the following: + +* `a-z` (a single lowercase alpha character) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +Your customization applies to all containers started with your Docker client. +Users can override your custom or the default key sequence on a per-container +basis. To do this, the user specifies the `--detach-keys` flag with the `docker +attach`, `docker exec`, `docker run` or `docker start` command. + +Following is a sample `config.json` file: + +```json +{ + "HttpHeaders": { + "MyHeader": "MyValue" + }, + "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", + "pluginsFormat": "table {{.ID}}\t{{.Name}}\t{{.Enabled}}", + "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", + "servicesFormat": "table {{.ID}}\t{{.Name}}\t{{.Mode}}", + "secretFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}", + "configFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}", + "serviceInspectFormat": "pretty", + "nodesFormat": "table {{.ID}}\t{{.Hostname}}\t{{.Availability}}", + "detachKeys": "ctrl-e,e", + "credsStore": "secretservice", + "credHelpers": { + "awesomereg.example.org": "hip-star", + "unicorn.example.com": "vcbait" + } +} +``` + +### Notary + +If using your own notary server and a self-signed certificate or an internal +Certificate Authority, you need to place the certificate at +`tls/<registry_url>/ca.crt` in your docker config directory. + +Alternatively you can trust the certificate globally by adding it to your system's +list of root Certificate Authorities. + +## Examples + +### Display help text + +To list the help on any command, just execute the command, followed by the +`--help` option. + + $ docker run --help + + Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] + + Run a command in a new container + + Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + ...
+ +### Option types + +Single character command line options can be combined, so rather than +typing `docker run -i -t --name test busybox sh`, +you can write `docker run -it --name test busybox sh`. + +#### Boolean + +Boolean options take the form `-d=false`. The value you see in the help text is +the default value which is set if you do **not** specify that flag. If you +specify a Boolean flag without a value, this will set the flag to `true`, +irrespective of the default value. + +For example, running `docker run -d` will set the value to `true`, so your +container **will** run in "detached" mode, in the background. + +Options which default to `true` (e.g., `docker build --rm=true`) can only be +set to the non-default value by explicitly setting them to `false`: + +```bash +$ docker build --rm=false . +``` + +#### Multi + +You can specify options like `-a=[]` multiple times in a single command line, +for example in these commands: + +```bash +$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + +$ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls +``` + +Sometimes, multiple options can call for a more complex value string as for +`-v`: + +```bash +$ docker run -v /host:/container example/mysql +``` + +> **Note**: Do not use the `-t` and `-a stderr` options together due to +> limitations in the `pty` implementation. All `stderr` in `pty` mode +> simply goes to `stdout`. + +#### Strings and Integers + +Options like `--name=""` expect a string, and they +can only be specified once. Options like `-c=0` +expect an integer, and they can only be specified once. diff --git a/docs/reference/commandline/commit.md b/docs/reference/commandline/commit.md new file mode 100644 index 0000000000..f713eeab97 --- /dev/null +++ b/docs/reference/commandline/commit.md @@ -0,0 +1,117 @@ +--- +title: "commit" +description: "The commit command description and usage" +keywords: "commit, file, changes" +--- + + + +# commit + +```markdown +Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] + +Create a new image from a container's changes + +Options: + -a, --author string Author (e.g., "John Hannibal Smith ") + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Commit message + -p, --pause Pause container during commit (default true) +``` + +## Description + +It can be useful to commit a container's file changes or settings into a new +image. This allows you to debug a container by running an interactive shell, or to +export a working dataset to another server. Generally, it is better to use +Dockerfiles to manage your images in a documented and maintainable way. +[Read more about valid image names and tags](tag.md). + +The commit operation will not include any data contained in +volumes mounted inside the container. + +By default, the container being committed and its processes will be paused +while the image is committed. This reduces the likelihood of encountering data +corruption during the process of creating the commit. If this behavior is +undesired, set the `--pause` option to false. + +The `--change` option will apply `Dockerfile` instructions to the image that is +created. 
Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Examples + +### Commit a container + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker commit c3f279d17e0a svendowideit/testimage:version3 + +f5283438590d + +$ docker images + +REPOSITORY TAG ID CREATED SIZE +svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB +``` + +### Commit a container with new configurations + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a + +[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] + +$ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 + +f5283438590d + +$ docker inspect -f "{{ .Config.Env }}" f5283438590d + +[HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] +``` + +### Commit a container with new `CMD` and `EXPOSE` instructions + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton + +$ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 + +f5283438590d + +$ docker run -d svendowideit/testimage:version4 + +89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 + +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp distracted_fermat +c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours desperate_dubinsky +197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours focused_hamilton +``` diff --git a/docs/reference/commandline/container.md b/docs/reference/commandline/container.md new file mode 100644 index 0000000000..5eefbf2c3e --- /dev/null +++ b/docs/reference/commandline/container.md @@ -0,0 +1,61 @@ + +--- +title: "container" +description: "The container command description and usage" +keywords: "container" +--- + + + +# container + +```markdown +Usage: docker container COMMAND + +Manage containers + +Options: + --help Print usage + +Commands: + attach Attach to a running container + commit Create a new image from a container's changes + cp Copy files/folders between a container and the local filesystem + create Create a new container + diff Inspect changes to files or directories on a container's filesystem + exec Run a command in a running container + export Export a container's filesystem as a tar archive + inspect Display detailed information on one or more containers + kill Kill one or more running containers + logs Fetch the logs of a container + ls List containers + pause Pause all processes within one or more containers + port List port mappings or a specific mapping for the container + prune Remove all stopped containers + rename Rename a container + restart Restart one or more containers + rm Remove one or more containers + run Run a command in a new container + start Start one or more stopped containers + stats 
Display a live stream of container(s) resource usage statistics + stop Stop one or more running containers + top Display the running processes of a container + unpause Unpause all processes within one or more containers + update Update configuration of one or more containers + wait Block until one or more containers stop, then print their exit codes + +Run 'docker container COMMAND --help' for more information on a command. + +``` + +## Description + +Manage containers. + diff --git a/docs/reference/commandline/container_prune.md b/docs/reference/commandline/container_prune.md new file mode 100644 index 0000000000..72488901ed --- /dev/null +++ b/docs/reference/commandline/container_prune.md @@ -0,0 +1,126 @@ +--- +title: "container prune" +description: "Remove all stopped containers" +keywords: container, prune, delete, remove +--- + + + +# container prune + +```markdown +Usage: docker container prune [OPTIONS] + +Remove all stopped containers + +Options: + --filter filter Provide filter values (e.g. 'until=<timestamp>') + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Removes all stopped containers. + +## Examples + +### Prune containers + +```bash +$ docker container prune +WARNING! This will remove all stopped containers. +Are you sure you want to continue? [y/N] y +Deleted Containers: +4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063 +f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360 + +Total reclaimed space: 212 B +``` + +### Filtering + +The filtering flag (`--filter`) format is "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* until (`<timestamp>`) - only remove containers created before given timestamp +* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove containers with (or without, in case `label!=...` is used) the specified labels. + +The `until` filter can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the daemon machine's time. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the daemon will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps, enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`), +which removes containers with the specified labels. The other +format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes +containers without the specified labels.
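+For example, a label-based prune might look like the following sketch (the
+`deprecated` label is purely illustrative):
+
+```bash
+$ docker container prune --force --filter "label=deprecated"
+```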
+ +The following removes containers created more than 5 minutes ago: + +```bash +$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' + +CONTAINER ID IMAGE COMMAND CREATED AT STATUS +61b9efa71024 busybox "sh" 2017-01-04 13:23:33 -0800 PST Exited (0) 41 seconds ago +53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 12 minutes ago + +$ docker container prune --force --filter "until=5m" + +Deleted Containers: +53a9bc23a5168b6caa2bfbefddf1b30f93c7ad57f3dec271fd32707497cb9369 + +Total reclaimed space: 25 B + +$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' + +CONTAINER ID IMAGE COMMAND CREATED AT STATUS +61b9efa71024 busybox "sh" 2017-01-04 13:23:33 -0800 PST Exited (0) 44 seconds ago +``` + +The following removes containers created before `2017-01-04T13:10:00`: + +```bash +$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' + +CONTAINER ID IMAGE COMMAND CREATED AT STATUS +53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 7 minutes ago +4a75091a6d61 busybox "sh" 2017-01-04 13:09:53 -0800 PST Exited (0) 9 minutes ago + +$ docker container prune --force --filter "until=2017-01-04T13:10:00" + +Deleted Containers: +4a75091a6d618526fcd8b33ccd6e5928ca2a64415466f768a6180004b0c72c6c + +Total reclaimed space: 27 B + +$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}' + +CONTAINER ID IMAGE COMMAND CREATED AT STATUS +53a9bc23a516 busybox "sh" 2017-01-04 13:11:59 -0800 PST Exited (0) 9 minutes ago +``` + +## Related commands + +* [system df](system_df.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/docs/reference/commandline/cp.md b/docs/reference/commandline/cp.md new file mode 100644 index 0000000000..5cbbee25ae --- /dev/null +++ b/docs/reference/commandline/cp.md @@ -0,0 +1,115 @@ +--- +title: "cp" +description: "The cp command description and usage" +keywords: "copy, container, files, folders" +--- + + + +# cp + +```markdown +Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH + +Copy files/folders between a container and the local filesystem + +Use '-' as the source to read a tar archive from stdin +and extract it to a directory destination in a container. +Use '-' as the destination to stream a tar archive of a +container source to stdout. + +Options: + -L, --follow-link Always follow symbol link in SRC_PATH + --help Print usage +``` + +## Description + +The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. +You can copy from the container's file system to the local machine or the +reverse, from the local filesystem to the container. If `-` is specified for +either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from +`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. +The `SRC_PATH` or `DEST_PATH` can be a file or directory. + +The `docker cp` command assumes container paths are relative to the container's +`/` (root) directory. This means supplying the initial forward slash is optional; +The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and +`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can +be an absolute or relative value. 
The command interprets a local machine's +relative paths as relative to the current working directory where `docker cp` is +run. + +The `cp` command behaves like the Unix `cp -a` command in that directories are +copied recursively with permissions preserved if possible. Ownership is set to +the user and primary group at the destination. For example, files copied to a +container are created with `UID:GID` of the root user. Files copied to the local +machine are created with the `UID:GID` of the user which invoked the `docker cp` +command. If you specify the `-L` option, `docker cp` follows any symbolic link +in the `SRC_PATH`. `docker cp` does *not* create parent directories for +`DEST_PATH` if they do not exist. + +Assuming a path separator of `/`, a first argument of `SRC_PATH` and second +argument of `DEST_PATH`, the behavior is as follows: + +- `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. + - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` +- `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` (that is: _slash_ followed by _dot_) + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` (that is: _slash_ followed by _dot_) + - the *content* of the source directory is copied into this + directory + +The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above +rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not +the target, is copied by default. To copy the link target and not the link, specify +the `-L` option. + +A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can +also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local +machine, for example `file:name.txt`. If you use a `:` in a local machine path, +you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` + +It is not possible to copy certain system files such as resources under +`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by +the user in the container. However, you can still copy such files by manually +running `tar` in `docker exec`. Both of the following examples do the same thing +in different ways (consider `SRC_PATH` and `DEST_PATH` are directories): + +```bash +$ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - +``` + +```bash +$ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - +``` + +Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. +The command extracts the content of the tar to the `DEST_PATH` in container's +filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as +the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. 
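+As a quick sketch of everyday usage (the container name `mycontainer` and the
+paths are only illustrative, and the destination directories must already exist):
+
+```bash
+# Copy a local file into a container, then copy a log file back out.
+$ docker cp ./config.conf mycontainer:/etc/app/config.conf
+$ docker cp mycontainer:/var/log/app.log ./app.log
+```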
diff --git a/docs/reference/commandline/create.md b/docs/reference/commandline/create.md new file mode 100644 index 0000000000..8a57f2ffe9 --- /dev/null +++ b/docs/reference/commandline/create.md @@ -0,0 +1,260 @@ +--- +title: "create" +description: "The create command description and usage" +keywords: "docker, create, container" +--- + + + +# create + +Creates a new container. + +```markdown +Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. + --cpu-percent int CPU percent (Windows only) + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --device value Add a host device to the container (default []) + --device-cgroup-rule value Add a rule to the cgroup allowed devices list + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + --init Run an init inside the container that forwards signals and reaps processes + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string Maximum IO bandwidth limit for the system drive 
(Windows only) + --io-maxiops uint Maximum IOps limit for the system drive (Windows only) + --ip string IPv4 address (e.g., 172.30.100.104) + --ip6 string IPv6 address (e.g., 2001:db8::33) + --ipc string IPC namespace to use + --isolation string Container isolation technology + --kernel-memory string Kernel memory limit + -l, --label value Set meta data on a container (default []) + --label-file value Read in a line delimited file of labels (default []) + --link value Add link to another container (default []) + --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) + --log-driver string Logging driver for the container + --log-opt value Log driver options (default []) + --mac-address string Container MAC address (e.g., 92:d0:c6:0a:29:33) + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) + --mount value Attach a filesytem mount to the container (default []) + --name string Assign a name to the container + --network-alias value Add network-scoped alias for the container (default []) + --network string Connect a container to a network (default "default") + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-healthcheck Disable any container-specified HEALTHCHECK + --oom-kill-disable Disable OOM Killer + --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) + --pid string PID namespace to use + --pids-limit int Tune container pids limit (set -1 for unlimited), kernel >= 4.3 + --privileged Give extended privileges to this container + -p, --publish value Publish a container's port(s) to the host (default []) + -P, --publish-all Publish all exposed ports to random ports + --read-only Mount the container's root filesystem as read only + --restart string Restart policy to apply when a container exits (default "no") + Possible values are: no, on-failure[:max-retry], always, unless-stopped + --rm Automatically remove the container when it exits + --runtime string Runtime to use for this container + --security-opt value Security Options (default []) + --shm-size bytes Size of /dev/shm + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --stop-signal string Signal to stop a container (default "SIGTERM") + --stop-timeout=10 Timeout (in seconds) to stop a container + --storage-opt value Storage driver options for the container (default []) + --sysctl value Sysctl options (default map[]) + --tmpfs value Mount a tmpfs directory (default []) + -t, --tty Allocate a pseudo-TTY + --ulimit value Ulimit options (default []) + -u, --user string Username or UID (format: [:]) + --userns string User namespace to use + 'host': Use the Docker host user namespace + '': Use the Docker daemon user namespace specified by `--userns-remap` option. + --uts string UTS namespace to use + -v, --volume value Bind mount a volume (default []). The format + is `[host-src:]container-dest[:]`. + The comma-delimited `options` are [rw|ro], + [z|Z], [[r]shared|[r]slave|[r]private], + [delegated|cached|consistent], and + [nocopy]. 
The 'host-src' is an absolute path + or a name value. + --volume-driver string Optional volume driver for the container + --volumes-from value Mount volumes from the specified container(s) (default []) + -w, --workdir string Working directory inside the container +``` +## Description + +The `docker create` command creates a writeable container layer over the +specified image and prepares it for running the specified command. The +container ID is then printed to `STDOUT`. This is similar to `docker run -d` +except the container is never started. You can then use the +`docker start <container_id>` command to start the container at any point. + +This is useful when you want to set up a container configuration ahead of time +so that it is ready to start when you need it. The initial status of the +new container is `created`. + +Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details. + +## Examples + +### Create and start a container + +```bash +$ docker create -t -i fedora bash + +6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 + +$ docker start -a -i 6d8af538ec5 + +bash-4.2# +``` + +### Initialize volumes + +As of v1.4.0 container volumes are initialized during the `docker create` phase +(i.e., `docker run` too). For example, this allows you to `create` the `data` +volume container, and then use it from another container: + +```bash +$ docker create -v /data --name data ubuntu + +240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 + +$ docker run --rm --volumes-from data ubuntu ls -la /data + +total 8 +drwxr-xr-x 2 root root 4096 Dec 5 04:10 . +drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. +``` + +Similarly, `create` a host directory bind mounted volume container, which can +then be used from the subsequent container: + +```bash +$ docker create -v /home/docker:/docker --name docker ubuntu + +9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 + +$ docker run --rm --volumes-from docker ubuntu ls -la /docker + +total 20 +drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . +drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. +-rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history +-rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc +-rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig +drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local +-rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile +drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh +drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker +``` + + +### Set storage driver options per container + +```bash +$ docker create -it --storage-opt size=120G fedora /bin/bash +``` + +This (`size`) allows you to set the container rootfs size to 120G at creation time. +This option is only available for the `devicemapper`, `btrfs`, `overlay2`, +`windowsfilter` and `zfs` graph drivers. +For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, +the user cannot pass a size less than the Default BaseFS Size. +For the `overlay2` storage driver, the size option is only available if the +backing fs is `xfs` and mounted with the `pquota` mount option. +Under these conditions, the user can pass any size less than the backing fs size. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=<value>` option sets a container's isolation +technology. On Linux, the only supported option is `default`, which uses +Linux namespaces.
On Microsoft Windows, you can specify these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt`. If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the daemon is running on Windows Server, or `hyperv` if running on Windows client. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + +### Dealing with dynamically created devices (--device-cgroup-rule) + +Devices available to a container are assigned at creation time. The +assigned devices will both be added to the cgroup.allow file and +created into the container once it is run. This poses a problem when +a new device needs to be added to a running container. + +One solution is to add a more permissive rule to a container, +allowing it access to a wider range of devices. For example, supposing +our container needs access to a character device with major `42` and +any minor number (added as new devices appear), the +following rule would be added: + +``` +docker create --device-cgroup-rule='c 42:* rmw' --name my-container my-image +``` + +Then, a user could ask `udev` to execute a script that would run `docker exec my-container mknod newDevX c 42 <minor>` +to create the required device when it is added. + +NOTE: initially present devices still need to be explicitly added to +the create/run command. diff --git a/docs/reference/commandline/deploy.md b/docs/reference/commandline/deploy.md new file mode 100644 index 0000000000..a86b2b4b45 --- /dev/null +++ b/docs/reference/commandline/deploy.md @@ -0,0 +1,112 @@ +--- +title: "deploy" +description: "The deploy command description and usage" +keywords: "stack, deploy" +advisory: "experimental" +--- + + + +# deploy (experimental) + +An alias for `stack deploy`. + +```markdown +Usage: docker deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + --compose-file string Path to a Compose file + --help Print usage + --prune Prune services that are no longer referenced + --with-registry-auth Send registry authentication details to Swarm agents +``` + +## Description + +Create and update a stack from a `compose` or a `dab` file on the swarm. This command +must be run targeting a manager node. + +## Examples + +### Compose file + +The `deploy` command supports compose file version `3.0` and above.
+ +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility + +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +### DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility + +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify that the services were correctly created: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related commands + +* [stack config](stack_config.md) +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/docs/reference/commandline/diff.md b/docs/reference/commandline/diff.md new file mode 100644 index 0000000000..e6e12cef80 --- /dev/null +++ b/docs/reference/commandline/diff.md @@ -0,0 +1,67 @@ +--- +title: "diff" +description: "The diff command description and usage" +keywords: "list, changed, files, container" +--- + + + +# diff + +```markdown +Usage: docker diff CONTAINER + +Inspect changes to files or directories on a container's filesystem + +Options: + --help Print usage +``` + +## Description + +List the changed files and directories in a container᾿s filesystem since the 
+container was created. Three different types of change are tracked: + +| Symbol | Description | +|--------|---------------------------------| +| `A` | A file or directory was added | +| `D` | A file or directory was deleted | +| `C` | A file or directory was changed | + +You can use the full or shortened container ID or the container name set using +`docker run --name` option. + +## Examples + +Inspect the changes to an `nginx` container: + +```bash +$ docker diff 1fdfd1f54c1b + +C /dev +C /dev/console +C /dev/core +C /dev/stdout +C /dev/fd +C /dev/ptmx +C /dev/stderr +C /dev/stdin +C /run +A /run/nginx.pid +C /var/lib/nginx/tmp +A /var/lib/nginx/tmp/client_body +A /var/lib/nginx/tmp/fastcgi +A /var/lib/nginx/tmp/proxy +A /var/lib/nginx/tmp/scgi +A /var/lib/nginx/tmp/uwsgi +C /var/log/nginx +A /var/log/nginx/access.log +A /var/log/nginx/error.log +``` diff --git a/docs/reference/commandline/dockerd.md b/docs/reference/commandline/dockerd.md new file mode 100644 index 0000000000..93774c841b --- /dev/null +++ b/docs/reference/commandline/dockerd.md @@ -0,0 +1,1469 @@ +--- +title: "dockerd" +aliases: ["/engine/reference/commandline/daemon/"] +description: "The daemon command description and usage" +keywords: "container, daemon, runtime" +--- + + + +# daemon + +```markdown +Usage: dockerd COMMAND + +A self-sufficient runtime for containers. + +Options: + --add-runtime runtime Register an additional OCI compatible runtime (default []) + --allow-nondistributable-artifacts list Push nondistributable artifacts to specified registries (default []) + --api-cors-header string Set CORS headers in the Engine API + --authorization-plugin list Authorization plugins to load (default []) + --bip string Specify network bridge IP + -b, --bridge string Attach containers to a network bridge + --cgroup-parent string Set parent cgroup for all containers + --cluster-advertise string Address or interface name to advertise + --cluster-store string URL of the distributed storage backend + --cluster-store-opt map Set cluster store options (default map[]) + --config-file string Daemon configuration file (default "/etc/docker/daemon.json") + --containerd string Path to containerd socket + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --data-root string Root directory of persistent Docker state (default "/var/lib/docker") + -D, --debug Enable debug mode + --default-gateway ip Container default gateway IPv4 address + --default-gateway-v6 ip Container default gateway IPv6 address + --default-runtime string Default OCI runtime for containers (default "runc") + --default-ulimit ulimit Default ulimits for containers (default []) + --disable-legacy-registry Disable contacting legacy registries + --dns list DNS server to use (default []) + --dns-opt list DNS options to use (default []) + --dns-search list DNS search domains to use (default []) + --exec-opt list Runtime execution options (default []) + --exec-root string Root directory for execution state files (default "/var/run/docker") + --experimental Enable experimental features + --fixed-cidr string IPv4 subnet for fixed IPs + --fixed-cidr-v6 string IPv6 subnet for fixed IPs + -G, --group string Group for the unix socket (default "docker") + --help Print usage + -H, --host list Daemon socket(s) to connect to (default []) + --icc Enable inter-container communication (default true) + --init Run an init in the container to forward signals and reap processes + --init-path string 
Path to the docker-init binary + --insecure-registry list Enable insecure registry communication (default []) + --ip ip Default IP when binding container ports (default 0.0.0.0) + --ip-forward Enable net.ipv4.ip_forward (default true) + --ip-masq Enable IP masquerading (default true) + --iptables Enable addition of iptables rules (default true) + --ipv6 Enable IPv6 networking + --label list Set key=value labels to the daemon (default []) + --live-restore Enable live restore of docker when containers are still running + --log-driver string Default driver for container logs (default "json-file") + -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") + --log-opt map Default log driver options for containers (default map[]) + --max-concurrent-downloads int Set the max concurrent downloads for each pull (default 3) + --max-concurrent-uploads int Set the max concurrent uploads for each push (default 5) + --metrics-addr string Set default address and port to serve the metrics api on + --mtu int Set the containers network MTU + --no-new-privileges Set no-new-privileges by default for new containers + --oom-score-adjust int Set the oom_score_adj for the daemon (default -500) + -p, --pidfile string Path to use for daemon PID file (default "/var/run/docker.pid") + --raw-logs Full timestamps without ANSI coloring + --registry-mirror list Preferred Docker registry mirror (default []) + --seccomp-profile string Path to seccomp profile + --selinux-enabled Enable selinux support + --shutdown-timeout int Set the default shutdown timeout (default 15) + -s, --storage-driver string Storage driver to use + --storage-opt list Storage driver options (default []) + --swarm-default-advertise-addr string Set default address or interface for swarm advertised address + --tls Use TLS; implied by --tlsverify + --tlscacert string Trust certs signed only by this CA (default "~/.docker/ca.pem") + --tlscert string Path to TLS certificate file (default "~/.docker/cert.pem") + --tlskey string Path to TLS key file (default ~/.docker/key.pem") + --tlsverify Use TLS and verify the remote + --userland-proxy Use userland proxy for loopback traffic (default true) + --userland-proxy-path string Path to the userland proxy binary + --userns-remap string User/Group setting for user namespaces + -v, --version Print version information and quit +``` + +Options with [] may be specified multiple times. + +## Description + +`dockerd` is the persistent process that manages containers. Docker +uses different binaries for the daemon and client. To run the daemon you +type `dockerd`. + +To run the daemon with debug output, use `dockerd -D` or add `debug: true` to +the `daemon.json` file. + +> **Note**: In Docker 1.13 and higher, enable experimental features by starting +> `dockerd` with the `--experimental` flag or adding `experimental: true` to the +> `daemon.json` file. In earlier Docker versions, a different build was required +> to enable experimental features. + +## Examples + +### Daemon socket option + +The Docker daemon can listen for [Docker Engine API](../api/) +requests via three different types of Socket: `unix`, `tcp`, and `fd`. + +By default, a `unix` domain socket (or IPC socket) is created at +`/var/run/docker.sock`, requiring either `root` permission, or `docker` group +membership. + +If you need to access the Docker daemon remotely, you need to enable the `tcp` +Socket. 
Beware that the default setup provides un-encrypted and +un-authenticated direct access to the Docker daemon - and should be secured +either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/https/), or by +putting a secure web proxy in front of it. You can listen on port `2375` on all +network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network +interface using its IP address: `-H tcp://192.168.59.103:2375`. It is +conventional to use port `2375` for un-encrypted, and port `2376` for encrypted +communication with the daemon. + +> **Note**: If you're using an HTTPS encrypted socket, keep in mind that only +> TLS1.0 and greater are supported. Protocols SSLv3 and under are not +> supported anymore for security reasons. + +On Systemd based systems, you can communicate with the daemon via +[Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), +use `dockerd -H fd://`. Using `fd://` will work perfectly for most setups but +you can also specify individual sockets: `dockerd -H fd://3`. If the +specified socket activated files aren't found, then Docker will exit. You can +find examples of using Systemd socket activation with Docker and Systemd in the +[Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/). + +You can configure the Docker daemon to listen to multiple sockets at the same +time using multiple `-H` options: + +```bash +# listen using the default unix socket, and on 2 specific IP addresses on this host. + +$ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2 +``` + +The Docker client will honor the `DOCKER_HOST` environment variable to set the +`-H` flag for the client. Use **one** of the following commands: + +```bash +$ docker -H tcp://0.0.0.0:2375 ps +``` + +```bash +$ export DOCKER_HOST="tcp://0.0.0.0:2375" + +$ docker ps +``` + +Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than +the empty string is equivalent to setting the `--tlsverify` flag. The following +are equivalent: + +```bash +$ docker --tlsverify ps +# or +$ export DOCKER_TLS_VERIFY=1 +$ docker ps +``` + +The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` +environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes +precedence over `HTTP_PROXY`. + +#### Bind Docker to another host/port or a Unix socket + +> **Warning**: +> Changing the default `docker` daemon binding to a +> TCP port or Unix *docker* user group will increase your security risks +> by allowing non-root users to gain *root* access on the host. Make sure +> you control access to `docker`. If you are binding +> to a TCP port, anyone with access to that port has full Docker access; +> so it is not advisable on an open network. + +With `-H` it is possible to make the Docker daemon to listen on a +specific IP and port. By default, it will listen on +`unix:///var/run/docker.sock` to allow only local connections by the +*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP +to give access to everybody, but that is **not recommended** because +then it is trivial for someone to gain root access to the host where the +daemon is running. + +Similarly, the Docker client can use `-H` to connect to a custom port. +The Docker client will default to connecting to `unix:///var/run/docker.sock` +on Linux, and `tcp://127.0.0.1:2376` on Windows. 
+ +`-H` accepts host and port assignment in the following format: + + tcp://[host]:[port][path] or unix://path + +For example: + +- `tcp://` -> TCP connection to `127.0.0.1` on either port `2376` when TLS encryption + is on, or port `2375` when communication is in plain text. +- `tcp://host:2375` -> TCP connection on + host:2375 +- `tcp://host:2375/path` -> TCP connection on + host:2375 and prepend path to all requests +- `unix://path/to/socket` -> Unix socket located + at `path/to/socket` + +`-H`, when empty, will default to the same value as +when no `-H` was passed in. + +`-H` also accepts short form for TCP bindings: `host:` or `host:port` or `:port` + +Run Docker in daemon mode: + +```bash +$ sudo /dockerd -H 0.0.0.0:5555 & +``` + +Download an `ubuntu` image: + +```bash +$ docker -H :5555 pull ubuntu +``` + +You can use multiple `-H`, for example, if you want to listen on both +TCP and a Unix socket + +```bash +# Run docker in daemon mode +$ sudo /dockerd -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock & +# Download an ubuntu image, use default Unix socket +$ docker pull ubuntu +# OR use the TCP port +$ docker -H tcp://127.0.0.1:2375 pull ubuntu +``` + +### Daemon storage-driver + +The Docker daemon has support for several different image layer storage +drivers: `aufs`, `devicemapper`, `btrfs`, `zfs`, `overlay` and `overlay2`. + +The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that +is unlikely to be merged into the main kernel. These are also known to cause +some serious kernel crashes. However `aufs` allows containers to share +executable and shared library memory, so is a useful choice when running +thousands of containers with the same program or libraries. + +The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) +snapshots. For each devicemapper graph location – typically +`/var/lib/docker/devicemapper` – a thin pool is created based on two block +devices, one for data and one for metadata. By default, these block devices +are created automatically by using loopback mounts of automatically created +sparse files. Refer to [Storage driver options](#storage-driver-options) below +for a way how to customize this setup. +[~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) +article explains how to tune your existing setup without the use of options. + +The `btrfs` driver is very fast for `docker build` - but like `devicemapper` +does not share executable memory between devices. Use +`dockerd -s btrfs -g /mnt/btrfs_partition`. + +The `zfs` driver is probably not as fast as `btrfs` but has a longer track record +on stability. Thanks to `Single Copy ARC` shared blocks between clones will be +cached only once. Use `dockerd -s zfs`. To select a different zfs filesystem +set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options). + +The `overlay` is a very fast union filesystem. It is now merged in the main +Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). `overlay` +also supports page cache sharing, this means multiple containers accessing +the same file can share a single page cache entry (or entries), it makes +`overlay` as efficient with memory as `aufs` driver. Call +`dockerd -s overlay` to use it. + +> **Note**: As promising as `overlay` is, the feature is still quite young and +> should not be used in production. 
Most notably, using `overlay` can cause +> excessive inode consumption (especially as the number of images grows), as +> well as > being incompatible with the use of RPMs. + +The `overlay2` uses the same fast union filesystem but takes advantage of +[additional features](https://lkml.org/lkml/2015/2/11/106) added in Linux +kernel 4.0 to avoid excessive inode consumption. Call `dockerd -s overlay2` +to use it. + +> **Note**: Both `overlay` and `overlay2` are currently unsupported on `btrfs` +> or any Copy on Write filesystem and should only be used over `ext4` partitions. + +### Options per storage driver + +Particular storage-driver can be configured with options specified with +`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm`, +options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. + +#### Devicemapper options + +This is an example of the configuration file for devicemapper on Linux: + +```json +{ + "storage-driver": "devicemapper", + "storage-opts": [ + "dm.thinpooldev=/dev/mapper/thin-pool", + "dm.use_deferred_deletion=true", + "dm.use_deferred_removal=true" + ] +} +``` + +##### `dm.thinpooldev` + +Specifies a custom block storage device to use for the thin pool. + +If using a block device for device mapper storage, it is best to use `lvm` +to create and manage the thin-pool volume. This volume is then handed to Docker +to exclusively create snapshot volumes needed for images and containers. + +Managing the thin-pool outside of Engine makes for the most feature-rich +method of having Docker utilize device mapper thin provisioning as the +backing storage for Docker containers. The highlights of the lvm-based +thin-pool management feature include: automatic or interactive thin-pool +resize support, dynamically changing thin-pool features, automatic thinp +metadata checking when lvm activates the thin-pool, etc. + +As a fallback if no thin pool is provided, loopback files are +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Engine daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +###### Example: + +```bash +$ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool +``` + +##### `dm.directlvm_device` + +As an alternative to providing a thin pool as above, Docker can setup a block +device for you. + +###### Example: + +```bash +$ sudo dockerd --storage-opt dm.directlvm_device=/dev/xvdf +``` + +##### `dm.thinp_percent` + +Sets the percentage of passed in block device to use for storage. + +###### Example: + +```bash +$ sudo dockerd --storage-opt dm.thinp_percent=95 +``` + +##### `dm.thinp_metapercent` + +Sets the percentage of the passed in block device to use for metadata storage. 
+
+###### Example:
+
+```bash
+$ sudo dockerd --storage-opt dm.thinp_metapercent=1
+```
+
+##### `dm.thinp_autoextend_threshold`
+
+Sets the percentage of space used before `lvm` attempts to
+autoextend the available space [100 = disabled].
+
+###### Example:
+
+```bash
+$ sudo dockerd --storage-opt dm.thinp_autoextend_threshold=80
+```
+
+##### `dm.thinp_autoextend_percent`
+
+Sets the percentage value to increase the thin pool by when `lvm`
+attempts to autoextend the available space [100 = disabled].
+
+###### Example:
+
+```bash
+$ sudo dockerd --storage-opt dm.thinp_autoextend_percent=20
+```
+
+##### `dm.basesize`
+
+Specifies the size to use when creating the base device, which limits the
+size of images and containers. The default value is 10G. Note that thin devices
+are inherently "sparse", so a 10G device which is mostly empty doesn't use
+10 GB of space on the pool. However, the larger the device is, the more space
+the filesystem uses even when empty.
+
+The base device size can be increased at daemon restart, which allows
+all future images and containers (based on those new images) to be of the
+new base device size.
+
+###### Examples
+
+```bash
+$ sudo dockerd --storage-opt dm.basesize=50G
+```
+
+This increases the base device size to 50G. The Docker daemon throws an
+error if the existing base device size is larger than 50G. A user can use
+this option to expand the base device size; however, shrinking is not permitted.
+
+This value affects the system-wide "base" empty filesystem
+that may already be initialized and inherited by pulled images. Typically,
+a change to this value requires additional steps to take effect:
+
+```bash
+$ sudo service docker stop
+
+$ sudo rm -rf /var/lib/docker
+
+$ sudo service docker start
+```
+
+##### `dm.loopdatasize`
+
+> **Note**: This option configures devicemapper loopback, which should not
+> be used in production.
+
+Specifies the size to use when creating the loopback file for the
+"data" device which is used for the thin pool. The default size is
+100G. The file is sparse, so it will not initially take up this
+much space.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.loopdatasize=200G
+```
+
+##### `dm.loopmetadatasize`
+
+> **Note**: This option configures devicemapper loopback, which should not
+> be used in production.
+
+Specifies the size to use when creating the loopback file for the
+"metadata" device which is used for the thin pool. The default size
+is 2G. The file is sparse, so it will not initially take up
+this much space.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.loopmetadatasize=4G
+```
+
+##### `dm.fs`
+
+Specifies the filesystem type to use for the base device. The supported
+options are "ext4" and "xfs". The default is "xfs".
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.fs=ext4
+```
+
+##### `dm.mkfsarg`
+
+Specifies extra mkfs arguments to be used when creating the base device.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"
+```
+
+##### `dm.mountopt`
+
+Specifies extra mount options used when mounting the thin devices.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.mountopt=nodiscard
+```
+
+##### `dm.datadev`
+
+(Deprecated, use `dm.thinpooldev`)
+
+Specifies a custom block device to use for data for the thin pool.
+ +If using a block device for device mapper storage, ideally both `datadev` and +`metadatadev` should be specified to completely avoid using the loopback +device. + +###### Example + +```bash +$ sudo dockerd \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 +``` + +##### `dm.metadatadev` + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for metadata for the thin pool. + +For best performance the metadata should be on a different spindle than the +data, or even better on an SSD. + +If setting up a new metadata pool it is required to be valid. This can be +achieved by zeroing the first 4k to indicate empty metadata, like this: + +```bash +$ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 +``` + +###### Example + +```bash +$ sudo dockerd \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 +``` + +##### `dm.blocksize` + +Specifies a custom blocksize to use for the thin pool. The default +blocksize is 64K. + +###### Example + +```bash +$ sudo dockerd --storage-opt dm.blocksize=512K +``` + +##### `dm.blkdiscard` + +Enables or disables the use of `blkdiscard` when removing devicemapper +devices. This is enabled by default (only) if using loopback devices and is +required to resparsify the loopback file on image/container removal. + +Disabling this on loopback can lead to *much* faster container removal +times, but will make the space used in `/var/lib/docker` directory not be +returned to the system for other use when containers are removed. + +###### Examples + +```bash +$ sudo dockerd --storage-opt dm.blkdiscard=false +``` + +##### `dm.override_udev_sync_check` + +Overrides the `udev` synchronization checks between `devicemapper` and `udev`. +`udev` is the device manager for the Linux kernel. + +To view the `udev` sync support of a Docker daemon that is using the +`devicemapper` driver, run: + +```bash +$ docker info +[...] +Udev Sync Supported: true +[...] +``` + +When `udev` sync support is `true`, then `devicemapper` and udev can +coordinate the activation and deactivation of devices for containers. + +When `udev` sync support is `false`, a race condition occurs between +the`devicemapper` and `udev` during create and cleanup. The race condition +results in errors and failures. (For information on these failures, see +[docker#4036](https://github.com/docker/docker/issues/4036)) + +To allow the `docker` daemon to start, regardless of `udev` sync not being +supported, set `dm.override_udev_sync_check` to true: + +```bash +$ sudo dockerd --storage-opt dm.override_udev_sync_check=true +``` + +When this value is `true`, the `devicemapper` continues and simply warns +you the errors are happening. + +> **Note**: The ideal is to pursue a `docker` daemon and environment that does +> support synchronizing with `udev`. For further discussion on this +> topic, see [docker#4036](https://github.com/docker/docker/issues/4036). +> Otherwise, set this flag for migrating existing Docker daemons to +> a daemon with a supported environment. + +##### `dm.use_deferred_removal` + +Enables use of deferred device removal if `libdm` and the kernel driver +support the mechanism. + +Deferred device removal means that if device is busy when devices are +being removed/deactivated, then a deferred removal is scheduled on +device. And devices automatically go away when last user of the device +exits. + +For example, when a container exits, its associated thin device is removed. 
+
+If that device has leaked into some other mount namespace and can't be
+removed, the container exit still succeeds and this option causes the
+system to schedule the device for deferred removal. It does not wait in a
+loop trying to remove a busy device.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.use_deferred_removal=true
+```
+
+##### `dm.use_deferred_deletion`
+
+Enables use of deferred device deletion for thin pool devices. By default,
+thin pool device deletion is synchronous. Before a container is deleted,
+the Docker daemon removes any associated devices. If the storage driver
+cannot remove a device, the container deletion fails and the daemon returns
+an error:
+
+```none
+Error deleting container: Error response from daemon: Cannot destroy container
+```
+
+To avoid this failure, enable both deferred device deletion and deferred
+device removal on the daemon.
+
+```bash
+$ sudo dockerd \
+      --storage-opt dm.use_deferred_deletion=true \
+      --storage-opt dm.use_deferred_removal=true
+```
+
+With these two options enabled, if a device is busy when the driver is
+deleting a container, the driver marks the device as deleted. Later, when
+the device isn't in use, the driver deletes it.
+
+In general, it should be safe to enable this option by default. It helps
+when mount points are unintentionally leaked across multiple mount
+namespaces.
+
+##### `dm.min_free_space`
+
+Specifies the minimum free space percentage in a thin pool required for new
+device creation to succeed. This check applies to both free data space and
+free metadata space. Valid values are from 0% to 99%. A value of 0% disables
+the free space check. If you do not specify a value for this option,
+the Engine uses a default value of 10%.
+
+Whenever a new thin pool device is created (during `docker pull` or during
+container creation), the Engine checks if the minimum free space is
+available. If sufficient space is unavailable, then device creation fails
+and any relevant `docker` operation fails.
+
+To recover from this error, create more free space in the thin pool by
+deleting some images and containers from it, or add more storage to the
+thin pool.
+
+To add more space to an LVM (logical volume management) thin pool, add
+more storage to the volume group that contains the thin pool; this should
+automatically resolve any errors. If your configuration uses loop devices,
+then stop the Engine daemon, grow the size of the loop files and restart the
+daemon to resolve the issue.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.min_free_space=10%
+```
+
+##### `dm.xfs_nospace_max_retries`
+
+Specifies the maximum number of retries XFS should attempt to complete
+IO when an ENOSPC (no space) error is returned by the underlying storage
+device.
+
+By default, XFS retries the IO indefinitely, which can result in an
+unkillable process. To change this behavior, set `dm.xfs_nospace_max_retries`
+to, for example, `0`; XFS then stops retrying IO after receiving ENOSPC and
+shuts down the filesystem.
+
+###### Example
+
+```bash
+$ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0
+```
+
+#### ZFS options
+
+##### `zfs.fsname`
+
+Sets the ZFS filesystem under which Docker creates its own datasets.
+By default, Docker picks up the ZFS filesystem where the Docker graph
+(`/var/lib/docker`) is located.
+ +###### Example + +```bash +$ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker +``` + +#### Btrfs options + +##### `btrfs.min_space` + +Specifies the minimum size to use when creating the subvolume which is used +for containers. If user uses disk quota for btrfs when creating or running +a container with **--storage-opt size** option, docker should ensure the +**size** cannot be smaller than **btrfs.min_space**. + +###### Example + +```bash +$ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G +``` + +#### Overlay2 options + +##### `overlay2.override_kernel_check` + +Overrides the Linux kernel version check allowing overlay2. Support for +specifying multiple lower directories needed by overlay2 was added to the +Linux kernel in 4.0.0. However, some older kernel versions may be patched +to add multiple lower directory support for OverlayFS. This option should +only be used after verifying this support exists in the kernel. Applying +this option on a kernel without this support will cause failures on mount. + +### Docker runtime execution options + +The Docker daemon relies on a +[OCI](https://github.com/opencontainers/runtime-spec) compliant runtime +(invoked via the `containerd` daemon) as its interface to the Linux +kernel `namespaces`, `cgroups`, and `SELinux`. + +By default, the Docker daemon automatically starts `containerd`. If you want to +control `containerd` startup, manually start `containerd` and pass the path to +the `containerd` socket using the `--containerd` flag. For example: + +```bash +$ sudo dockerd --containerd /var/run/dev/docker-containerd.sock +``` + +Runtimes can be registered with the daemon either via the +configuration file or using the `--add-runtime` command line argument. + +The following is an example adding 2 runtimes via the configuration: + +```json +{ + "default-runtime": "runc", + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + +This is the same example via the command line: + +```bash +$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement +``` + +> **Note**: Defining runtime arguments via the command line is not supported. + +#### Options for the runtime + +You can configure the runtime using options specified +with the `--exec-opt` flag. All the flag's options have the `native` prefix. A +single `native.cgroupdriver` option is available. + +The `native.cgroupdriver` option specifies the management of the container's +cgroups. You can only specify `cgroupfs` or `systemd`. If you specify +`systemd` and it is not available, the system errors out. If you omit the +`native.cgroupdriver` option,` cgroupfs` is used. + +This example sets the `cgroupdriver` to `systemd`: + +```bash +$ sudo dockerd --exec-opt native.cgroupdriver=systemd +``` + +Setting this option applies to all containers the daemon launches. + +Also Windows Container makes use of `--exec-opt` for special purpose. Docker user +can specify default container isolation technology with this, for example: + +```console +> dockerd --exec-opt isolation=hyperv +``` + +Will make `hyperv` the default isolation technology on Windows. If no isolation +value is specified on daemon start, on Windows client, the default is +`hyperv`, and on Windows server, the default is `process`. 
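+
+Once registered, a runtime can be selected per container with the `--runtime`
+flag on `docker run`. A minimal sketch using the `custom` runtime registered in
+the example above (the image name `my-image` is illustrative):
+
+```bash
+$ docker run --rm --runtime=custom my-image
+```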
+ +#### Daemon DNS options + +To set the DNS server for all Docker containers, use: + +```bash +$ sudo dockerd --dns 8.8.8.8 +``` + +To set the DNS search domain for all Docker containers, use: + +```bash +$ sudo dockerd --dns-search example.com +``` + +#### Allow push of nondistributable artifacts + +Some images (e.g., Windows base images) contain artifacts whose distribution is +restricted by license. When these images are pushed to a registry, restricted +artifacts are not included. + +To override this behavior for specific registries, use the +`--allow-nondistributable-artifacts` option in one of the following forms: + +* `--allow-nondistributable-artifacts myregistry:5000` tells the Docker daemon + to push nondistributable artifacts to myregistry:5000. +* `--allow-nondistributable-artifacts 10.1.0.0/16` tells the Docker daemon to + push nondistributable artifacts to all registries whose resolved IP address + is within the subnet described by the CIDR syntax. + +This option can be used multiple times. + +This option is useful when pushing images containing nondistributable artifacts +to a registry on an air-gapped network so hosts on that network can pull the +images without connecting to another server. + +> **Warning**: Nondistributable artifacts typically have restrictions on how +> and where they can be distributed and shared. Only use this feature to push +> artifacts to private registries and ensure that you are in compliance with +> any terms that cover redistributing nondistributable artifacts. + +#### Insecure registries + +Docker considers a private registry either secure or insecure. In the rest of +this section, *registry* is used for *private registry*, and `myregistry:5000` +is a placeholder example for a private registry. + +A secure registry uses TLS and a copy of its CA certificate is placed on the +Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure +registry is either not using TLS (i.e., listening on plain text HTTP), or is +using TLS with a CA certificate not known by the Docker daemon. The latter can +happen when the certificate was not found under +`/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification +failed (i.e., wrong CA). + +By default, Docker assumes all, but local (see local registries below), +registries are secure. Communicating with an insecure registry is not possible +if Docker assumes that registry is secure. In order to communicate with an +insecure registry, the Docker daemon requires `--insecure-registry` in one of +the following two forms: + +* `--insecure-registry myregistry:5000` tells the Docker daemon that + myregistry:5000 should be considered insecure. +* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries + whose domain resolve to an IP address is part of the subnet described by the + CIDR syntax, should be considered insecure. + +The flag can be used multiple times to allow multiple registries to be marked +as insecure. + +If an insecure registry is not marked as insecure, `docker pull`, +`docker push`, and `docker search` will result in an error message prompting +the user to either secure or pass the `--insecure-registry` flag to the Docker +daemon as described above. + +Local registries, whose IP address falls in the 127.0.0.0/8 range, are +automatically marked as insecure as of Docker 1.3.2. It is not recommended to +rely on this, as it may change in the future. 
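+
+For example, a daemon that should treat the placeholder registry from this
+section as insecure could be started as follows (a minimal sketch):
+
+```bash
+$ sudo dockerd --insecure-registry myregistry:5000
+```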
+ +Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted +communication, can be useful when running a local registry. However, +because its use creates security vulnerabilities it should ONLY be enabled for +testing purposes. For increased security, users should add their CA to their +system's list of trusted CAs instead of enabling `--insecure-registry`. + +##### Legacy Registries + +Enabling `--disable-legacy-registry` forces a docker daemon to only interact with registries which support the V2 protocol. Specifically, the daemon will not attempt `push`, `pull` and `login` to v1 registries. The exception to this is `search` which can still be performed on v1 registries. + +#### Running a Docker daemon behind an HTTPS_PROXY + +When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub +certificates will be replaced by the proxy's certificates. These certificates +need to be added to your Docker host's configuration: + +1. Install the `ca-certificates` package for your distribution +2. Ask your network admin for the proxy's CA certificate and append them to + `/etc/pki/tls/certs/ca-bundle.crt` +3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ dockerd`. + The `username:` and `password@` are optional - and are only needed if your + proxy is set up to require authentication. + +This will only add the proxy and authentication to the Docker daemon's requests - +your `docker build`s and running containers will need extra configuration to +use the proxy + +#### Default `ulimit` settings + +`--default-ulimit` allows you to set the default `ulimit` options to use for +all containers. It takes the same options as `--ulimit` for `docker run`. If +these defaults are not set, `ulimit` settings will be inherited, if not set on +`docker run`, from the Docker daemon. Any `--ulimit` options passed to +`docker run` will overwrite these defaults. + +Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to +set the maximum number of processes available to a user, not to a container. For details +please check the [run](run.md) reference. + +#### Node discovery + +The `--cluster-advertise` option specifies the `host:port` or `interface:port` +combination that this particular daemon instance should use when advertising +itself to the cluster. The daemon is reached by remote hosts through this value. +If you specify an interface, make sure it includes the IP address of the actual +Docker host. For Engine installation created through `docker-machine`, the +interface is typically `eth1`. + +The daemon uses [libkv](https://github.com/docker/libkv/) to advertise +the node within the cluster. Some key-value backends support mutual +TLS. To configure the client TLS settings used by the daemon can be configured +using the `--cluster-store-opt` flag, specifying the paths to PEM encoded +files. For example: + +```bash +$ sudo dockerd \ + --cluster-advertise 192.168.1.2:2376 \ + --cluster-store etcd://192.168.1.2:2379 \ + --cluster-store-opt kv.cacertfile=/path/to/ca.pem \ + --cluster-store-opt kv.certfile=/path/to/cert.pem \ + --cluster-store-opt kv.keyfile=/path/to/key.pem +``` + +The currently supported cluster store options are: + +| Option | Description | +|-----------------------|-------------| +| `discovery.heartbeat` | Specifies the heartbeat timer in seconds which is used by the daemon as a `keepalive` mechanism to make sure discovery module treats the node as alive in the cluster. 
If not configured, the default value is 20 seconds. | +| `discovery.ttl` | Specifies the TTL (time-to-live) in seconds which is used by the discovery module to timeout a node if a valid heartbeat is not received within the configured ttl value. If not configured, the default value is 60 seconds. | +| `kv.cacertfile` | Specifies the path to a local file with PEM encoded CA certificates to trust. | +| `kv.certfile` | Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store. | +| `kv.keyfile` | Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store. | +| `kv.path` | Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. | + +#### Access authorization + +Docker's access authorization can be extended by authorization plugins that your +organization can purchase or build themselves. You can install one or more +authorization plugins when you start the Docker `daemon` using the +`--authorization-plugin=PLUGIN_ID` option. + +```bash +$ sudo dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +The `PLUGIN_ID` value is either the plugin's name or a path to its specification +file. The plugin's implementation determines whether you can specify a name or +path. Consult with your Docker administrator to get information about the +plugins available to you. + +Once a plugin is installed, requests made to the `daemon` through the +command line or Docker's Engine API are allowed or denied by the plugin. +If you have multiple plugins installed, each plugin, in order, must +allow the request for it to complete. + +For information about how to create an authorization plugin, see [authorization +plugin](../../extend/plugins_authorization.md) section in the Docker extend section of this documentation. + + +#### Daemon user namespace options + +The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling +a process, and therefore a container, to have a unique range of user and +group IDs which are outside the traditional user and group range utilized by +the host system. Potentially the most important security improvement is that, +by default, container processes running as the `root` user will have expected +administrative privilege (with some restrictions) inside the container but will +effectively be mapped to an unprivileged `uid` on the host. + +When user namespace support is enabled, Docker creates a single daemon-wide mapping +for all containers running on the same engine instance. The mappings will +utilize the existing subordinate user and group ID feature available on all modern +Linux distributions. +The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and +[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be +read for the user, and optional group, specified to the `--userns-remap` +parameter. If you do not wish to specify your own user and/or group, you can +provide `default` as the value to this flag, and a user will be created on your behalf +and provided subordinate uid and gid ranges. This default user will be named +`dockremap`, and entries will be created for it in `/etc/passwd` and +`/etc/group` using your distro's standard user and group creation tools. 
+ +> **Note**: The single mapping per-daemon restriction is in place for now +> because Docker shares image layers from its local cache across all +> containers running on the engine instance. Since file ownership must be +> the same for all containers sharing the same layer content, the decision +> was made to map the file ownership on `docker pull` to the daemon's user and +> group mappings so that there is no delay for running containers once the +> content is downloaded. This design preserves the same performance for `docker +> pull`, `docker push`, and container startup as users expect with +> user namespaces disabled. + +##### Start the daemon with user namespaces enabled + +To enable user namespace support, start the daemon with the +`--userns-remap` flag, which accepts values in the following formats: + + - uid + - uid:gid + - username + - username:groupname + +If numeric IDs are provided, translation back to valid user or group names +will occur so that the subordinate uid and gid information can be read, given +these resources are name-based, not id-based. If the numeric ID information +provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon +startup will fail with an error message. + +**Example: starting with default Docker user management:** + +```bash +$ sudo dockerd --userns-remap=default +``` + +When `default` is provided, Docker will create - or find the existing - user and group +named `dockremap`. If the user is created, and the Linux distribution has +appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated +with a contiguous 65536 length range of subordinate user and group IDs, starting +at an offset based on prior entries in those files. For example, Ubuntu will +create the following range, based on an existing user named `user1` already owning +the first 65536 range: + +```bash +$ cat /etc/subuid +user1:100000:65536 +dockremap:165536:65536 +``` + +If you have a preferred/self-managed user with subordinate ID mappings already +configured, you can provide that username or uid to the `--userns-remap` flag. +If you have a group that doesn't match the username, you may provide the `gid` +or group name as well; otherwise the username will be used as the group name +when querying the system for the subordinate group ID range. + +The output of `docker info` can be used to determine if the daemon is running +with user namespaces enabled or not. If the daemon is configured with user +namespaces, the Security Options entry in the response will list "userns" as +one of the enabled security features. + +##### Behavior differences when user namespaces are enabled + +When you start the Docker daemon with `--userns-remap`, Docker segregates the graph directory +where the images are stored by adding an extra directory with a name corresponding to the +remapped UID and GID. For example, if the remapped UID and GID begin with `165536`, all +images and containers running with that remap setting are located in `/var/lib/docker/165536.165536` +instead of `/var/lib/docker/`. + +In addition, the files and directories within the new directory, which correspond to +images and container layers, are also owned by the new UID and GID. To set the ownership +correctly, you need to re-pull the images and restart the containers after starting the +daemon with `--userns-remap`. 
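+
+One quick way to confirm the segregated storage location after the daemon has
+been restarted is to list the remapped directory directly (a sketch, assuming
+the `165536` range from the example above):
+
+```bash
+$ sudo ls -ld /var/lib/docker/165536.165536
+```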
+ +##### Detailed information on `subuid`/`subgid` ranges + +Given potential advanced use of the subordinate ID ranges by power users, the +following paragraphs define how the Docker daemon currently uses the range entries +found within the subordinate range files. + +The simplest case is that only one contiguous range is defined for the +provided user or group. In this case, Docker will use that entire contiguous +range for the mapping of host uids and gids to the container process. This +means that the first ID in the range will be the remapped root user, and the +IDs above that initial ID will map host ID 1 through the end of the range. + +From the example `/etc/subuid` content shown above, the remapped root +user would be uid 165536. + +If the system administrator has set up multiple ranges for a single user or +group, the Docker daemon will read all the available ranges and use the +following algorithm to create the mapping ranges: + +1. The range segments found for the particular user will be sorted by *start ID* ascending. +2. Map segments will be created from each range in increasing value with a length matching the length of each segment. Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1100 -> 100 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 101 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user. +3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `proc/self/gid_map`. + +##### Disable user namespace for a container + +If you enable user namespaces on the daemon, all containers are started +with user namespaces enabled. In some situations you might want to disable +this feature for a container, for example, to start a privileged container (see +[user namespace known restrictions](#user-namespace-known-restrictions)). +To enable those advanced features for a specific container use `--userns=host` +in the `run/exec/create` command. +This option will completely disable user namespace mapping for the container's user. + +##### User namespace known restrictions + +The following standard Docker features are currently incompatible when +running a Docker daemon with user namespaces enabled: + + - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`) + - Using `--privileged` mode flag on `docker run` (unless also specifying `--userns=host`) + +In general, user namespaces are an advanced feature and will require +coordination with other capabilities. For example, if volumes are mounted from +the host, file ownership will have to be pre-arranged if the user or +administrator wishes the containers to have expected access to the volume +contents. Note that when using external volume or graph driver plugins, those +external software programs must be made aware of user and group mapping ranges +if they are to work seamlessly with user namespace support. 
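+
+For example, a privileged container on a daemon with user namespaces enabled
+has to opt out of remapping, as noted in the restrictions above (a minimal
+sketch; the image name `my-image` is illustrative):
+
+```bash
+$ docker run --rm --privileged --userns=host my-image id
+```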
+ +Finally, while the `root` user inside a user namespaced container process has +many of the expected admin privileges that go along with being the superuser, the +Linux kernel has restrictions based on internal knowledge that this is a user namespaced +process. The most notable restriction that we are aware of at this time is the +inability to use `mknod`. Permission will be denied for device creation even as +container `root` inside a user namespace. + +### Miscellaneous options + +IP masquerading uses address translation to allow containers without a public +IP to talk to other machines on the Internet. This may interfere with some +network topologies and can be disabled with `--ip-masq=false`. + +Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and +for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be +set like this: + + DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 + # or + export DOCKER_TMPDIR=/mnt/disk2/tmp + /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 + +#### Default cgroup parent + +The `--cgroup-parent` option allows you to set the default cgroup parent +to use for containers. If this option is not set, it defaults to `/docker` for +fs cgroup driver and `system.slice` for systemd cgroup driver. + +If the cgroup has a leading forward slash (`/`), the cgroup is created +under the root cgroup, otherwise the cgroup is created under the daemon +cgroup. + +Assuming the daemon is running in cgroup `daemoncgroup`, +`--cgroup-parent=/foobar` creates a cgroup in +`/sys/fs/cgroup/memory/foobar`, whereas using `--cgroup-parent=foobar` +creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar` + +The systemd cgroup driver has different rules for `--cgroup-parent`. Systemd +represents hierarchy by slice and the name of the slice encodes the location in +the tree. So `--cgroup-parent` for systemd cgroups should be a slice name. A +name can consist of a dash-separated series of names, which describes the path +to the slice from the root slice. For example, `--cgroup-parent=user-a-b.slice` +means the memory cgroup for the container is created in +`/sys/fs/cgroup/memory/user.slice/user-a.slice/user-a-b.slice/docker-.scope`. + +This setting can also be set per container, using the `--cgroup-parent` +option on `docker create` and `docker run`, and takes precedence over +the `--cgroup-parent` option on the daemon. + +#### Daemon metrics + +The `--metrics-addr` option takes a tcp address to serve the metrics API. +This feature is still experimental, therefore, the daemon must be running in experimental +mode for this feature to work. + +To serve the metrics API on localhost:1337 you would specify `--metrics-addr 127.0.0.1:1337` +allowing you to make requests on the API at `127.0.0.1:1337/metrics` to receive metrics in the +[prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) format. + +If you are running a prometheus server you can add this address to your scrape configs +to have prometheus collect metrics on Docker. For more information +on prometheus you can view the website [here](https://prometheus.io/). + +```none +scrape_configs: + - job_name: 'docker' + static_configs: + - targets: ['127.0.0.1:1337'] +``` + +Please note that this feature is still marked as experimental as metrics and metric +names could change while this feature is still in experimental. 
Please provide +feedback on what you would like to see collected in the API. + +#### Daemon configuration file + +The `--config-file` option allows you to set any configuration option +for the daemon in a JSON format. This file uses the same flag names as keys, +except for flags that allow several entries, where it uses the plural +of the flag name, e.g., `labels` for the `label` flag. + +The options set in the configuration file must not conflict with options set +via flags. The docker daemon fails to start if an option is duplicated between +the file and the flags, regardless their value. We do this to avoid +silently ignore changes introduced in configuration reloads. +For example, the daemon fails to start if you set daemon labels +in the configuration file and also set daemon labels via the `--label` flag. +Options that are not present in the file are ignored when the daemon starts. + +##### On Linux + +The default location of the configuration file on Linux is +`/etc/docker/daemon.json`. The `--config-file` flag can be used to specify a + non-default location. + +This is a full example of the allowed configuration options on Linux: + +```json +{ + "authorization-plugins": [], + "data-root": "", + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "exec-root": "", + "experimental": false, + "storage-driver": "", + "storage-opts": [], + "labels": [], + "live-restore": true, + "log-driver": "", + "log-opts": {}, + "mtu": 0, + "pidfile": "", + "cluster-store": "", + "cluster-store-opts": {}, + "cluster-advertise": "", + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "default-shm-size": "64M", + "shutdown-timeout": 15, + "debug": true, + "hosts": [], + "log-level": "", + "tls": true, + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "swarm-default-advertise-addr": "", + "api-cors-header": "", + "selinux-enabled": false, + "userns-remap": "", + "group": "", + "cgroup-parent": "", + "default-ulimits": {}, + "init": false, + "init-path": "/usr/libexec/docker-init", + "ipv6": false, + "iptables": false, + "ip-forward": false, + "ip-masq": false, + "userland-proxy": false, + "userland-proxy-path": "/usr/libexec/docker-proxy", + "ip": "0.0.0.0", + "bridge": "", + "bip": "", + "fixed-cidr": "", + "fixed-cidr-v6": "", + "default-gateway": "", + "default-gateway-v6": "", + "icc": false, + "raw-logs": false, + "allow-nondistributable-artifacts": [], + "registry-mirrors": [], + "seccomp-profile": "", + "insecure-registries": [], + "disable-legacy-registry": false, + "no-new-privileges": false, + "default-runtime": "runc", + "oom-score-adjust": -500, + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + +> **Note:** You cannot set options in `daemon.json` that have already been set on +> daemon startup as a flag. +> On systems that use `systemd` to start the Docker daemon, `-H` is already set, so +> you cannot use the `hosts` key in `daemon.json` to add listening addresses. +> See https://docs.docker.com/engine/admin/systemd/#custom-docker-daemon-options for how +> to accomplish this task with a systemd drop-in file. + +##### On Windows + +The default location of the configuration file on Windows is + `%programdata%\docker\config\daemon.json`. The `--config-file` flag can be + used to specify a non-default location. 
+ +This is a full example of the allowed configuration options on Windows: + +```json +{ + "authorization-plugins": [], + "data-root": "", + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "experimental": false, + "storage-driver": "", + "storage-opts": [], + "labels": [], + "log-driver": "", + "mtu": 0, + "pidfile": "", + "cluster-store": "", + "cluster-advertise": "", + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "shutdown-timeout": 15, + "debug": true, + "hosts": [], + "log-level": "", + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "swarm-default-advertise-addr": "", + "group": "", + "default-ulimits": {}, + "bridge": "", + "fixed-cidr": "", + "raw-logs": false, + "allow-nondistributable-artifacts": [], + "registry-mirrors": [], + "insecure-registries": [], + "disable-legacy-registry": false +} +``` + +#### Configuration reload behavior + +Some options can be reconfigured when the daemon is running without requiring +to restart the process. We use the `SIGHUP` signal in Linux to reload, and a global event +in Windows with the key `Global\docker-daemon-config-$PID`. The options can +be modified in the configuration file but still will check for conflicts with +the provided flags. The daemon fails to reconfigure itself +if there are conflicts, but it won't stop execution. + +The list of currently supported options that can be reconfigured is this: + +- `debug`: it changes the daemon to debug mode when set to true. +- `cluster-store`: it reloads the discovery store with the new address. +- `cluster-store-opts`: it uses the new options to reload the discovery store. +- `cluster-advertise`: it modifies the address advertised after reloading. +- `labels`: it replaces the daemon labels with a new set of labels. +- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/engine/admin/live-restore/). +- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull. +- `max-concurrent-uploads`: it updates the max concurrent uploads for each push. +- `default-runtime`: it updates the runtime to be used if not is + specified at container creation. It defaults to "default" which is + the runtime shipped with the official docker packages. +- `runtimes`: it updates the list of available OCI runtimes that can + be used to run containers +- `authorization-plugin`: specifies the authorization plugins to use. +- `allow-nondistributable-artifacts`: Replaces the set of registries to which the daemon will push nondistributable artifacts with a new set of registries. +- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in newly reloaded insecure resgitries, these existing ones will be removed from daemon's config. +- `registry-mirrors`: it replaces the daemon registry mirrors with a new set of registry mirrors. If some existing registry mirrors in daemon's configuration are not in newly reloaded registry mirrors, these existing ones will be removed from daemon's config. + +Updating and reloading the cluster configurations such as `--cluster-store`, +`--cluster-advertise` and `--cluster-store-opts` will take effect only if +these configurations were not previously configured. If `--cluster-store` +has been provided in flags and `cluster-advertise` not, `cluster-advertise` +can be added in the configuration file without accompanied by `--cluster-store`. 
+Configuration reload will log a warning message if it detects a change in
+previously configured cluster configurations.
+
+
+### Run multiple daemons
+
+> **Note:** Running multiple daemons on a single host is considered "experimental". The user should be aware of
+> unsolved problems. This solution may not work properly in some cases. Solutions are currently under development
+> and will be delivered in the near future.
+
+This section describes how to run multiple Docker daemons on a single host. To
+run multiple daemons, you must configure each daemon so that it does not
+conflict with other daemons on the same host. You can set these options either
+by providing them as flags, or by using a [daemon configuration file](#daemon-configuration-file).
+
+The following daemon options must be configured for each daemon:
+
+```none
+-b, --bridge=                          Attach containers to a network bridge
+--exec-root=/var/run/docker            Root of the Docker execdriver
+--data-root=/var/lib/docker            Root of persisted Docker data
+-p, --pidfile=/var/run/docker.pid      Path to use for daemon PID file
+-H, --host=[]                          Daemon socket(s) to connect to
+--iptables=true                        Enable addition of iptables rules
+--config-file=/etc/docker/daemon.json  Daemon configuration file
+--tlscacert="~/.docker/ca.pem"         Trust certs signed only by this CA
+--tlscert="~/.docker/cert.pem"         Path to TLS certificate file
+--tlskey="~/.docker/key.pem"           Path to TLS key file
+```
+
+When your daemons use different values for these flags, you can run them on the same host without any problems.
+It is very important to understand the meaning of those options and to use them correctly.
+
+- The `-b, --bridge=` flag defaults to the `docker0` bridge network, which is created automatically when you install Docker.
+If you are not using the default, you must create and configure the bridge manually, or simply set it to 'none': `--bridge=none`
+- `--exec-root` is the path where the container state is stored. The default value is `/var/run/docker`. Specify the path for
+your running daemon here.
+- `--data-root` is the path where persisted data such as images, volumes, and
+cluster state are stored. The default value is `/var/lib/docker`. To avoid any
+conflict with other daemons, set this parameter separately for each daemon.
+- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. Specify the path for your
+pid file here.
+- `--host=[]` specifies where the Docker daemon will listen for client connections. If unspecified, it defaults to `/var/run/docker.sock`.
+- `--iptables=false` prevents the Docker daemon from adding iptables rules. If
+multiple daemons manage iptables rules, they may overwrite rules set by another
+daemon. Be aware that disabling this option requires you to manually add
+iptables rules to expose container ports. If you prevent Docker from adding
+iptables rules, Docker will also not add IP masquerading rules, even if you set
+`--ip-masq` to `true`. Without IP masquerading rules, Docker containers will not be
+able to connect to external hosts or the internet when using a network other than
+the default bridge.
+- `--config-file=/etc/docker/daemon.json` is the path where the configuration file is stored. You can use it instead of
+daemon flags. Specify the path for each daemon; a sample configuration file for a second daemon is shown after this list.
+- `--tls*` The Docker daemon supports `--tlsverify` mode, which enforces encrypted and authenticated remote connections.
+The `--tls*` options enable use of specific certificates for individual daemons.
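+
+As an illustration, a second daemon could keep all of these settings in its own
+configuration file rather than on the command line. The sketch below uses
+hypothetical paths, socket name, and file name (for example
+`/etc/docker/daemon-second.json`, passed via `--config-file`), and only uses keys
+from the configuration file reference above:
+
+```json
+{
+  "bridge": "none",
+  "exec-root": "/var/run/docker-second",
+  "data-root": "/var/lib/docker-second",
+  "pidfile": "/var/run/docker-second.pid",
+  "hosts": ["unix:///var/run/docker-second.sock"],
+  "iptables": false
+}
+```
+
+You would then start the second daemon with `dockerd --config-file /etc/docker/daemon-second.json`.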
+ +Example script for a separate “bootstrap” instance of the Docker daemon without network: + +```bash +$ sudo dockerd \ + -H unix:///var/run/docker-bootstrap.sock \ + -p /var/run/docker-bootstrap.pid \ + --iptables=false \ + --ip-masq=false \ + --bridge=none \ + --data-root=/var/lib/docker-bootstrap \ + --exec-root=/var/run/docker-bootstrap +``` diff --git a/docs/reference/commandline/events.md b/docs/reference/commandline/events.md new file mode 100644 index 0000000000..71475b43ec --- /dev/null +++ b/docs/reference/commandline/events.md @@ -0,0 +1,345 @@ +--- +title: "events" +description: "The events command description and usage" +keywords: "events, container, report" +--- + + + +# events + +```markdown +Usage: docker events [OPTIONS] + +Get real time events from the server + +Options: + -f, --filter value Filter output based on conditions provided (default []) + --format string Format the output using the given Go template + --help Print usage + --since string Show all events created since timestamp + --until string Stream events until this timestamp +``` + +## Description + +Use `docker events` to get real-time events from the server. These events differ +per Docker object type. + +### Object types + +#### Containers + +Docker containers report the following events: + +- `attach` +- `commit` +- `copy` +- `create` +- `destroy` +- `detach` +- `die` +- `exec_create` +- `exec_detach` +- `exec_start` +- `export` +- `health_status` +- `kill` +- `oom` +- `pause` +- `rename` +- `resize` +- `restart` +- `start` +- `stop` +- `top` +- `unpause` +- `update` + +#### Images + +Docker images report the following events: + +- `delete` +- `import` +- `load` +- `pull` +- `push` +- `save` +- `tag` +- `untag` + +#### Plugins + +Docker plugins report the following events: + +- `install` +- `enable` +- `disable` +- `remove` + +#### Volumes + +Docker volumes report the following events: + +- `create` +- `mount` +- `unmount` +- `destroy` + +#### Networks + +Docker networks report the following events: + +- `create` +- `connect` +- `disconnect` +- `destroy` + +#### Daemons + +Docker daemons report the following events: + +- `reload` + +### Limiting, filtering, and formatting the output + +#### Limit events by time + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine’s time. If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +#### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". 
If you would
+like to use multiple filters, pass multiple flags (e.g.,
+`--filter "foo=bar" --filter "bif=baz"`).
+
+Using the same filter multiple times will be handled as an *OR*; for example,
+`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display
+events for container 588a23dac085 *OR* container a8f7720b8c22.
+
+Using multiple filters will be handled as an *AND*; for example,
+`--filter container=588a23dac085 --filter event=start` will display events for
+container 588a23dac085 *AND* the event type is *start*.
+
+The currently supported filters are:
+
+* container (`container=<name or id>`)
+* daemon (`daemon=<name or id>`)
+* event (`event=<event action>`)
+* image (`image=<repository or tag>`)
+* label (`label=<key>` or `label=<key>=<value>`)
+* network (`network=<name or id>`)
+* plugin (`plugin=<name or id>`)
+* type (`type=<container or image or volume or network or daemon or plugin>`)
+* volume (`volume=<name or id>`)
+
+#### Format
+
+If a format (`--format`) is specified, the given template will be executed
+instead of the default
+format. Go's [text/template](http://golang.org/pkg/text/template/) package
+describes all the details of the format.
+
+If a format is set to `{{json .}}`, the events are streamed as valid JSON
+Lines. For information about JSON Lines, please refer to http://jsonlines.org/.
+
+## Examples
+
+### Basic example
+
+You'll need two shells for this example.
+
+**Shell 1: Listening for events:**
+
+```bash
+$ docker events
+```
+
+**Shell 2: Start and Stop containers:**
+
+```bash
+$ docker create --name test alpine:latest top
+$ docker start test
+$ docker stop test
+```
+
+**Shell 1: (Again, now showing events):**
+
+```none
+2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+```
+
+To exit the `docker events` command, use `CTRL+C`.
+ +### Filter events by time + +You can filter the output by an absolute timestamp or relative time on the host +machine, using the following different time syntaxes: + +```bash +$ docker events --since 1483283804 +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '2017-01-05' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '2013-09-03T15:49:29' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker events --since '10m' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 
container stop 0fdb...ff37 (image=alpine:latest, name=test)
+```
+
+### Filter events by criteria
+
+The following commands show several different ways to filter the `docker events`
+output.
+
+```bash
+$ docker events --filter 'event=stop'
+
+2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain)
+
+$ docker events --filter 'image=alpine'
+
+2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15)
+2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9)
+2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner)
+2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner)
+
+$ docker events --filter 'container=test'
+
+2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker events --filter 'container=test' --filter 'container=d9cdb1525ea8'
+
+2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner)
+2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9)
+2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test)
+2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test)
+
+$ docker events --filter 'container=test' --filter 'event=stop'
+
+2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test)
+
+$ docker events --filter 'container=container_1' --filter 'container=container_2'
+
+2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04)
+2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04)
+2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8)
+2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8)
+
+$ docker events --filter 
'type=volume' + +2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) +2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) +2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) +2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + +$ docker events --filter 'type=network' + +2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) +2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) + +$ docker events --filter 'type=plugin' + +2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) +2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) +``` + +### Format the output + +```bash +$ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + +Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +``` + +#### Format as JSON + +```none + $ docker events --format '{{json .}}' + + {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. + {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. + {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. +``` diff --git a/docs/reference/commandline/exec.md b/docs/reference/commandline/exec.md new file mode 100644 index 0000000000..e2e5d607b9 --- /dev/null +++ b/docs/reference/commandline/exec.md @@ -0,0 +1,99 @@ +--- +title: "exec" +description: "The exec command description and usage" +keywords: "command, container, run, execute" +--- + + + +# exec + +```markdown +Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] + +Run a command in a running container + +Options: + -d, --detach Detached mode: run command in the background + --detach-keys Override the key sequence for detaching a container + -e, --env=[] Set environment variables + --help Print usage + -i, --interactive Keep STDIN open even if not attached + --privileged Give extended privileges to the command + -t, --tty Allocate a pseudo-TTY + -u, --user Username or UID (format: [:]) +``` + +## Description + +The `docker exec` command runs a new command in a running container. 
+
+The command started using `docker exec` only runs while the container's primary
+process (`PID 1`) is running, and it is not restarted if the container is
+restarted.
+
+COMMAND will run in the default directory of the container. If the
+underlying image has a custom directory specified with the WORKDIR directive
+in its Dockerfile, this will be used instead.
+
+COMMAND should be an executable; a chained or a quoted command
+will not work. Example: `docker exec -ti my_container "echo a && echo b"` will
+not work, but `docker exec -ti my_container sh -c "echo a && echo b"` will.
+
+## Examples
+
+### Run `docker exec` on a running container
+
+First, start a container.
+
+```bash
+$ docker run --name ubuntu_bash --rm -i -t ubuntu bash
+```
+
+This will create a container named `ubuntu_bash` and start a Bash session.
+
+Next, execute a command on the container.
+
+```bash
+$ docker exec -d ubuntu_bash touch /tmp/execWorks
+```
+
+This will create a new file `/tmp/execWorks` inside the running container
+`ubuntu_bash`, in the background.
+
+Next, execute an interactive `bash` shell on the container.
+
+```bash
+$ docker exec -it ubuntu_bash bash
+```
+
+This will create a new Bash session in the container `ubuntu_bash`.
+
+### Try to run `docker exec` on a paused container
+
+If the container is paused, then the `docker exec` command will fail with an error:
+
+```bash
+$ docker pause test
+
+test
+
+$ docker ps
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS                    PORTS               NAMES
+1ae3b36715d2        ubuntu:latest       "bash"              17 seconds ago      Up 16 seconds (Paused)                        test
+
+$ docker exec test ls
+
+FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec
+
+$ echo $?
+1
+```
diff --git a/docs/reference/commandline/export.md b/docs/reference/commandline/export.md
new file mode 100644
index 0000000000..9de509714a
--- /dev/null
+++ b/docs/reference/commandline/export.md
@@ -0,0 +1,48 @@
+---
+title: "export"
+description: "The export command description and usage"
+keywords: "export, file, system, container"
+---
+
+
+
+# export
+
+```markdown
+Usage:  docker export [OPTIONS] CONTAINER
+
+Export a container's filesystem as a tar archive
+
+Options:
+      --help            Print usage
+  -o, --output string   Write to a file, instead of STDOUT
+```
+
+## Description
+
+The `docker export` command does not export the contents of volumes associated
+with the container. If a volume is mounted on top of an existing directory in
+the container, `docker export` will export the contents of the *underlying*
+directory, not the contents of the volume.
+
+Refer to [Backup, restore, or migrate data volumes](https://docs.docker.com/engine/tutorials/dockervolumes/#backup-restore-or-migrate-data-volumes)
+in the user guide for examples on exporting data in a volume.
+
+## Examples
+
+Each of these commands has the same result.
+
+```bash
+$ docker export red_panda > latest.tar
+```
+
+```bash
+$ docker export --output="latest.tar" red_panda
+```
diff --git a/docs/reference/commandline/history.md b/docs/reference/commandline/history.md
new file mode 100644
index 0000000000..014ceddaf3
--- /dev/null
+++ b/docs/reference/commandline/history.md
@@ -0,0 +1,93 @@
+---
+title: "history"
+description: "The history command description and usage"
+keywords: "docker, image, history"
+---
+
+
+
+# history
+
+```markdown
+Usage:  docker history [OPTIONS] IMAGE
+
+Show the history of an image
+
+Options:
+      --format string   Pretty-print images using a Go template
+      --help            Print usage
+  -H, --human           Print sizes and dates in human readable format (default true)
+      --no-trunc        Don't truncate output
+  -q, --quiet           Only show numeric IDs
+```
+
+
+## Examples
+
+To see how the `docker:latest` image was built:
+
+```bash
+$ docker history docker
+
+IMAGE          CREATED        CREATED BY                                      SIZE        COMMENT
+3e23a5875458   8 days ago     /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8            0 B
+8578938dd170   8 days ago     /bin/sh -c dpkg-reconfigure locales && loc      1.245 MB
+be51b77efb42   8 days ago     /bin/sh -c apt-get update && apt-get install    338.3 MB
+4b137612be55   6 weeks ago    /bin/sh -c #(nop) ADD jessie.tar.xz in /        121 MB
+750d58736b4b   6 weeks ago    /bin/sh -c #(nop) MAINTAINER Tianon Gravi <ad   1.384 kB
+511136ea3c5a   9 months ago                                                   0 B         Imported from -
+```
diff --git a/docs/reference/commandline/image.md b/docs/reference/commandline/image.md
new file mode 100644
index 0000000000..fef161aa99
--- /dev/null
+++ b/docs/reference/commandline/image.md
@@ -0,0 +1,47 @@
+
+---
+title: "image"
+description: "The image command description and usage"
+keywords: "image"
+---
+
+
+
+# image
+
+```markdown
+Usage:  docker image COMMAND
+
+Manage images
+
+Options:
+      --help   Print usage
+
+Commands:
+  build       Build an image from a Dockerfile
+  history     Show the history of an image
+  import      Import the contents from a tarball to create a filesystem image
+  inspect     Display detailed information on one or more images
+  load        Load an image from a tar archive or STDIN
+  ls          List images
+  prune       Remove unused images
+  pull        Pull an image or a repository from a registry
+  push        Push an image or a repository to a registry
+  rm          Remove one or more images
+  save        Save one or more images to a tar archive (streamed to STDOUT by default)
+  tag         Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
+
+Run 'docker image COMMAND --help' for more information on a command.
+
+```
+
+## Description
+
+Manage images.
diff --git a/docs/reference/commandline/image_prune.md b/docs/reference/commandline/image_prune.md
new file mode 100644
index 0000000000..f3b1545407
--- /dev/null
+++ b/docs/reference/commandline/image_prune.md
@@ -0,0 +1,171 @@
+---
+title: "image prune"
+description: "Remove unused images"
+keywords: "image, prune, delete, remove"
+---
+
+
+
+# image prune
+
+```markdown
+Usage:  docker image prune [OPTIONS]
+
+Remove unused images
+
+Options:
+  -a, --all             Remove all unused images, not just dangling ones
+      --filter filter   Provide filter values (e.g. 'until=<timestamp>')
+  -f, --force           Do not prompt for confirmation
+      --help            Print usage
+```
+
+## Description
+
+Remove all dangling images. If `-a` is specified, this will also remove all images not referenced by any container.
+
+## Examples
+
+Example output:
+
+```bash
+$ docker image prune -a
+
+WARNING! This will remove all images without at least one container associated to them.
+Are you sure you want to continue?
[y/N] y
+Deleted Images:
+untagged: alpine:latest
+untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a
+deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba
+deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f
+untagged: alpine:3.3
+untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423
+untagged: my-jq:latest
+deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff
+deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65
+deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7
+untagged: my-curl:latest
+deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e
+deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9
+deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e
+deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec
+deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06
+deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c
+deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35
+deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809
+deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0
+deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac
+deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b
+deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1
+
+Total reclaimed space: 16.43 MB
+```
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more
+than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`)
+
+The currently supported filters are:
+
+* until (`<timestamp>`) - only remove images created before given timestamp
+* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove images with (or without, in case `label!=...` is used) the specified labels.
+
+The `until` filter can be Unix timestamps, date formatted
+timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
+relative to the daemon machine’s time. Supported formats for date
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
+`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
+timezone on the daemon will be used if you do not provide either a `Z` or a
+`+-00:00` timezone offset at the end of the timestamp. When providing Unix
+timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
+that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
+seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
+fraction of a second no more than nine digits long.
+
+The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`),
+which removes images with the specified labels. The other
+format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes
+images without the specified labels.
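+
+For example, the following sketch removes unused images that carry a
+hypothetical `deprecated` label (the label names are only illustrations; use
+whatever labels your images were built with):
+
+```bash
+# Remove unused images labeled "deprecated", regardless of the label value
+$ docker image prune --all --force --filter "label=deprecated"
+
+# Remove unused images that do NOT carry a "maintained=true" label
+$ docker image prune --all --force --filter "label!=maintained=true"
+```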
+ +The following removes images created before `2017-01-04T00:00:00`: + +```bash +$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}' +REPOSITORY TAG IMAGE ID CREATED AT SIZE +foo latest 2f287ac753da 2017-01-04 13:42:23 -0800 PST 3.98 MB +alpine latest 88e169ea8f46 2016-12-27 10:17:25 -0800 PST 3.98 MB +busybox latest e02e811dd08f 2016-10-07 14:03:58 -0700 PDT 1.09 MB + +$ docker image prune -a --force --filter "until=2017-01-04T00:00:00" + +Deleted Images: +untagged: alpine:latest +untagged: alpine@sha256:dfbd4a3a8ebca874ebd2474f044a0b33600d4523d03b0df76e5c5986cb02d7e8 +untagged: busybox:latest +untagged: busybox@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 +deleted: sha256:e02e811dd08fd49e7f6032625495118e63f597eb150403d02e3238af1df240ba +deleted: sha256:e88b3f82283bc59d5e0df427c824e9f95557e661fcb0ea15fb0fb6f97760f9d9 + +Total reclaimed space: 1.093 MB + +$ docker images --format 'table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}' + +REPOSITORY TAG IMAGE ID CREATED AT SIZE +foo latest 2f287ac753da 2017-01-04 13:42:23 -0800 PST 3.98 MB +``` + +The following removes images created more than 10 days (`240h`) ago: + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +foo latest 2f287ac753da 14 seconds ago 3.98 MB +alpine latest 88e169ea8f46 8 days ago 3.98 MB +debian jessie 7b0a06c805e8 2 months ago 123 MB +busybox latest e02e811dd08f 2 months ago 1.09 MB +golang 1.7.0 138c2e655421 4 months ago 670 MB + +$ docker image prune -a --force --filter "until=240h" + +Deleted Images: +untagged: golang:1.7.0 +untagged: golang@sha256:6765038c2b8f407fd6e3ecea043b44580c229ccfa2a13f6d85866cf2b4a9628e +deleted: sha256:138c2e6554219de65614d88c15521bfb2da674cbb0bf840de161f89ff4264b96 +deleted: sha256:ec353c2e1a673f456c4b78906d0d77f9d9456cfb5229b78c6a960bfb7496b76a +deleted: sha256:fe22765feaf3907526b4921c73ea6643ff9e334497c9b7e177972cf22f68ee93 +deleted: sha256:ff845959c80148421a5c3ae11cc0e6c115f950c89bc949646be55ed18d6a2912 +deleted: sha256:a4320831346648c03db64149eafc83092e2b34ab50ca6e8c13112388f25899a7 +deleted: sha256:4c76020202ee1d9709e703b7c6de367b325139e74eebd6b55b30a63c196abaf3 +deleted: sha256:d7afd92fb07236c8a2045715a86b7d5f0066cef025018cd3ca9a45498c51d1d6 +deleted: sha256:9e63c5bce4585dd7038d830a1f1f4e44cb1a1515b00e620ac718e934b484c938 +untagged: debian:jessie +untagged: debian@sha256:c1af755d300d0c65bb1194d24bce561d70c98a54fb5ce5b1693beb4f7988272f +deleted: sha256:7b0a06c805e8f23807fb8856621c60851727e85c7bcb751012c813f122734c8d +deleted: sha256:f96222d75c5563900bc4dd852179b720a0885de8f7a0619ba0ac76e92542bbc8 + +Total reclaimed space: 792.6 MB + +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +foo latest 2f287ac753da About a minute ago 3.98 MB +alpine latest 88e169ea8f46 8 days ago 3.98 MB +busybox latest e02e811dd08f 2 months ago 1.09 MB +``` + +## Related commands + +* [system df](system_df.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/docs/reference/commandline/images.md b/docs/reference/commandline/images.md new file mode 100644 index 0000000000..86a32aed27 --- /dev/null +++ b/docs/reference/commandline/images.md @@ -0,0 +1,343 @@ +--- +title: "images" +description: "The images command description and usage" +keywords: "list, docker, images" +--- + + + +# images + +```markdown +Usage: docker images [OPTIONS] [REPOSITORY[:TAG]] + +List images + +Options: + -a, --all 
Show all images (default hides intermediate images)
+      --digests             Show digests
+  -f, --filter value        Filter output based on conditions provided (default [])
+                            - dangling=(true|false)
+                            - label=<key> or label=<key>=<value>
+                            - before=(<image-name>[:tag]|<image-id>|<image@digest>)
+                            - since=(<image-name>[:tag]|<image-id>|<image@digest>)
+                            - reference=(pattern of an image reference)
+      --format string       Pretty-print images using a Go template
+      --help                Print usage
+      --no-trunc            Don't truncate output
+  -q, --quiet               Only show numeric IDs
+```
+
+## Description
+
+The default `docker images` will show all top level
+images, their repository and tags, and their size.
+
+Docker images have intermediate layers that increase reusability,
+decrease disk usage, and speed up `docker build` by
+allowing each step to be cached. These intermediate layers are not shown
+by default.
+
+The `SIZE` is the cumulative space taken up by the image and all
+its parent images. This is also the disk space used by the contents of the
+Tar file created when you `docker save` an image.
+
+An image will be listed more than once if it has multiple repository names
+or tags. This single image (identifiable by its matching `IMAGE ID`)
+uses up the `SIZE` listed only once.
+
+## Examples
+
+### List the most recently created images
+
+```bash
+$ docker images
+
+REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
+<none>                    <none>              77af4d6b9913        19 hours ago        1.089 GB
+committ                   latest              b6fa739cedf5        19 hours ago        1.089 GB
+<none>                    <none>              78a85c484f71        19 hours ago        1.089 GB
+docker                    latest              30557a29d5ab        20 hours ago        1.089 GB
+<none>                    <none>              5ed6274db6ce        24 hours ago        1.089 GB
+postgres                  9                   746b819f315e        4 days ago          213.4 MB
+postgres                  9.3                 746b819f315e        4 days ago          213.4 MB
+postgres                  9.3.5               746b819f315e        4 days ago          213.4 MB
+postgres                  latest              746b819f315e        4 days ago          213.4 MB
+```
+
+### List images by name and tag
+
+The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument
+that restricts the list to images that match the argument. If you specify
+`REPOSITORY` but no `TAG`, the `docker images` command lists all images in the
+given repository.
+
+For example, to list all images in the "java" repository, run this command:
+
+```bash
+$ docker images java
+
+REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
+java                      8                   308e519aac60        6 days ago          824.5 MB
+java                      7                   493d82594c15        3 months ago        656.3 MB
+java                      latest              2711b1d6f3aa        5 months ago        603.9 MB
+```
+
+The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example,
+`docker images jav` does not match the image `java`.
+
+If both `REPOSITORY` and `TAG` are provided, only images matching that
+repository and tag are listed. To find all local images in the "java"
+repository with tag "8" you can use:
+
+```bash
+$ docker images java:8
+
+REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
+java                      8                   308e519aac60        6 days ago          824.5 MB
+```
+
+If nothing matches `REPOSITORY[:TAG]`, the list is empty.
+ +```bash +$ docker images java:0 + +REPOSITORY TAG IMAGE ID CREATED SIZE +``` + +### List the full length image IDs + +```bash +$ docker images --no-trunc + +REPOSITORY TAG IMAGE ID CREATED SIZE + sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB +committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB + sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB +docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB + sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB + sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB + sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB +tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB + sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB +``` + +### List image digests + +Images that use the v2 or later format have a content-addressable identifier +called a `digest`. As long as the input used to generate the image is +unchanged, the digest value is predictable. To list image digest values, use +the `--digests` flag: + +```bash +$ docker images --digests +REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB +``` + +When pushing or pulling to a 2.0 registry, the `push` or `pull` command +output includes the image digest. You can `pull` using a digest value. You can +also reference by digest in `create`, `run`, and `rmi` commands, as well as the +`FROM` image reference in a Dockerfile. + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* dangling (boolean - true or false) +* label (`label=` or `label==`) +* before (`[:]`, `` or ``) - filter images created before given id or references +* since (`[:]`, `` or ``) - filter images created since given id or references +* reference (pattern of an image reference) - filter images whose reference matches the specified pattern + +#### Show untagged images (dangling) + +```bash +$ docker images --filter "dangling=true" + +REPOSITORY TAG IMAGE ID CREATED SIZE + 8abc22fbb042 4 weeks ago 0 B + 48e5f45168b9 4 weeks ago 2.489 MB + bf747efa0e2f 4 weeks ago 0 B + 980fe10e5736 12 weeks ago 101.4 MB + dea752e4e117 12 weeks ago 101.4 MB + 511136ea3c5a 8 months ago 0 B +``` + +This will display untagged images that are the leaves of the images tree (not +intermediary layers). These images occur when a new build of an image takes the +`repo:tag` away from the image ID, leaving it as `:` or untagged. +A warning will be issued if trying to remove an image when a container is presently +using it. By having this flag it allows for batch cleanup. + +You can use this in conjunction with `docker rmi ...`: + +```bash +$ docker rmi $(docker images -f "dangling=true" -q) + +8abc22fbb042 +48e5f45168b9 +bf747efa0e2f +980fe10e5736 +dea752e4e117 +511136ea3c5a +``` + +> **Note**: Docker warns you if any containers exist that are using these +> untagged images. 
+ + +#### Show images with a given label + +The `label` filter matches images based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches images with the `com.example.version` label regardless of its value. + +```bash +$ docker images --filter "label=com.example.version" + +REPOSITORY TAG IMAGE ID CREATED SIZE +match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB +match-me-2 latest dea752e4e117 About a minute ago 188.3 MB +``` + +The following filter matches images with the `com.example.version` label with the `1.0` value. + +```bash +$ docker images --filter "label=com.example.version=1.0" + +REPOSITORY TAG IMAGE ID CREATED SIZE +match-me latest 511136ea3c5a About a minute ago 188.3 MB +``` + +In this example, with the `0.1` value, it returns an empty set because no matches were found. + +```bash +$ docker images --filter "label=com.example.version=0.1" +REPOSITORY TAG IMAGE ID CREATED SIZE +``` + +#### Filter images by time + +The `before` filter shows only images created before the image with +given id or reference. For example, having these images: + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +image1 latest eeae25ada2aa 4 minutes ago 188.3 MB +image2 latest dea752e4e117 9 minutes ago 188.3 MB +image3 latest 511136ea3c5a 25 minutes ago 188.3 MB +``` + +Filtering with `before` would give: + +```bash +$ docker images --filter "before=image1" + +REPOSITORY TAG IMAGE ID CREATED SIZE +image2 latest dea752e4e117 9 minutes ago 188.3 MB +image3 latest 511136ea3c5a 25 minutes ago 188.3 MB +``` + +Filtering with `since` would give: + +```bash +$ docker images --filter "since=image3" +REPOSITORY TAG IMAGE ID CREATED SIZE +image1 latest eeae25ada2aa 4 minutes ago 188.3 MB +image2 latest dea752e4e117 9 minutes ago 188.3 MB +``` + +#### Filter images by reference + +The `reference` filter shows only images whose reference matches +the specified pattern. + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +busybox latest e02e811dd08f 5 weeks ago 1.09 MB +busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB +busybox musl 733eb3059dce 5 weeks ago 1.21 MB +busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB +``` + +Filtering with `reference` would give: + +```bash +$ docker images --filter=reference='busy*:*libc' + +REPOSITORY TAG IMAGE ID CREATED SIZE +busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB +busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB +``` + +### Format the output + +The formatting option (`--format`) will pretty print container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +| Placeholder | Description| +| ---- | ---- | +| `.ID` | Image ID | +| `.Repository` | Image repository | +| `.Tag` | Image tag | +| `.Digest` | Image digest | +| `.CreatedSince` | Elapsed time since the image was created | +| `.CreatedAt` | Time when the image was created | +| `.Size` | Image disk size | + +When using the `--format` option, the `image` command will either +output the data exactly as the template declares or, when using the +`table` directive, will include column headers as well. 
+ +The following example uses a template without headers and outputs the +`ID` and `Repository` entries separated by a colon for all images: + +```bash +$ docker images --format "{{.ID}}: {{.Repository}}" + +77af4d6b9913: +b6fa739cedf5: committ +78a85c484f71: +30557a29d5ab: docker +5ed6274db6ce: +746b819f315e: postgres +746b819f315e: postgres +746b819f315e: postgres +746b819f315e: postgres +``` + +To list all images with their repository and tag in a table format you +can use: + +```bash +$ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" + +IMAGE ID REPOSITORY TAG +77af4d6b9913 +b6fa739cedf5 committ latest +78a85c484f71 +30557a29d5ab docker latest +5ed6274db6ce +746b819f315e postgres 9 +746b819f315e postgres 9.3 +746b819f315e postgres 9.3.5 +746b819f315e postgres latest +``` diff --git a/docs/reference/commandline/import.md b/docs/reference/commandline/import.md new file mode 100644 index 0000000000..57edf650c9 --- /dev/null +++ b/docs/reference/commandline/import.md @@ -0,0 +1,89 @@ +--- +title: "import" +description: "The import command description and usage" +keywords: "import, file, system, container" +--- + + + +# import + +```markdown +Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] + +Import the contents from a tarball to create a filesystem image + +Options: + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Set commit message for imported image +``` + +## Description + +You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The +`URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) +containing a filesystem or to an individual file on the Docker host. If you +specify an archive, Docker untars it in the container relative to the `/` +(root). If you specify an individual file, you must specify the full path within +the host. To import from a remote location, specify a `URI` that begins with the +`http://` or `https://` protocol. + +The `--change` option will apply `Dockerfile` instructions to the image +that is created. +Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Examples + +### Import from a remote location + +This will create a new untagged image. + +```bash +$ docker import http://example.com/exampleimage.tgz +``` + +### Import from a local file + +- Import to docker via pipe and `STDIN`. + + ```bash + $ cat exampleimage.tgz | docker import - exampleimagelocal:new + ``` + +- Import with a commit message. + + ```bash + $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new + ``` + +- Import to docker from a local archive. + + ```bash + $ docker import /path/to/exampleimage.tgz + ``` + +### Import from a local directory + +```bash +$ sudo tar -c . | docker import - exampleimagedir +``` + +### Import from a local directory with new configurations + +```bash +$ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir +``` + +Note the `sudo` in this example – you must preserve +the ownership of the files (especially root ownership) during the +archiving with tar. If you are not root (or the sudo command) when you +tar, then the ownerships might not get preserved. 
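+
+The `--change` (`-c`) option can be repeated. As a sketch, the following applies
+both an `ENV` and a `CMD` instruction to the imported image (the directory and
+image name are illustrative):
+
+```bash
+$ sudo tar -c . | docker import -c "ENV DEBUG true" -c 'CMD ["/bin/sh"]' - exampleimagedir
+```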
diff --git a/docs/reference/commandline/index.md b/docs/reference/commandline/index.md new file mode 100644 index 0000000000..c000082fab --- /dev/null +++ b/docs/reference/commandline/index.md @@ -0,0 +1,184 @@ +--- +title: "Docker commands" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +identifier: "smn_cli_guide" +--- + + + +# The Docker commands + +This section contains reference information on using Docker's command line +client. Each command has a reference page along with samples. If you are +unfamiliar with the command line, you should start by reading about how to [Use +the Docker command line](cli.md). + +You start the Docker daemon with the command line. How you start the daemon +affects your Docker containers. For that reason you should also make sure to +read the [`dockerd`](dockerd.md) reference page. + +## Commands by object + +### Docker management commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [dockerd](dockerd.md) | Launch the Docker daemon | +| [info](info.md) | Display system-wide information | +| [inspect](inspect.md)| Return low-level information on a container or image | +| [version](version.md) | Show the Docker version information | + + +### Image commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [build](build.md) | Build an image from a Dockerfile | +| [commit](commit.md) | Create a new image from a container's changes | +| [history](history.md) | Show the history of an image | +| [images](images.md) | List images | +| [import](import.md) | Import the contents from a tarball to create a filesystem image | +| [load](load.md) | Load an image from a tar archive or STDIN | +| [image prune](image_prune.md) | Remove unused images | +| [rmi](rmi.md) | Remove one or more images | +| [save](save.md) | Save images to a tar archive | +| [tag](tag.md) | Tag an image into a repository | + +### Container commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [attach](attach.md) | Attach to a running container | +| [container prune](container_prune.md) | Remove all stopped containers | +| [cp](cp.md) | Copy files/folders from a container to a HOSTDIR or to STDOUT | +| [create](create.md) | Create a new container | +| [diff](diff.md) | Inspect changes on a container's filesystem | +| [events](events.md) | Get real time events from the server | +| [exec](exec.md) | Run a command in a running container | +| [export](export.md) | Export a container's filesystem as a tar archive | +| [kill](kill.md) | Kill a running container | +| [logs](logs.md) | Fetch the logs of a container | +| [pause](pause.md) | Pause all processes within a container | +| [port](port.md) | List port mappings or a specific mapping for the container | +| [ps](ps.md) | List containers | +| [rename](rename.md) | Rename a container | +| [restart](restart.md) | Restart a running container | +| [rm](rm.md) | Remove one or more containers | +| [run](run.md) | Run a command in a new container | +| [start](start.md) | Start one or more stopped containers | +| [stats](stats.md) | Display a live stream of container(s) resource usage statistics | +| [stop](stop.md) | Stop a running container | +| [top](top.md) | Display the running processes of a container | +| [unpause](unpause.md) | Unpause all processes within a container | +| 
[update](update.md) | Update configuration of one or more containers | +| [wait](wait.md) | Block until a container stops, then print its exit code | + +### Hub and registry commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [login](login.md) | Register or log in to a Docker registry | +| [logout](logout.md) | Log out from a Docker registry | +| [pull](pull.md) | Pull an image or a repository from a Docker registry | +| [push](push.md) | Push an image or a repository to a Docker registry | +| [search](search.md) | Search the Docker Hub for images | + +### Network and connectivity commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [network connect](network_connect.md) | Connect a container to a network | +| [network create](network_create.md) | Create a new network | +| [network disconnect](network_disconnect.md) | Disconnect a container from a network | +| [network inspect](network_inspect.md) | Display information about a network | +| [network ls](network_ls.md) | Lists all the networks the Engine `daemon` knows about | +| [network prune](network_prune.md) | Remove all unused networks | +| [network rm](network_rm.md) | Removes one or more networks | + +### Shared data volume commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [volume create](volume_create.md) | Creates a new volume where containers can consume and store data | +| [volume inspect](volume_inspect.md) | Display information about a volume | +| [volume ls](volume_ls.md) | Lists all the volumes Docker knows about | +| [volume prune](volume_prune.md) | Remove all unused volumes | +| [volume rm](volume_rm.md) | Remove one or more volumes | + +### Swarm node commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager | +| [node inspect](node_inspect.md) | Inspect a node in the swarm | +| [node ls](node_ls.md) | List nodes in the swarm | +| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager | +| [node ps](node_ps.md) | List tasks running on one or more nodes | +| [node rm](node_rm.md) | Remove one or more nodes from the swarm | +| [node update](node_update.md) | Update attributes for a node | + +### Swarm management commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [swarm init](swarm_init.md) | Initialize a swarm | +| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node | +| [swarm leave](swarm_leave.md) | Remove the current node from the swarm | +| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens | +| [swarm unlock](swarm_unlock.md) | Unlock swarm | +| [swarm unlock-key](swarm_unlock_key.md) | Manage the unlock key | +| [swarm update](swarm_update.md) | Update attributes of a swarm | + +### Swarm service commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [service create](service_create.md) | Create a new service | +| [service inspect](service_inspect.md) | Inspect a service | +| [service logs](service_logs.md) | Fetch the logs of a service or task | +| [service ls](service_ls.md) | List services in the swarm | +| [service ps](service_ps.md) | 
List the tasks of a service | +| [service rm](service_rm.md) | Remove a service from the swarm | +| [service scale](service_scale.md) | Set the number of replicas for the desired state of the service | +| [service update](service_update.md) | Update the attributes of a service | + +### Swarm secret commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [secret create](secret_create.md) | Create a secret from a file or STDIN as content | +| [secret inspect](service_inspect.md) | Inspect the specified secret | +| [secret ls](secret_ls.md) | List secrets in the swarm | +| [secret rm](secret_rm.md) | Remove the specified secrets from the swarm | + +### Swarm stack commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [stack deploy](stack_deploy.md) | Deploy a new stack or update an existing stack | +| [stack ls](stack_ls.md) | List stacks in the swarm | +| [stack ps](stack_ps.md) | List the tasks in the stack | +| [stack rm](stack_rm.md) | Remove the stack from the swarm | +| [stack services](stack_services.md) | List the services in the stack | + +### Plugin commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [plugin create](plugin_create.md) | Create a plugin from a rootfs and configuration | +| [plugin disable](plugin_disable.md) | Disable a plugin | +| [plugin enable](plugin_enable.md) | Enable a plugin | +| [plugin inspect](plugin_inspect.md) | Display detailed information on a plugin | +| [plugin install](plugin_install.md) | Install a plugin | +| [plugin ls](plugin_ls.md) | List plugins | +| [plugin push](plugin_push.md) | Push a plugin to a registry | +| [plugin rm](plugin_rm.md) | Remove a plugin | +| [plugin set](plugin_set.md) | Change settings for a plugin | diff --git a/docs/reference/commandline/info.md b/docs/reference/commandline/info.md new file mode 100644 index 0000000000..9929c04af3 --- /dev/null +++ b/docs/reference/commandline/info.md @@ -0,0 +1,245 @@ +--- +title: "info" +description: "The info command description and usage" +keywords: "display, docker, information" +--- + + + +# info + +```markdown +Usage: docker info [OPTIONS] + +Display system-wide information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +This command displays system wide information regarding the Docker installation. +Information displayed includes the kernel version, number of containers and images. +The number of images shown is the number of unique images. The same image tagged +under different names is counted only once. + +If a format is specified, the given template will be executed instead of the +default format. Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +Depending on the storage driver in use, additional information can be shown, such +as pool name, data file, metadata file, data space used, total data space, metadata +space used, and total metadata space. + +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. 
+ +## Examples + +### Show output + +The example below shows the output for a daemon running on Red Hat Enterprise Linux, +using the `devicemapper` storage driver. As can be seen in the output, additional +information about the `devicemapper` storage driver is shown: + +```bash +$ docker info + +Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 +Images: 52 +Server Version: 1.10.3 +Storage Driver: devicemapper + Pool Name: docker-202:2-25583803-pool + Pool Blocksize: 65.54 kB + Base Device Size: 10.74 GB + Backing Filesystem: xfs + Data file: /dev/loop0 + Metadata file: /dev/loop1 + Data Space Used: 1.68 GB + Data Space Total: 107.4 GB + Data Space Available: 7.548 GB + Metadata Space Used: 2.322 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.145 GB + Udev Sync Supported: true + Deferred Removal Enabled: false + Deferred Deletion Enabled: false + Deferred Deleted Device Count: 0 + Data loop file: /var/lib/docker/devicemapper/devicemapper/data + Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.107-RHEL7 (2015-12-01) +Execution Driver: native-0.2 +Logging Driver: json-file +Plugins: + Volume: local + Network: null host bridge +Kernel Version: 3.10.0-327.el7.x86_64 +Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) +OSType: linux +Architecture: x86_64 +CPUs: 1 +Total Memory: 991.7 MiB +Name: ip-172-30-0-91.ec2.internal +ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S +Docker Root Dir: /var/lib/docker +Debug mode (client): false +Debug mode (server): false +Username: gordontheturtle +Registry: https://index.docker.io/v1/ +Insecure registries: + myinsecurehost:5000 + 127.0.0.0/8 +``` + +### Show debugging output + +Here is a sample output for a daemon running on Ubuntu, using the overlay2 +storage driver and a node that is part of a 2-node swarm: + +```bash +$ docker -D info + +Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 +Images: 52 +Server Version: 1.13.0 +Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Native Overlay Diff: false +Logging Driver: json-file +Cgroup Driver: cgroupfs +Plugins: + Volume: local + Network: bridge host macvlan null overlay +Swarm: active + NodeID: rdjq45w1op418waxlairloqbm + Is Manager: true + ClusterID: te8kdyw33n36fqiz74bfjeixd + Managers: 1 + Nodes: 2 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Root Rotation In Progress: false + Node Address: 172.16.66.128 172.16.66.129 + Manager Addresses: + 172.16.66.128:2477 +Runtimes: runc +Default Runtime: runc +Init Binary: docker-init +containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 +runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 +init version: N/A (expected: v0.13.0) +Security Options: + apparmor + seccomp + Profile: default +Kernel Version: 4.4.0-31-generic +Operating System: Ubuntu 16.04.1 LTS +OSType: linux +Architecture: x86_64 +CPUs: 2 +Total Memory: 1.937 GiB +Name: ubuntu +ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 +Docker Root Dir: /var/lib/docker +Debug Mode (client): true +Debug Mode (server): true + File Descriptors: 30 + Goroutines: 123 + System Time: 2016-11-12T17:24:37.955404361-08:00 + EventsListeners: 0 +Http Proxy: http://test:test@proxy.example.com:8080 +Https Proxy: https://test:test@proxy.example.com:8080 +No Proxy: 
localhost,127.0.0.1,docker-registry.somecorporation.com +Registry: https://index.docker.io/v1/ +WARNING: No swap limit support +Labels: + storage=ssd + staging=true +Experimental: false +Insecure Registries: + 127.0.0.0/8 +Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ +Live Restore Enabled: false +``` + +The global `-D` option causes all `docker` commands to output debug information. + +### Format the output + +You can also specify the output format: + +```bash +$ docker info --format '{{json .}}' + +{"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} +``` + +### Run `docker info` on Windows + +Here is a sample output for a daemon running on Windows Server 2016: + +```none +E:\docker>docker info + +Containers: 1 + Running: 0 + Paused: 0 + Stopped: 1 +Images: 17 +Server Version: 1.13.0 +Storage Driver: windowsfilter + Windows: +Logging Driver: json-file +Plugins: + Volume: local + Network: nat null overlay +Swarm: inactive +Default Isolation: process +Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937) +Operating System: Windows Server 2016 Datacenter +OSType: windows +Architecture: x86_64 +CPUs: 8 +Total Memory: 3.999 GiB +Name: WIN-V0V70C0LU5P +ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62 +Docker Root Dir: C:\control +Debug Mode (client): false +Debug Mode (server): false +Registry: https://index.docker.io/v1/ +Insecure Registries: + 127.0.0.0/8 +Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ +Live Restore Enabled: false +``` diff --git a/docs/reference/commandline/inspect.md b/docs/reference/commandline/inspect.md new file mode 100644 index 0000000000..9ce4f5f517 --- /dev/null +++ b/docs/reference/commandline/inspect.md @@ -0,0 +1,122 @@ +--- +title: "inspect" +description: "The inspect command description and usage" +keywords: "inspect, container, json" +--- + + + +# inspect + +```markdown +Usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...] + +Return low-level information on Docker object(s) (e.g. container, image, volume, +network, node, service, or task) identified by name or ID + +Options: + -f, --format Format the output using the given Go template + --help Print usage + -s, --size Display total file sizes if the type is container + --type Return JSON for specified type +``` + +## Description + +Docker inspect provides detailed information on constructs controlled by Docker. + +By default, `docker inspect` will render results in a JSON array. + +## Request a custom response format (--format) + +If a format is specified, the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Specify target type (--type) + +`--type container|image|node|network|secret|service|volume|task|plugin` + +The `docker inspect` command matches any type of object by either ID or name. +In some cases multiple type of objects (for example, a container and a volume) +exist with the same name, making the result ambigious. + +To restrict `docker inspect` to a specific type of object, use the `--type` +option. + +The following example inspects a _volume_ named "myvolume" + +```bash +$ docker inspect --type=volume myvolume +``` + +## Examples + +### Get an instance's IP address + +For the most part, you can pick out any field from the JSON in a fairly +straightforward manner. 
+ +```bash +$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID +``` + +### Get an instance's MAC address + +```bash +$ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID +``` + +### Get an instance's log path + +```bash +$ docker inspect --format='{{.LogPath}}' $INSTANCE_ID +``` + +### Get an instance's image name + +```bash +$ docker inspect --format='{{.Config.Image}}' $INSTANCE_ID +``` + +### List all port bindings + +You can loop over arrays and maps in the results to produce simple text +output: + +```bash +$ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID +``` + +### Find a specific port mapping + +The `.Field` syntax doesn't work when the field name begins with a +number, but the template language's `index` function does. The +`.NetworkSettings.Ports` section contains a map of the internal port +mappings to a list of external address/port objects. To grab just the +numeric public port, you use `index` to find the specific port map, and +then `index` 0 contains the first object inside of that. Then we ask for +the `HostPort` field to get the public address. + +```bash +$ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID +``` + +### Get a subsection in JSON format + +If you request a field which is itself a structure containing other +fields, by default you get a Go-style dump of the inner values. +Docker adds a template function, `json`, which can be applied to get +results in JSON format. + +```bash +$ docker inspect --format='{{json .Config}}' $INSTANCE_ID +``` diff --git a/docs/reference/commandline/kill.md b/docs/reference/commandline/kill.md new file mode 100644 index 0000000000..97b15add9b --- /dev/null +++ b/docs/reference/commandline/kill.md @@ -0,0 +1,35 @@ +--- +title: "kill" +description: "The kill command description and usage" +keywords: "container, kill, signal" +--- + + + +# kill + +```markdown +Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] + +Kill one or more running containers + +Options: + --help Print usage + -s, --signal string Signal to send to the container (default "KILL") +``` + +## Description + +The main process inside the container will be sent `SIGKILL`, or any +signal specified with option `--signal`. + +> **Note**: `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of +> `/bin/sh -c`, which does not pass signals. This means that the executable is +> not the container’s PID 1 and does not receive Unix signals. diff --git a/docs/reference/commandline/load.md b/docs/reference/commandline/load.md new file mode 100644 index 0000000000..3ce6c19e24 --- /dev/null +++ b/docs/reference/commandline/load.md @@ -0,0 +1,62 @@ +--- +title: "load" +description: "The load command description and usage" +keywords: "stdin, tarred, repository" +--- + + + +# load + +```markdown +Usage: docker load [OPTIONS] + +Load an image from a tar archive or STDIN + +Options: + --help Print usage + -i, --input string Read from tar archive file, instead of STDIN. + The tarball may be compressed with gzip, bzip, or xz + -q, --quiet Suppress the load output but still outputs the imported images +``` +## Description + +`docker load` loads a tarred repository from a file or the standard input stream. +It restores both images and tags. 
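+
+The archive is typically one produced by `docker save`. As a sketch, using a
+hypothetical `busybox` image and file name:
+
+```bash
+# save an image to a gzip-compressed tarball, then load it back
+$ docker save busybox | gzip > busybox.tar.gz
+$ docker load < busybox.tar.gz
+```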
+
+## Examples
+
+```bash
+$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+
+$ docker load < busybox.tar.gz
+
+Loaded image: busybox:latest
+$ docker images
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+
+$ docker load --input fedora.tar
+
+Loaded image: fedora:rawhide
+
+Loaded image: fedora:20
+
+$ docker images
+
+REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+busybox             latest              769b9341d937        7 weeks ago         2.489 MB
+fedora              rawhide             0d20aec6529d        7 weeks ago         387 MB
+fedora              20                  58394af37342        7 weeks ago         385.5 MB
+fedora              heisenbug           58394af37342        7 weeks ago         385.5 MB
+fedora              latest              58394af37342        7 weeks ago         385.5 MB
+```
diff --git a/docs/reference/commandline/login.md b/docs/reference/commandline/login.md
new file mode 100644
index 0000000000..0b8e697281
--- /dev/null
+++ b/docs/reference/commandline/login.md
@@ -0,0 +1,158 @@
+---
+title: "login"
+description: "The login command description and usage"
+keywords: "registry, login, image"
+---
+
+
+# login
+
+```markdown
+Usage: docker login [OPTIONS] [SERVER]
+
+Log in to a Docker registry.
+If no server is specified, the default is defined by the daemon.
+
+Options:
+      --help              Print usage
+  -p, --password string   Password
+  -u, --username string   Username
+```
+
+## Description
+
+Log in to a registry.
+
+### Log in to a self-hosted registry
+
+If you want to log in to a self-hosted registry, you can specify this by
+adding the server name.
+
+```bash
+$ docker login localhost:8080
+```
+
+### Privileged user requirement
+
+`docker login` requires the user to use `sudo` or be `root`, except when:
+
+1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`.
+2. the user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details.
+
+You can log in to any public or private repository for which you have
+credentials. When you log in, the command stores encoded credentials in
+`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows.
+
+### Credentials store
+
+The Docker Engine can keep user credentials in an external credentials store,
+such as the native keychain of the operating system. Using an external store
+is more secure than storing credentials in the Docker configuration file.
+
+To use a credentials store, you need an external helper program to interact
+with a specific keychain or external store. Docker requires the helper
+program to be in the client's host `$PATH`.
+
+This is the list of currently available credential helpers and where
+you can download them from:
+
+- D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases
+- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases
+- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases
+
+You need to specify the credentials store in `$HOME/.docker/config.json`
+to tell the docker engine to use it. The value of the config property should be
+the suffix of the program to use (i.e. everything after `docker-credential-`).
+For example, to use `docker-credential-osxkeychain`:
+
+```json
+{
+  "credsStore": "osxkeychain"
+}
+```
+
+If you are currently logged in, run `docker logout` to remove
+the credentials from the file and run `docker login` again.
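+
+As a rough sketch, assuming the `docker-credential-osxkeychain` helper is already
+installed and available on `$PATH`, switching to the credentials store looks like this:
+
+```bash
+# remove the credentials currently stored in the plain config file
+$ docker logout
+
+# add "credsStore": "osxkeychain" to ~/.docker/config.json, then authenticate again;
+# the new credentials are written to the keychain instead of the config file
+$ docker login
+```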
+ +### Credential helper protocol + +Credential helpers can be any program or script that follows a very simple protocol. +This protocol is heavily inspired by Git, but it differs in the information shared. + +The helpers always use the first argument in the command to identify the action. +There are only three possible values for that argument: `store`, `get`, and `erase`. + +The `store` command takes a JSON payload from the standard input. That payload carries +the server address, to identify the credential, the user name, and either a password +or an identity token. + +```json +{ + "ServerURL": "https://index.docker.io/v1", + "Username": "david", + "Secret": "passw0rd1" +} +``` + +If the secret being stored is an identity token, the Username should be set to +``. + +The `store` command can write error messages to `STDOUT` that the docker engine +will show if there was an issue. + +The `get` command takes a string payload from the standard input. That payload carries +the server address that the docker engine needs credentials for. This is +an example of that payload: `https://index.docker.io/v1`. + +The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name +and password from this payload: + +```json +{ + "Username": "david", + "Secret": "passw0rd1" +} +``` + +The `erase` command takes a string payload from `STDIN`. That payload carries +the server address that the docker engine wants to remove credentials for. This is +an example of that payload: `https://index.docker.io/v1`. + +The `erase` command can write error messages to `STDOUT` that the docker engine +will show if there was an issue. + +### Credential helpers + +Credential helpers are similar to the credential store above, but act as the +designated programs to handle credentials for *specific registries*. The default +credential store (`credsStore` or the config file itself) will not be used for +operations concerning credentials of the specified registries. + +### Logging out + +If you are currently logged in, run `docker logout` to remove +the credentials from the default store. + +Credential helpers are specified in a similar way to `credsStore`, but +allow for multiple helpers to be configured at a time. Keys specify the +registry domain, and values specify the suffix of the program to use +(i.e. everything after `docker-credential-`). +For example: + +```json +{ + "credHelpers": { + "registry.example.com": "registryhelper", + "awesomereg.example.org": "hip-star", + "unicorn.example.io": "vcbait" + } +} +``` diff --git a/docs/reference/commandline/logout.md b/docs/reference/commandline/logout.md new file mode 100644 index 0000000000..1e150eb848 --- /dev/null +++ b/docs/reference/commandline/logout.md @@ -0,0 +1,32 @@ +--- +title: "logout" +description: "The logout command description and usage" +keywords: "logout, docker, registry" +--- + + + +# logout + +```markdown +Usage: docker logout [SERVER] + +Log out from a Docker registry. +If no server is specified, the default is defined by the daemon. 
+ +Options: + --help Print usage +``` + +## Examples + +```bash +$ docker logout localhost:8080 +``` diff --git a/docs/reference/commandline/logs.md b/docs/reference/commandline/logs.md new file mode 100644 index 0000000000..75f25f7657 --- /dev/null +++ b/docs/reference/commandline/logs.md @@ -0,0 +1,68 @@ +--- +title: "logs" +description: "The logs command description and usage" +keywords: "logs, retrieve, docker" +--- + + + +# logs + +```markdown +Usage: docker logs [OPTIONS] CONTAINER + +Fetch the logs of a container + +Options: + --details Show extra details provided to logs + -f, --follow Follow log output + --help Print usage + --since string Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes) + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +## Description + +The `docker logs` command batch-retrieves logs present at the time of execution. + +> **Note**: this command is only functional for containers that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker logs --follow` command will continue streaming the new output from +the container's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. + +The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. + +The `docker logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +container. + +The `--since` option shows only the container logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date +format you may also use RFC3339Nano, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. You can combine the +`--since` option with either or both of the `--follow` or `--tail` options. 
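+
+## Examples
+
+As a small sketch, assuming a container named `web` (any container name or ID works),
+the following follows the last 100 log lines, showing only entries from the past
+30 minutes:
+
+```bash
+$ docker logs --since 30m --tail 100 --follow web
+```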
diff --git a/docs/reference/commandline/network.md b/docs/reference/commandline/network.md new file mode 100644 index 0000000000..4555740dad --- /dev/null +++ b/docs/reference/commandline/network.md @@ -0,0 +1,51 @@ +--- +title: "network" +description: "The network command description and usage" +keywords: "network" +--- + + + +# network + +```markdown +Usage: docker network COMMAND + +Manage networks + +Options: + --help Print usage + +Commands: + connect Connect a container to a network + create Create a network + disconnect Disconnect a container from a network + inspect Display detailed information on one or more networks + ls List networks + prune Remove all unused networks + rm Remove one or more networks + +Run 'docker network COMMAND --help' for more information on a command. +``` + +## Description + +Manage networks. You can use subcommands to create, inspect, list, remove, +prune, connect, and disconnect networks. + +## Related commands + +* [network create](network_create.md) +* [network inspect](network_inspect.md) +* [network list](network_list.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [network connect](network_connect.md) +* [network disconnect](network_disconnect.md) diff --git a/docs/reference/commandline/network_connect.md b/docs/reference/commandline/network_connect.md new file mode 100644 index 0000000000..ba01fc6add --- /dev/null +++ b/docs/reference/commandline/network_connect.md @@ -0,0 +1,117 @@ +--- +title: "network connect" +description: "The network connect command description and usage" +keywords: "network, connect, user-defined" +--- + + + +# network connect + +```markdown +Usage: docker network connect [OPTIONS] NETWORK CONTAINER + +Connect a container to a network + +Options: + --alias value Add network-scoped alias for the container (default []) + --help Print usage + --ip string IPv4 address (e.g., 172.30.100.104) + --ip6 string IPv6 address (e.g., 2001:db8::33) + --link value Add link to another container (default []) + --link-local-ip value Add a link-local address for the container (default []) +``` + +## Description + +Connects a container to a network. You can connect a container by name +or by ID. Once connected, the container can communicate with other containers in +the same network. + +## Examples + +### Connect a running container to a network + +```bash +$ docker network connect multi-host-network container1 +``` + +### Connect a container to a network when it starts + +You can also use the `docker run --network=` option to start a container and immediately connect it to a network. + +```bash +$ docker run -itd --network=multi-host-network busybox +``` + +### Specify the IP address a container will use on a given network + +You can specify the IP address you want to be assigned to the container's interface. + +```bash +$ docker network connect --ip 10.10.36.122 multi-host-network container2 +``` + +### Use the legacy `--link` option + +You can use `--link` option to link another container with a preferred alias + +```bash +$ docker network connect --link container1:c1 multi-host-network container2 +``` + +### Create a network alias for a container + +`--alias` option can be used to resolve the container by another name in the network +being connected to. + +```bash +$ docker network connect --alias db --alias mysql multi-host-network container2 +``` + +### Network implications of stopping, pausing, or restarting containers + +You can pause, restart, and stop containers that are connected to a network. 
+A container connects to its configured networks when it runs. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. + +```bash +$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network +``` + +```bash +$ docker network connect --ip 172.20.128.2 multi-host-network container2 +``` + +To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. + +Once connected in network, containers can communicate using only another +container's IP address or name. For `overlay` networks or custom plugins that +support multi-host connectivity, containers connected to the same multi-host +network but launched from different Engines can also communicate in this way. + +You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. + +## Related commands + +* [network inspect](network_inspect.md) +* [network create](network_create.md) +* [network disconnect](network_disconnect.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [Work with networks](https://docs.docker.com/engine/userguide/networking/work-with-networks/) diff --git a/docs/reference/commandline/network_create.md b/docs/reference/commandline/network_create.md new file mode 100644 index 0000000000..099cc1439a --- /dev/null +++ b/docs/reference/commandline/network_create.md @@ -0,0 +1,227 @@ +--- +title: "network create" +description: "The network create command description and usage" +keywords: "network, create" +--- + + + +# network create + +```markdown +Usage: docker network create [OPTIONS] NETWORK + +Create a network + +Options: + --attachable Enable manual container attachment + --ingress Specify the network provides the routing-mesh + --aux-address value Auxiliary IPv4 or IPv6 addresses used by Network + driver (default map[]) + -d, --driver string Driver to manage the Network (default "bridge") + --gateway value IPv4 or IPv6 Gateway for the master subnet (default []) + --help Print usage + --internal Restrict external access to the network + --ip-range value Allocate container ip from a sub-range (default []) + --ipam-driver string IP Address Management Driver (default "default") + --ipam-opt value Set IPAM driver specific options (default map[]) + --ipv6 Enable IPv6 networking + --label value Set metadata on a network (default []) + -o, --opt value Set driver specific options (default map[]) + --subnet value Subnet in CIDR format that represents a + network segment (default []) + --scope value Promote a network to swarm scope (value = [ local | swarm ]) + --config-only Creates a configuration only network + --config-from The name of the network from which copying the configuration +``` + +## Description + +Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the +built-in network drivers. 
If you have installed a third party or your own custom +network driver you can specify that `DRIVER` here also. If you don't specify the +`--driver` option, the command automatically creates a `bridge` network for you. +When you install Docker Engine it creates a `bridge` network automatically. This +network corresponds to the `docker0` bridge that Engine has traditionally relied +on. When you launch a new container with `docker run` it automatically connects to +this bridge network. You cannot remove this default bridge network, but you can +create new ones using the `network create` command. + +```bash +$ docker network create -d bridge my-bridge-network +``` + +Bridge networks are isolated networks on a single Engine installation. If you +want to create a network that spans multiple Docker hosts each running an +Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay +networks require some pre-existing conditions before you can create one. These +conditions are: + +* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores. +* A cluster of hosts with connectivity to the key-value store. +* A properly configured Engine `daemon` on each host in the cluster. + +The `dockerd` options that support the `overlay` network are: + +* `--cluster-store` +* `--cluster-store-opt` +* `--cluster-advertise` + +To read more about these options and how to configure them, see ["*Get started +with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay). + +While not required, it is a good idea to install Docker Swarm to +manage the cluster that makes up your network. Swarm provides sophisticated +discovery and server management tools that can assist your implementation. + +Once you have prepared the `overlay` network prerequisites you simply choose a +Docker host in the cluster and issue the following to create the network: + +```bash +$ docker network create -d overlay my-multihost-network +``` + +Network names must be unique. The Docker daemon attempts to identify naming +conflicts but this is not guaranteed. It is the user's responsibility to avoid +name conflicts. + +## Examples + +### Connect containers + +When you start a container, use the `--network` flag to connect it to a network. +This example adds the `busybox` container to the `mynet` network: + +```bash +$ docker run -itd --network=mynet busybox +``` + +If you want to add a container to a network after the container is already +running, use the `docker network connect` subcommand. + +You can connect multiple containers to the same network. Once connected, the +containers can communicate using only another container's IP address or name. +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. + +You can disconnect a container from a network using the `docker network +disconnect` command. + +### Specify advanced options + +When you create a network, Engine creates a non-overlapping subnetwork for the +network by default. This subnetwork is not a subdivision of an existing +network. It is purely for ip-addressing purposes. You can override this default +and specify subnetwork values directly using the `--subnet` option. 
On a
+`bridge` network you can only create a single subnet:
+
+```bash
+$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0
+```
+
+Additionally, you can also specify the `--gateway`, `--ip-range`, and `--aux-address`
+options.
+
+```bash
+$ docker network create \
+  --driver=bridge \
+  --subnet=172.28.0.0/16 \
+  --ip-range=172.28.5.0/24 \
+  --gateway=172.28.5.254 \
+  br0
+```
+
+If you omit the `--gateway` flag, the Engine selects one for you from inside a
+preferred pool. For `overlay` networks and for network driver plugins that
+support it you can create multiple subnetworks.
+
+```bash
+$ docker network create -d overlay \
+  --subnet=192.168.0.0/16 \
+  --subnet=192.170.0.0/16 \
+  --gateway=192.168.0.100 \
+  --gateway=192.170.0.100 \
+  --ip-range=192.168.1.0/24 \
+  --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \
+  --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \
+  my-multihost-network
+```
+
+Be sure that your subnetworks do not overlap. If they do, the network creation
+fails and the Engine returns an error.
+
+### Bridge driver options
+
+When creating a custom network, the default network driver (i.e. `bridge`) has
+additional options that can be passed. The following are those options and the
+equivalent docker daemon flags used for the docker0 bridge:
+
+| Option                                            | Equivalent  | Description                                            |
+|---------------------------------------------------|-------------|--------------------------------------------------------|
+| `com.docker.network.bridge.name`                  | -           | Bridge name to be used when creating the Linux bridge  |
+| `com.docker.network.bridge.enable_ip_masquerade`  | `--ip-masq` | Enable IP masquerading                                 |
+| `com.docker.network.bridge.enable_icc`            | `--icc`     | Enable or Disable Inter Container Connectivity         |
+| `com.docker.network.bridge.host_binding_ipv4`     | `--ip`      | Default IP when binding container ports                |
+| `com.docker.network.driver.mtu`                   | `--mtu`     | Set the containers network MTU                         |
+
+The following arguments can be passed to `docker network create` for any
+network driver, again with their approximate equivalents to `docker daemon`.
+
+| Argument     | Equivalent     | Description                                |
+|--------------|----------------|--------------------------------------------|
+| `--gateway`  | -              | IPv4 or IPv6 Gateway for the master subnet |
+| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range                  |
+| `--internal` | -              | Restrict external access to the network    |
+| `--ipv6`     | `--ipv6`       | Enable IPv6 networking                     |
+| `--subnet`   | `--bip`        | Subnet for network                         |
+
+For example, let's use the `-o` or `--opt` option to specify an IP address binding
+when publishing ports:
+
+```bash
+$ docker network create \
+    -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \
+    simple-network
+```
+
+### Network internal mode
+
+By default, when you connect a container to an `overlay` network, Docker also
+connects a bridge network to it to provide external connectivity. If you want
+to create an externally isolated `overlay` network, you can specify the
+`--internal` option.
+
+### Network ingress mode
+
+You can create the network which will be used to provide the routing mesh in the
+swarm cluster. You do so by specifying `--ingress` when creating the network. Only
+one ingress network can be created at a time. The network can be removed only
+if no services depend on it. Any option available when creating an overlay network
+is also available when creating the ingress network, except the `--attachable` option.
+ +```bash +$ docker network create -d overlay \ + --subnet=10.11.0.0/16 \ + --ingress \ + --opt com.docker.network.mtu=9216 \ + --opt encrypted=true \ + my-ingress-network +``` + +## Related commands + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network disconnect](network_disconnect.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/docs/reference/commandline/network_disconnect.md b/docs/reference/commandline/network_disconnect.md new file mode 100644 index 0000000000..e855894d27 --- /dev/null +++ b/docs/reference/commandline/network_disconnect.md @@ -0,0 +1,48 @@ +--- +title: "network disconnect" +description: "The network disconnect command description and usage" +keywords: "network, disconnect, user-defined" +--- + + + +# network disconnect + +```markdown +Usage: docker network disconnect [OPTIONS] NETWORK CONTAINER + +Disconnect a container from a network + +Options: + -f, --force Force the container to disconnect from a network + --help Print usage +``` + +## Description + +Disconnects a container from a network. The container must be running to +disconnect it from the network. + +## Examples + +```bash + $ docker network disconnect multi-host-network container1 +``` + + +## Related commands + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/docs/reference/commandline/network_inspect.md b/docs/reference/commandline/network_inspect.md new file mode 100644 index 0000000000..1a856ddcb1 --- /dev/null +++ b/docs/reference/commandline/network_inspect.md @@ -0,0 +1,307 @@ +--- +title: "network inspect" +description: "The network inspect command description and usage" +keywords: "network, inspect, user-defined" +--- + + + +# network inspect + +```markdown +Usage: docker network inspect [OPTIONS] NETWORK [NETWORK...] + +Display detailed information on one or more networks + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Returns information about one or more networks. By default, this command renders +all results in a JSON object. + +## Examples + +## Inspect the `bridge` network + +Connect two containers to the default `bridge` network: + +```bash +$ sudo docker run -itd --name=container1 busybox +f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 + +$ sudo docker run -itd --name=container2 busybox +bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 +``` + +The `network inspect` command shows the containers, by id, in its +results. For networks backed by multi-host network driver, such as Overlay, +this command also shows the container endpoints in other hosts in the +cluster. These endpoints are represented as "ep-{endpoint-id}" in the output. +However, for swarm mode networks, only the endpoints that are local to the +node are shown. + +You can specify an alternate format to execute a given +template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. 
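+
+For instance, a template like the following prints just the subnet(s) of a network;
+the field names follow the JSON shown in the examples below:
+
+```bash
+$ docker network inspect --format '{{range .IPAM.Config}}{{.Subnet}} {{end}}' bridge
+```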
+ +```none +$ sudo docker network inspect bridge + +[ + { + "Name": "bridge", + "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", + "Created": "2016-10-19T04:33:30.360899459Z", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.42.1/16", + "Gateway": "172.17.42.1" + } + ] + }, + "Internal": false, + "Containers": { + "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { + "Name": "container2", + "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + }, + "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { + "Name": "container1", + "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", + "MacAddress": "02:42:ac:11:00:01", + "IPv4Address": "172.17.0.1/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": {} + } +] +``` + +### Inspect a user-defined network + +Create and inspect a user-defined network: + +```bash +$ docker network create simple-network + +69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a +``` + +```none +$ docker network inspect simple-network + +[ + { + "Name": "simple-network", + "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", + "Created": "2016-10-19T04:33:30.360899459Z", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.22.0.0/16", + "Gateway": "172.22.0.1" + } + ] + }, + "Containers": {}, + "Options": {}, + "Labels": {} + } +] +``` + +### Inspect the `ingress` network + +For swarm mode overlay networks `network inspect` also shows the IP address and node name +of the peers. Peers are the nodes in the swarm cluster which have at least one task attached +to the network. Node name is of the format `-`. + +```none +$ docker network inspect ingress + +[ + { + "Name": "ingress", + "Id": "j0izitrut30h975vk4m1u5kk3", + "Created": "2016-11-08T06:49:59.803387552Z", + "Scope": "swarm", + "Driver": "overlay", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + }, + "Internal": false, + "Attachable": false, + "Containers": { + "ingress-sbox": { + "Name": "ingress-endpoint", + "EndpointID": "40e002d27b7e5d75f60bc72199d8cae3344e1896abec5eddae9743755fe09115", + "MacAddress": "02:42:0a:ff:00:03", + "IPv4Address": "10.255.0.3/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + }, + "Labels": {}, + "Peers": [ + { + "Name": "net-1-1d22adfe4d5c", + "IP": "192.168.33.11" + }, + { + "Name": "net-2-d55d838b34af", + "IP": "192.168.33.12" + }, + { + "Name": "net-3-8473f8140bd9", + "IP": "192.168.33.13" + } + ] + } +] +``` + +### Using `verbose` option for `network inspect` + +`docker network inspect --verbose` for swarm mode overlay networks shows service-specific +details such as the service's VIP and port mappings. It also shows IPs of service tasks, +and the IPs of the nodes where the tasks are running. 
+ +Following is an example output for a overlay network `ov1` that has one service `s1` +attached to. service `s1` in this case has three replicas. + +```bash +$ docker network inspect --verbose ov1 +[ + { + "Name": "ov1", + "Id": "ybmyjvao9vtzy3oorxbssj13b", + "Created": "2017-03-13T17:04:39.776106792Z", + "Scope": "swarm", + "Driver": "overlay", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "10.0.0.0/24", + "Gateway": "10.0.0.1" + } + ] + }, + "Internal": false, + "Attachable": false, + "Containers": { + "020403bd88a15f60747fd25d1ad5fa1272eb740e8a97fc547d8ad07b2f721c5e": { + "Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o", + "EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e", + "MacAddress": "02:42:0a:00:00:04", + "IPv4Address": "10.0.0.4/24", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "4097" + }, + "Labels": {}, + "Peers": [ + { + "Name": "net-3-5d3cfd30a58c", + "IP": "192.168.33.13" + }, + { + "Name": "net-1-6ecbc0040a73", + "IP": "192.168.33.11" + }, + { + "Name": "net-2-fb80208efd75", + "IP": "192.168.33.12" + } + ], + "Services": { + "s1": { + "VIP": "10.0.0.2", + "Ports": [], + "LocalLBIndex": 257, + "Tasks": [ + { + "Name": "s1.2.q4hcq2aiiml25ubtrtg4q1txt", + "EndpointID": "040879b027e55fb658e8b60ae3b87c6cdac7d291e86a190a3b5ac6567b26511a", + "EndpointIP": "10.0.0.5", + "Info": { + "Host IP": "192.168.33.11" + } + }, + { + "Name": "s1.3.yawl4cgkp7imkfx469kn9j6lm", + "EndpointID": "106edff9f120efe44068b834e1cddb5b39dd4a3af70211378b2f7a9e562bbad8", + "EndpointIP": "10.0.0.3", + "Info": { + "Host IP": "192.168.33.12" + } + }, + { + "Name": "s1.1.pjn2ik0sfgkfzed3h0s00gs9o", + "EndpointID": "ad16946f416562d658f3bb30b9830d73ad91ccf6feae44411269cd0ff674714e", + "EndpointIP": "10.0.0.4", + "Info": { + "Host IP": "192.168.33.13" + } + } + ] + } + } + } +] +``` + +## Related commands + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/docs/reference/commandline/network_ls.md b/docs/reference/commandline/network_ls.md new file mode 100644 index 0000000000..8bb8a2c48a --- /dev/null +++ b/docs/reference/commandline/network_ls.md @@ -0,0 +1,250 @@ +--- +title: "network ls" +description: "The network ls command description and usage" +keywords: "network, list, user-defined" +--- + + + +# docker network ls + +```markdown +Usage: docker network ls [OPTIONS] + +List networks + +Aliases: + ls, list + +Options: + -f, --filter filter Provide filter values (e.g. 'driver=bridge') + --format string Pretty-print networks using a Go template + --help Print usage + --no-trunc Do not truncate the output + -q, --quiet Only display network IDs +``` + +## Description + +Lists all the networks the Engine `daemon` knows about. This includes the +networks that span across multiple hosts in a cluster. 
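+
+Combined with `-q`/`--quiet`, the listing reduces to bare network IDs, which is handy
+for scripting; the filters described below can be applied at the same time. A small
+sketch:
+
+```bash
+$ docker network ls --quiet --filter driver=bridge
+```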
+ +## Examples + +### List all networks + +```bash +$ sudo docker network ls +NETWORK ID NAME DRIVER SCOPE +7fca4eb8c647 bridge bridge local +9f904ee27bf5 none null local +cf03ee007fb4 host host local +78b03ee04fc4 multi-host overlay swarm +``` + +Use the `--no-trunc` option to display the full network id: + +```bash +$ docker network ls --no-trunc +NETWORK ID NAME DRIVER SCOPE +18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null local +c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host local +7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge local +95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge local +63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge local +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. + +The currently supported filters are: + +* driver +* id (network's id) +* label (`label=` or `label==`) +* name (network's name) +* scope (`swarm|global|local`) +* type (`custom|builtin`) + +#### Driver + +The `driver` filter matches networks based on their driver. + +The following example matches networks with the `bridge` driver: + +```bash +$ docker network ls --filter driver=bridge +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +#### ID + +The `id` filter matches on all or part of a network's ID. + +The following filter matches all networks with an ID containing the +`63d1ff1f77b0...` string. + +```bash +$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 +NETWORK ID NAME DRIVER SCOPE +63d1ff1f77b0 dev bridge local +``` + +You can also filter for a substring in an ID as this shows: + +```bash +$ docker network ls --filter id=95e74588f40d +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local + +$ docker network ls --filter id=95e +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +``` + +#### Label + +The `label` filter matches networks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches networks with the `usage` label regardless of its value. + +```bash +$ docker network ls -f "label=usage" +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +The following filter matches networks with the `usage` label with the `prod` value. + +```bash +$ docker network ls -f "label=usage=prod" +NETWORK ID NAME DRIVER SCOPE +f6e212da9dfd test2 bridge local +``` + +#### Name + +The `name` filter matches on all or part of a network's name. + +The following filter matches all networks with a name containing the `foobar` string. + +```bash +$ docker network ls --filter name=foobar +NETWORK ID NAME DRIVER SCOPE +06e7eef0a170 foobar bridge local +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker network ls --filter name=foo +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +06e7eef0a170 foobar bridge local +``` + +#### Scope + +The `scope` filter matches networks based on their scope. 
+ +The following example matches networks with the `swarm` scope: + +```bash +$ docker network ls --filter scope=swarm +NETWORK ID NAME DRIVER SCOPE +xbtm0v4f1lfh ingress overlay swarm +ic6r88twuu92 swarmnet overlay swarm +``` + +The following example matches networks with the `local` scope: + +```bash +$ docker network ls --filter scope=local +NETWORK ID NAME DRIVER SCOPE +e85227439ac7 bridge bridge local +0ca0e19443ed host host local +ca13cc149a36 localnet bridge local +f9e115d2de35 none null local +``` + +#### Type + +The `type` filter supports two values; `builtin` displays predefined networks +(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. + +The following filter matches all user defined networks: + +```bash +$ docker network ls --filter type=custom +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +63d1ff1f77b0 dev bridge local +``` + +By having this flag it allows for batch cleanup. For example, use this filter +to delete all user defined networks: + +```bash +$ docker network rm `docker network ls --filter type=custom -q` +``` + +A warning will be issued when trying to remove a network that has containers +attached. + +### Formatting + +The formatting options (`--format`) pretty-prints networks output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +-------------|------------------------------------------------------------------------------------------ +`.ID` | Network ID +`.Name` | Network name +`.Driver` | Network driver +`.Scope` | Network scope (local, global) +`.IPv6` | Whether IPv6 is enabled on the network or not. +`.Internal` | Whether the network is internal or not. +`.Labels` | All labels assigned to the network. +`.Label` | Value of a specific label for this network. For example `{{.Label "project.version"}}` +`.CreatedAt` | Time when the network was created + +When using the `--format` option, the `network ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID` and `Driver` entries separated by a colon for all networks: + +```bash +$ docker network ls --format "{{.ID}}: {{.Driver}}" +afaaab448eb2: bridge +d1584f8dc718: host +391df270dc66: null +``` + +## Related commands + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/docs/reference/commandline/network_prune.md b/docs/reference/commandline/network_prune.md new file mode 100644 index 0000000000..9ef541061d --- /dev/null +++ b/docs/reference/commandline/network_prune.md @@ -0,0 +1,104 @@ +--- +title: "network prune" +description: "Remove unused networks" +keywords: "network, prune, delete" +--- + +# network prune + +```markdown +Usage: docker network prune [OPTIONS] + +Remove all unused networks + +Options: + --filter filter Provide filter values (e.g. 'until=') + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Remove all unused networks. Unused networks are those which are not referenced +by any containers. + +## Examples + +```bash +$ docker network prune + +WARNING! 
This will remove all networks not used by at least one container. +Are you sure you want to continue? [y/N] y +Deleted Networks: +n1 +n2 +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* until (``) - only remove networks created before given timestamp +* label (`label=`, `label==`, `label!=`, or `label!==`) - only remove networks with (or without, in case `label!=...` is used) the specified labels. + +The `until` filter can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the daemon machine’s time. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the daemon will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +The `label` filter accepts two formats. One is the `label=...` (`label=` or `label==`), +which removes networks with the specified labels. The other +format is the `label!=...` (`label!=` or `label!==`), which removes +networks without the specified labels. + +The following removes networks created more than 5 minutes ago. Note that +system networks such as `bridge`, `host`, and `none` will never be pruned: + +```none +$ docker network ls + +NETWORK ID NAME DRIVER SCOPE +7430df902d7a bridge bridge local +ea92373fd499 foo-1-day-ago bridge local +ab53663ed3c7 foo-1-min-ago bridge local +97b91972bc3b host host local +f949d337b1f5 none null local + +$ docker network prune --force --filter until=5m + +Deleted Networks: +foo-1-day-ago + +$ docker network ls + +NETWORK ID NAME DRIVER SCOPE +7430df902d7a bridge bridge local +ab53663ed3c7 foo-1-min-ago bridge local +97b91972bc3b host host local +f949d337b1f5 none null local +``` + +## Related commands + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [volume prune](volume_prune.md) +* [system prune](system_prune.md) diff --git a/docs/reference/commandline/network_rm.md b/docs/reference/commandline/network_rm.md new file mode 100644 index 0000000000..aab487a044 --- /dev/null +++ b/docs/reference/commandline/network_rm.md @@ -0,0 +1,68 @@ +--- +title: "network rm" +description: "the network rm command description and usage" +keywords: "network, rm, user-defined" +--- + + + +# network rm + +```markdown +Usage: docker network rm NETWORK [NETWORK...] + +Remove one or more networks + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +## Description + +Removes one or more networks by name or identifier. To remove a network, +you must first disconnect any containers connected to it. 
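+
+As a sketch, with a hypothetical network `my-network` that still has a container
+`web` attached to it:
+
+```bash
+# detach the container first, then remove the network
+$ docker network disconnect my-network web
+$ docker network rm my-network
+```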
+ +## Examples + +### Remove a network + +To remove the network named 'my-network': + +```bash + $ docker network rm my-network +``` + +### Remove multiple networks + +To delete multiple networks in a single `docker network rm` command, provide +multiple network names or ids. The following example deletes a network with id +`3695c422697f` and a network named `my-network`: + +```bash + $ docker network rm 3695c422697f my-network +``` + +When you specify multiple networks, the command attempts to delete each in turn. +If the deletion of one network fails, the command continues to the next on the +list and tries to delete that. The command reports success or failure for each +deletion. + +## Related commands + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/docs/reference/commandline/node.md b/docs/reference/commandline/node.md new file mode 100644 index 0000000000..3a7d4b3a76 --- /dev/null +++ b/docs/reference/commandline/node.md @@ -0,0 +1,42 @@ + +--- +title: "node" +description: "The node command description and usage" +keywords: "node" +--- + + + +# node + +```markdown +Usage: docker node COMMAND + +Manage Swarm nodes + +Options: + --help Print usage + +Commands: + demote Demote one or more nodes from manager in the swarm + inspect Display detailed information on one or more nodes + ls List nodes in the swarm + promote Promote one or more nodes to manager in the swarm + ps List tasks running on one or more nodes, defaults to current node + rm Remove one or more nodes from the swarm + update Update a node + +Run 'docker node COMMAND --help' for more information on a command. +``` + +## Description + +Manage nodes. + diff --git a/docs/reference/commandline/node_demote.md b/docs/reference/commandline/node_demote.md new file mode 100644 index 0000000000..e6e59d8945 --- /dev/null +++ b/docs/reference/commandline/node_demote.md @@ -0,0 +1,47 @@ +--- +title: "node demote" +description: "The node demote command description and usage" +keywords: "node, demote" +--- + + + +# node demote + +```markdown +Usage: docker node demote NODE [NODE...] + +Demote one or more nodes from manager in the swarm + +Options: + --help Print usage + +``` + +## Description + +Demotes an existing manager so that it is no longer a manager. This command +targets a docker engine that is a manager in the swarm. + + +## Examples + +```bash +$ docker node demote +``` + +## Related commands + +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/docs/reference/commandline/node_inspect.md b/docs/reference/commandline/node_inspect.md new file mode 100644 index 0000000000..6ee9e17e6f --- /dev/null +++ b/docs/reference/commandline/node_inspect.md @@ -0,0 +1,167 @@ +--- +title: "node inspect" +description: "The node inspect command description and usage" +keywords: "node, inspect" +--- + + + +# node inspect + +```markdown +Usage: docker node inspect [OPTIONS] self|NODE [NODE...] 
+ +Display detailed information on one or more nodes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format +``` + +## Description + +Returns information about a node. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +## Examples + +### Inspect a node + +```none +$ docker node inspect swarm-manager + +[ +{ + "ID": "e216jshn25ckzbvmwlnh5jr3g", + "Version": { + "Index": 10 + }, + "CreatedAt": "2017-05-16T22:52:44.9910662Z", + "UpdatedAt": "2017-05-16T22:52:45.230878043Z", + "Spec": { + "Role": "manager", + "Availability": "active" + }, + "Description": { + "Hostname": "swarm-manager", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 1000000000, + "MemoryBytes": 1039843328 + }, + "Engine": { + "EngineVersion": "17.06.0-ce", + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "overlay" + }, + { + "Type": "Network", + "Name": "null" + }, + { + "Type": "Network", + "Name": "host" + }, + { + "Type": "Network", + "Name": "bridge" + }, + { + "Type": "Network", + "Name": "overlay" + } + ] + }, + "TLSInfo": { + "TrustRoot": "-----BEGIN CERTIFICATE-----\nMIIBazCCARCgAwIBAgIUOzgqU4tA2q5Yv1HnkzhSIwGyIBswCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAyMDAyNDAwWhcNMzcwNDI3MDAy\nNDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABMbiAmET+HZyve35ujrnL2kOLBEQhFDZ5MhxAuYs96n796sFlfxTxC1lM/2g\nAh8DI34pm3JmHgZxeBPKUURJHKWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBS3sjTJOcXdkls6WSY2rTx1KIJueTAKBggqhkjO\nPQQDAgNJADBGAiEAoeVWkaXgSUAucQmZ3Yhmx22N/cq1EPBgYHOBZmHt0NkCIQC3\nzONcJ/+WA21OXtb+vcijpUOXtNjyHfcox0N8wsLDqQ==\n-----END CERTIFICATE-----\n", + "CertIssuerSubject": "MBMxETAPBgNVBAMTCHN3YXJtLWNh", + "CertIssuerPublicKey": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExuICYRP4dnK97fm6OucvaQ4sERCEUNnkyHEC5iz3qfv3qwWV/FPELWUz/aACHwMjfimbcmYeBnF4E8pRREkcpQ==" + } + }, + "Status": { + "State": "ready", + "Addr": "168.0.32.137" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "168.0.32.137:2377" + } +} +] +``` + +### Specify an output format + +```none +$ docker node inspect --format '{{ .ManagerStatus.Leader }}' self + +false + +$ docker node inspect --pretty self +ID: e216jshn25ckzbvmwlnh5jr3g +Hostname: swarm-manager +Joined at: 2017-05-16 22:52:44.9910662 +0000 utc +Status: + State: Ready + Availability: Active + Address: 172.17.0.2 +Manager Status: + Address: 172.17.0.2:2377 + Raft Status: Reachable + Leader: Yes +Platform: + Operating System: linux + Architecture: x86_64 +Resources: + CPUs: 4 + Memory: 7.704 GiB +Plugins: + Network: overlay, bridge, null, host, overlay + Volume: local +Engine Version: 17.06.0-ce +TLS Info: + TrustRoot: +-----BEGIN CERTIFICATE----- +MIIBazCCARCgAwIBAgIUOzgqU4tA2q5Yv1HnkzhSIwGyIBswCgYIKoZIzj0EAwIw +EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAyMDAyNDAwWhcNMzcwNDI3MDAy +NDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH +A0IABMbiAmET+HZyve35ujrnL2kOLBEQhFDZ5MhxAuYs96n796sFlfxTxC1lM/2g +Ah8DI34pm3JmHgZxeBPKUURJHKWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBS3sjTJOcXdkls6WSY2rTx1KIJueTAKBggqhkjO +PQQDAgNJADBGAiEAoeVWkaXgSUAucQmZ3Yhmx22N/cq1EPBgYHOBZmHt0NkCIQC3 
+zONcJ/+WA21OXtb+vcijpUOXtNjyHfcox0N8wsLDqQ==
+-----END CERTIFICATE-----
+
+ Issuer Public Key: MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAExuICYRP4dnK97fm6OucvaQ4sERCEUNnkyHEC5iz3qfv3qwWV/FPELWUz/aACHwMjfimbcmYeBnF4E8pRREkcpQ==
+ Issuer Subject: MBMxETAPBgNVBAMTCHN3YXJtLWNh
+```
+
+## Related commands
+
+* [node demote](node_demote.md)
+* [node ls](node_ls.md)
+* [node promote](node_promote.md)
+* [node ps](node_ps.md)
+* [node rm](node_rm.md)
+* [node update](node_update.md)
diff --git a/docs/reference/commandline/node_ls.md b/docs/reference/commandline/node_ls.md
new file mode 100644
index 0000000000..224642c2e5
--- /dev/null
+++ b/docs/reference/commandline/node_ls.md
@@ -0,0 +1,171 @@
+---
+title: "node ls"
+description: "The node ls command description and usage"
+keywords: "node, list"
+---
+
+
+
+# node ls
+
+```markdown
+Usage:  docker node ls [OPTIONS]
+
+List nodes in the swarm
+
+Aliases:
+  ls, list
+
+Options:
+  -f, --filter filter   Filter output based on conditions provided
+      --format string   Pretty-print nodes using a Go template
+      --help            Print usage
+  -q, --quiet           Only display IDs
+```
+
+## Description
+
+Lists all the nodes that the Docker Swarm manager knows about. You can filter
+using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section
+for more information about available filter options.
+
+## Examples
+
+```bash
+$ docker node ls
+
+ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Ready   Active
+38ciaotwjuritcdtn9npbnkuz    swarm-worker1   Ready   Active
+e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Ready   Active        Leader
+```
+> **Note**:
+> In the above example output, there is a hidden column of `.Self` that indicates if the
+> node is the same node as the current docker daemon. A `*` (e.g., `e216jshn25ckzbvmwlnh5jr3g *`)
+> means this node is the current docker daemon.
+
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more
+than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`)
+
+The currently supported filters are:
+
+* [id](node_ls.md#id)
+* [label](node_ls.md#label)
+* [membership](node_ls.md#membership)
+* [name](node_ls.md#name)
+* [role](node_ls.md#role)
+
+#### id
+
+The `id` filter matches all or part of a node's id.
+
+```bash
+$ docker node ls -f id=1
+
+ID                         HOSTNAME       STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0  swarm-worker2  Ready   Active
+```
+
+#### label
+
+The `label` filter matches nodes based on engine labels and on the presence of a `label` alone or a `label` and a value. Node labels are currently not used for filtering.
+
+The following filter matches nodes with the `foo` label regardless of its value.
+
+```bash
+$ docker node ls -f "label=foo"
+
+ID                         HOSTNAME       STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0  swarm-worker2  Ready   Active
+```
+
+#### membership
+
+The `membership` filter matches nodes based on the presence of a `membership` and a value
+`accepted` or `pending`.
+
+The following filter matches nodes with the `membership` of `accepted`.
+
+```bash
+$ docker node ls -f "membership=accepted"
+
+ID                         HOSTNAME       STATUS  AVAILABILITY  MANAGER STATUS
+1bcef6utixb0l0ca7gxuivsj0  swarm-worker2  Ready   Active
+38ciaotwjuritcdtn9npbnkuz  swarm-worker1  Ready   Active
+```
+
+#### name
+
+The `name` filter matches on all or part of a node hostname.
+
+The following filter matches nodes with the hostname `swarm-manager1`.
+
+```bash
+$ docker node ls -f name=swarm-manager1
+
+ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Ready   Active        Leader
+```
+
+#### role
+
+The `role` filter matches nodes based on the presence of a `role` and a value `worker` or `manager`.
+
+The following filter matches nodes with the `manager` role.
+
+```bash
+$ docker node ls -f "role=manager"
+
+ID                           HOSTNAME        STATUS  AVAILABILITY  MANAGER STATUS
+e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Ready   Active        Leader
+```
+
+### Formatting
+
+The formatting options (`--format`) pretty-prints nodes output
+using a Go template.
+
+Valid placeholders for the Go template are listed below:
+
+Placeholder      | Description
+-----------------|------------------------------------------------------------------------------------------
+`.ID`            | Node ID
+`.Self`          | Node of the daemon (`true/false`, `true` indicates that the node is the same as the current docker daemon)
+`.Hostname`      | Node hostname
+`.Status`        | Node status
+`.Availability`  | Node availability ("active", "pause", or "drain")
+`.ManagerStatus` | Manager status of the node
+`.TLSStatus`     | TLS status of the node ("Ready", or "Needs Rotation" if the TLS certificate is signed by an old CA)
+
+When using the `--format` option, the `node ls` command will either
+output the data exactly as the template declares or, when using the
+`table` directive, includes column headers as well.
+
+The following example uses a template without headers and outputs the
+`ID`, `Hostname`, and `TLS Status` entries separated by a colon for all nodes:
+
+```bash
+$ docker node ls --format "{{.ID}}: {{.Hostname}} {{.TLSStatus}}"
+e216jshn25ckzbvmwlnh5jr3g: swarm-manager1 Ready
+35o6tiywb700jesrt3dmllaza: swarm-worker1 Needs Rotation
+```
+
+
+## Related commands
+
+* [node demote](node_demote.md)
+* [node inspect](node_inspect.md)
+* [node promote](node_promote.md)
+* [node ps](node_ps.md)
+* [node rm](node_rm.md)
+* [node update](node_update.md)
diff --git a/docs/reference/commandline/node_promote.md b/docs/reference/commandline/node_promote.md
new file mode 100644
index 0000000000..1ebbe9550c
--- /dev/null
+++ b/docs/reference/commandline/node_promote.md
@@ -0,0 +1,45 @@
+---
+title: "node promote"
+description: "The node promote command description and usage"
+keywords: "node, promote"
+---
+
+
+
+# node promote
+
+```markdown
+Usage:  docker node promote NODE [NODE...]
+
+Promote one or more nodes to manager in the swarm
+
+Options:
+      --help   Print usage
+```
+
+## Description
+
+Promotes a node to manager. This command targets a docker engine that is a
+manager in the swarm.
+
+## Examples
+
+```bash
+$ docker node promote
+```
+
+## Related commands
+
+* [node demote](node_demote.md)
+* [node inspect](node_inspect.md)
+* [node ls](node_ls.md)
+* [node ps](node_ps.md)
+* [node rm](node_rm.md)
+* [node update](node_update.md)
diff --git a/docs/reference/commandline/node_ps.md b/docs/reference/commandline/node_ps.md
new file mode 100644
index 0000000000..0bf76e0d8e
--- /dev/null
+++ b/docs/reference/commandline/node_ps.md
@@ -0,0 +1,148 @@
+---
+title: "node ps"
+description: "The node ps command description and usage"
+keywords: node, tasks, ps
+aliases: ["/engine/reference/commandline/node_tasks/"]
+---
+
+
+
+# node ps
+
+```markdown
+Usage:  docker node ps [OPTIONS] [NODE...]
+
+List tasks running on one or more nodes, defaults to current node.
+ +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print tasks using a Go template + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +## Description + +Lists all the tasks on a Node that Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. + +## Examples + +```bash +$ docker node ps swarm-manager1 +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [name](#name) +* [id](#id) +* [label](#label) +* [desired-state](#desired-state) + +#### name + +The `name` filter matches on all or part of a task's name. + +The following filter matches all tasks with a name containing the `redis` string. + +```bash +$ docker node ps -f name=redis swarm-manager1 + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds +redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +#### id + +The `id` filter matches a task's id. + +```bash +$ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1 + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds +``` + +#### label + +The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches tasks with the `usage` label regardless of its value. + +```bash +$ docker node ps -f "label=usage" + +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 10 minutes +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 9 minutes +``` + + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. + + +### Formatting + +The formatting options (`--format`) pretty-prints tasks output +using a Go template. 
+
+Valid placeholders for the Go template are listed below:
+
+Placeholder     | Description
+----------------|------------------------------------------------------------------------------------------
+`.Name`         | Task name
+`.Image`        | Task image
+`.Node`         | Node ID
+`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`)
+`.CurrentState` | Current state of the task
+`.Error`        | Error
+`.Ports`        | Task published ports
+
+When using the `--format` option, the `node ps` command will either
+output the data exactly as the template declares or, when using the
+`table` directive, includes column headers as well.
+
+The following example uses a template without headers and outputs the
+`Name` and `Image` entries separated by a colon for all tasks:
+
+```bash
+$ docker node ps --format "{{.Name}}: {{.Image}}"
+top.1: busybox
+top.2: busybox
+top.3: busybox
+```
+
+## Related commands
+
+* [node demote](node_demote.md)
+* [node inspect](node_inspect.md)
+* [node ls](node_ls.md)
+* [node promote](node_promote.md)
+* [node rm](node_rm.md)
+* [node update](node_update.md)
diff --git a/docs/reference/commandline/node_rm.md b/docs/reference/commandline/node_rm.md
new file mode 100644
index 0000000000..c2fdd4d156
--- /dev/null
+++ b/docs/reference/commandline/node_rm.md
@@ -0,0 +1,80 @@
+---
+title: "node rm"
+description: "The node rm command description and usage"
+keywords: "node, remove"
+---
+
+
+
+# node rm
+
+```markdown
+Usage:  docker node rm [OPTIONS] NODE [NODE...]
+
+Remove one or more nodes from the swarm
+
+Aliases:
+  rm, remove
+
+Options:
+  -f, --force   Force remove a node from the swarm
+      --help    Print usage
+```
+
+## Description
+
+When run from a manager node, removes the specified nodes from a swarm.
+
+
+## Examples
+
+### Remove a stopped node from the swarm
+
+```bash
+$ docker node rm swarm-node-02
+
+Node swarm-node-02 removed from swarm
+```
+
+### Attempt to remove a running node from a swarm
+
+Removes the specified nodes from the swarm, but only if the nodes are in the
+down state. If you attempt to remove an active node you will receive an error:
+
+```none
+$ docker node rm swarm-node-03
+
+Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not
+down and can't be removed
+```
+
+### Forcibly remove an inaccessible node from a swarm
+
+If you lose access to a worker node or need to shut it down because it has been
+compromised or is not behaving as expected, you can use the `--force` option.
+This may cause transient errors or interruptions, depending on the type of task
+being run on the node.
+
+```bash
+$ docker node rm --force swarm-node-03
+
+Node swarm-node-03 removed from swarm
+```
+
+A manager node must be demoted to a worker node (using `docker node demote`)
+before you can remove it from the swarm.
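+
+As a sketch, removing a manager node named `swarm-manager2` (the node name here
+is illustrative) would therefore look like this:
+
+```bash
+# Demote the manager to a worker first, then remove it from the swarm.
+$ docker node demote swarm-manager2
+$ docker node rm swarm-manager2
+```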
+ +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node update](node_update.md) diff --git a/docs/reference/commandline/node_update.md b/docs/reference/commandline/node_update.md new file mode 100644 index 0000000000..11117c7a9b --- /dev/null +++ b/docs/reference/commandline/node_update.md @@ -0,0 +1,77 @@ +--- +title: "node update" +description: "The node update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +# update + +```markdown +Usage: docker node update [OPTIONS] NODE + +Update a node + +Options: + --availability string Availability of the node ("active"|"pause"|"drain") + --help Print usage + --label-add value Add or update a node label (key=value) (default []) + --label-rm value Remove a node label if exists (default []) + --role string Role of the node ("worker"|"manager") +``` + +## Description + +Update metadata about a node, such as its availability, labels, or roles. + +## Examples + +### Add label metadata to a node + +Add metadata to a swarm node using node labels. You can specify a node label as +a key with an empty value: + +``` bash +$ docker node update --label-add foo worker1 +``` + +To add multiple labels to a node, pass the `--label-add` flag for each label: + +```bash +$ docker node update --label-add foo --label-add bar worker1 +``` + +When you [create a service](service_create.md), +you can use node labels as a constraint. A constraint limits the nodes where the +scheduler deploys tasks for a service. + +For example, to add a `type` label to identify nodes where the scheduler should +deploy message queue service tasks: + +``` bash +$ docker node update --label-add type=queue worker1 +``` + +The labels you set for nodes using `docker node update` apply only to the node +entity within the swarm. Do not confuse them with the docker daemon labels for +[dockerd](https://docs.docker.com/engine/userguide/labels-custom-metadata/#daemon-labels). + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +## Related commands + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) diff --git a/docs/reference/commandline/pause.md b/docs/reference/commandline/pause.md new file mode 100644 index 0000000000..5bb652b923 --- /dev/null +++ b/docs/reference/commandline/pause.md @@ -0,0 +1,48 @@ +--- +title: "pause" +description: "The pause command description and usage" +keywords: "cgroups, container, suspend, SIGSTOP" +--- + + + +# pause + +```markdown +Usage: docker pause CONTAINER [CONTAINER...] + +Pause all processes within one or more containers + +Options: + --help Print usage +``` + +## Description + +The `docker pause` command suspends all processes in the specified containers. +On Linux, this uses the cgroups freezer. Traditionally, when suspending a process +the `SIGSTOP` signal is used, which is observable by the process being suspended. +With the cgroups freezer the process is unaware, and unable to capture, +that it is being suspended, and subsequently resumed. On Windows, only Hyper-V +containers can be paused. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. 
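+
+As a quick illustration of the above, you can pause a running container and
+confirm its state with `docker ps` (the container name is illustrative):
+
+```bash
+# Start a long-running container, pause it, then list only paused containers.
+$ docker run -d --name sleeper busybox sleep 1000
+$ docker pause sleeper
+$ docker ps --filter status=paused
+```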
+ +## Examples + +```bash +$ docker pause my_container +``` + +## Related commands + +* [unpause](unpause.md) diff --git a/docs/reference/commandline/plugin.md b/docs/reference/commandline/plugin.md new file mode 100644 index 0000000000..75082477d1 --- /dev/null +++ b/docs/reference/commandline/plugin.md @@ -0,0 +1,44 @@ +--- +title: "plugin" +description: "The plugin command description and usage" +keywords: "plugin" +--- + + + +# plugin + +```markdown +Usage: docker plugin COMMAND + +Manage plugins + +Options: + --help Print usage + +Commands: + create Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. + disable Disable a plugin + enable Enable a plugin + inspect Display detailed information on one or more plugins + install Install a plugin + ls List plugins + push Push a plugin to a registry + rm Remove one or more plugins + set Change settings for a plugin + upgrade Upgrade an existing plugin + +Run 'docker plugin COMMAND --help' for more information on a command. + +``` + +## Description + +Manage plugins. diff --git a/docs/reference/commandline/plugin_create.md b/docs/reference/commandline/plugin_create.md new file mode 100644 index 0000000000..5f16ac3e43 --- /dev/null +++ b/docs/reference/commandline/plugin_create.md @@ -0,0 +1,66 @@ +--- +title: "plugin create" +description: "the plugin create command description and usage" +keywords: "plugin, create" +--- + + + +# plugin create + +```markdown +Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR + +Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. + +Options: + --compress Compress the context using gzip + --help Print usage +``` + +## Description + +Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as +[the config.json](../../extend/config.md) + +## Examples + +The following example shows how to create a sample `plugin`. + +```bash +$ ls -ls /home/pluginDir + +total 4 +4 -rw-r--r-- 1 root root 431 Nov 7 01:40 config.json +0 drwxr-xr-x 19 root root 420 Nov 7 01:40 rootfs + +$ docker plugin create plugin /home/pluginDir + +plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +672d8144ec02 plugin latest A sample plugin for Docker false +``` + +The plugin can subsequently be enabled for local use or pushed to the public registry. + +## Related commands + +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/docs/reference/commandline/plugin_disable.md b/docs/reference/commandline/plugin_disable.md new file mode 100644 index 0000000000..fa1327b0c7 --- /dev/null +++ b/docs/reference/commandline/plugin_disable.md @@ -0,0 +1,69 @@ +--- +title: "plugin disable" +description: "the plugin disable command description and usage" +keywords: "plugin, disable" +--- + + + +# plugin disable + +```markdown +Usage: docker plugin disable [OPTIONS] PLUGIN + +Disable a plugin + +Options: + -f, --force Force the disable of an active plugin + --help Print usage +``` + +## Description + +Disables a plugin. The plugin must be installed before it can be disabled, +see [`docker plugin install`](plugin_install.md). 
Without the `-f` option, +a plugin that has references (e.g., volumes, networks) cannot be disabled. + +## Examples + +The following example shows that the `sample-volume-plugin` plugin is installed +and enabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +To disable the plugin, use the following command: + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/docs/reference/commandline/plugin_enable.md b/docs/reference/commandline/plugin_enable.md new file mode 100644 index 0000000000..2098a115ad --- /dev/null +++ b/docs/reference/commandline/plugin_enable.md @@ -0,0 +1,68 @@ +--- +title: "plugin enable" +description: "the plugin enable command description and usage" +keywords: "plugin, enable" +--- + + + +# plugin enable + +```markdown +Usage: docker plugin enable [OPTIONS] PLUGIN + +Enable a plugin + +Options: + --help Print usage + --timeout int HTTP client timeout (in seconds) +``` + +## Description + +Enables a plugin. The plugin must be installed before it can be enabled, +see [`docker plugin install`](plugin_install.md). + +## Examples + +The following example shows that the `sample-volume-plugin` plugin is installed, +but disabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +To enable the plugin, use the following command: + +```bash +$ docker plugin enable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/docs/reference/commandline/plugin_inspect.md b/docs/reference/commandline/plugin_inspect.md new file mode 100644 index 0000000000..e1a6403343 --- /dev/null +++ b/docs/reference/commandline/plugin_inspect.md @@ -0,0 +1,166 @@ +--- +title: "plugin inspect" +description: "The plugin inspect command description and usage" +keywords: "plugin, inspect" +--- + + + +# plugin inspect + +```markdown +Usage: docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...] + +Display detailed information on one or more plugins + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Returns information about a plugin. By default, this command renders all results +in a JSON array. 
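+
+The `--format` option accepts a Go template, which can be used to extract a
+single field instead of the full JSON document. A minimal sketch, using the
+same plugin as the example below:
+
+```bash
+# Print only whether the plugin is currently enabled.
+$ docker plugin inspect -f '{{.Enabled}}' tiborvass/sample-volume-plugin:latest
+```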
+ +## Examples + + +```none +$ docker plugin inspect tiborvass/sample-volume-plugin:latest + +{ + "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21", + "Name": "tiborvass/sample-volume-plugin:latest", + "PluginReference": "tiborvas/sample-volume-plugin:latest", + "Enabled": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-sample-volume-plugin", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +(output formatted for readability) + +### Formatting the output + +```bash +$ docker plugin inspect -f '{{.Id}}' tiborvass/sample-volume-plugin:latest + +8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21 +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin disable](plugin_disable.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/docs/reference/commandline/plugin_install.md b/docs/reference/commandline/plugin_install.md new file mode 100644 index 0000000000..78d9a61b75 --- /dev/null +++ b/docs/reference/commandline/plugin_install.md @@ -0,0 +1,75 @@ +--- +title: "plugin install" +description: "the plugin install command description and usage" +keywords: "plugin, install" +--- + + + +# plugin install + +```markdown +Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] + +Install a plugin + +Options: + --alias string Local name for plugin + --disable Do not enable the plugin on install + --disable-content-trust Skip image verification (default true) + --grant-all-permissions Grant all permissions necessary to run the plugin + --help Print usage +``` + +## Description + +Installs and enables a plugin. Docker looks first for the plugin on your Docker +host. If the plugin does not exist locally, then the plugin is pulled from +the registry. 
Note that the minimum required registry version to distribute +plugins is 2.3.0 + +## Examples + +The following example installs `vieus/sshfs` plugin and [sets](plugin_set.md) its +`DEBUG` environment variable to `1`. To install, `pull` the plugin from Docker +Hub and prompt the user to accept the list of privileges that the plugin needs, +set the plugin's parameters and enable the plugin. + +```bash +$ docker plugin install vieux/sshfs DEBUG=1 + +Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +vieux/sshfs +``` + +After the plugin is installed, it appears in the list of plugins: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 vieux/sshfs latest sshFS plugin for Docker true +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/docs/reference/commandline/plugin_ls.md b/docs/reference/commandline/plugin_ls.md new file mode 100644 index 0000000000..3ba29fee03 --- /dev/null +++ b/docs/reference/commandline/plugin_ls.md @@ -0,0 +1,118 @@ +--- +title: "plugin ls" +description: "The plugin ls command description and usage" +keywords: "plugin, list" +--- + + + +# plugin ls + +```markdown +Usage: docker plugin ls [OPTIONS] + +List plugins + +Aliases: + ls, list + +Options: + -f, --filter filter Provide filter values (e.g. 'enabled=true') + --format string Pretty-print plugins using a Go template + --help Print usage + --no-trunc Don't truncate output + -q, --quiet Only display plugin IDs +``` + +## Description + +Lists all the plugins that are currently installed. You can install plugins +using the [`docker plugin install`](plugin_install.md) command. +You can also filter using the `-f` or `--filter` flag. +Refer to the [filtering](#filtering) section for more information about available filter options. + +## Examples + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* enabled (boolean - true or false, 0 or 1) +* capability (string - currently `volumedriver`, `networkdriver`, `ipamdriver`, `logdriver`, `metricscollector`, or `authz`) + +#### enabled + +The `enabled` filter matches on plugins enabled or disabled. + +#### capability + +The `capability` filter matches on plugin capabilities. One plugin +might have multiple capabilities. Currently `volumedriver`, `networkdriver`, +`ipamdriver`, `logdriver`, `metricscollector`, and `authz` are supported capabilities. + +```bash +$ docker plugin install --disable tiborvass/no-remove + +tiborvass/no-remove + +$ docker plugin ls --filter enabled=true + +NAME TAG DESCRIPTION ENABLED +``` + +### Formatting + +The formatting options (`--format`) pretty-prints plugins output +using a Go template. 
+ +Valid placeholders for the Go template are listed below: + +Placeholder | Description +---------------|------------------------------------------------------------------------------------------ +`.ID` | Plugin ID +`.Name` | Plugin name +`.Description` | Plugin description +`.Enabled` | Whether plugin is enabled or not +`.PluginReference` | The reference used to push/pull from a registry + +When using the `--format` option, the `plugin ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID` and `Name` entries separated by a colon for all plugins: + +```bash +$ docker plugin ls --format "{{.ID}}: {{.Name}}" + +4be01827a72e: tiborvass/no-remove +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/docs/reference/commandline/plugin_push.md b/docs/reference/commandline/plugin_push.md new file mode 100644 index 0000000000..f444ed4d4d --- /dev/null +++ b/docs/reference/commandline/plugin_push.md @@ -0,0 +1,57 @@ +--- +title: "plugin push" +description: "the plugin push command description and usage" +keywords: "plugin, push" +--- + + + +```markdown +Usage: docker plugin push [OPTIONS] PLUGIN[:TAG] + +Push a plugin to a registry + +Options: + --disable-content-trust Skip image signing (default true) + --help Print usage +``` + +## Description + +After you have created a plugin using `docker plugin create` and the plugin is +ready for distribution, use `docker plugin push` to share your images to Docker +Hub or a self-hosted registry. + +Registry credentials are managed by [docker login](login.md). + +## Examples + +The following example shows how to push a sample `user/plugin`. + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d456 user/plugin latest A sample plugin for Docker false + +$ docker plugin push user/plugin +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/docs/reference/commandline/plugin_rm.md b/docs/reference/commandline/plugin_rm.md new file mode 100644 index 0000000000..d599ed8fc1 --- /dev/null +++ b/docs/reference/commandline/plugin_rm.md @@ -0,0 +1,63 @@ +--- +title: "plugin rm" +description: "the plugin rm command description and usage" +keywords: "plugin, rm" +--- + + + +# plugin rm + +```markdown +Usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...] + +Remove one or more plugins + +Aliases: + rm, remove + +Options: + -f, --force Force the removal of an active plugin + --help Print usage +``` + +## Description + +Removes a plugin. You cannot remove a plugin if it is enabled, you must disable +a plugin using the [`docker plugin disable`](plugin_disable.md) before removing +it (or use --force, use of force is not recommended, since it can affect +functioning of running containers using the plugin). 
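+
+If you do need to remove a plugin that is still enabled, a forced removal is
+sketched below (the plugin name is illustrative):
+
+```bash
+# --force removes the plugin even if it is still enabled; running containers
+# that use the plugin may be affected.
+$ docker plugin rm --force tiborvass/sample-volume-plugin
+```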
+
+## Examples
+
+The following example disables and removes the `sample-volume-plugin:latest`
+plugin:
+
+```bash
+$ docker plugin disable tiborvass/sample-volume-plugin
+
+tiborvass/sample-volume-plugin
+
+$ docker plugin rm tiborvass/sample-volume-plugin:latest
+
+tiborvass/sample-volume-plugin
+```
+
+## Related commands
+
+* [plugin create](plugin_create.md)
+* [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
+* [plugin inspect](plugin_inspect.md)
+* [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
+* [plugin set](plugin_set.md)
+* [plugin upgrade](plugin_upgrade.md)
diff --git a/docs/reference/commandline/plugin_set.md b/docs/reference/commandline/plugin_set.md
new file mode 100644
index 0000000000..ea7d92e735
--- /dev/null
+++ b/docs/reference/commandline/plugin_set.md
@@ -0,0 +1,172 @@
+---
+title: "plugin set"
+description: "the plugin set command description and usage"
+keywords: "plugin, set"
+---
+
+
+
+# plugin set
+
+```markdown
+Usage:  docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...]
+
+Change settings for a plugin
+
+Options:
+      --help   Print usage
+```
+
+## Description
+
+Change settings for a plugin. The plugin must be disabled.
+
+The settings currently supported are:
+ * env variables
+ * source of mounts
+ * path of devices
+ * args
+
+## What is settable?
+
+The plugin manifest shows which fields are settable: look for the `settable`
+entries in the manifest.
+
+Here is an extract of a plugin manifest:
+
+```
+{
+  "config": {
+    ...
+    "args": {
+      "name": "myargs",
+      "settable": ["value"],
+      "value": ["foo", "bar"]
+    },
+    "env": [
+      {
+        "name": "DEBUG",
+        "settable": ["value"],
+        "value": "0"
+      },
+      {
+        "name": "LOGGING",
+        "value": "1"
+      }
+    ],
+    "devices": [
+      {
+        "name": "mydevice",
+        "path": "/dev/foo",
+        "settable": ["path"]
+      }
+    ],
+    "mounts": [
+      {
+        "destination": "/baz",
+        "name": "mymount",
+        "options": ["rbind"],
+        "settable": ["source"],
+        "source": "/foo",
+        "type": "bind"
+      }
+    ],
+    ...
+  }
+}
+```
+
+In this example, the `value` of the `DEBUG` environment variable and the
+`source` of the `mymount` mount are settable, as are the `path` of `mydevice`
+and the `value` of `myargs`.
+
+By contrast, the `LOGGING` environment variable has no `settable` field, so the
+user cannot change it.
+
+## Examples
+
+### Change an environment variable
+
+The following example changes the `DEBUG` environment variable on the
+`sample-volume-plugin` plugin.
+
+```bash
+$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin
+[DEBUG=0]
+
+$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1
+
+$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin
+[DEBUG=1]
+```
+
+### Change the source of a mount
+
+The following example changes the source of the `mymount` mount on
+the `myplugin` plugin.
+
+```bash
+$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
+/foo
+
+$ docker plugin set myplugin mymount.source=/bar
+
+$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
+/bar
+```
+
+> **Note**: Since only `source` is settable in `mymount`,
+> `docker plugin set myplugin mymount=/bar` would work too.
+
+### Change a device path
+
+The following example changes the path of the `mydevice` device on
+the `myplugin` plugin.
+
+```bash
+$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
+
+/dev/foo
+
+$ docker plugin set myplugin mydevice.path=/dev/bar
+
+$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
+
+/dev/bar
+```
+
+> **Note**: Since only `path` is settable in `mydevice`,
+> `docker plugin set myplugin mydevice=/dev/bar` would work too.
+
+### Change the value of the arguments
+
+The following example changes the value of the args on the `myplugin` plugin.
+
+```bash
+$ docker plugin inspect -f '{{.Settings.Args}}' myplugin
+
+["foo", "bar"]
+
+$ docker plugin set myplugin myargs="foo bar baz"
+
+$ docker plugin inspect -f '{{.Settings.Args}}' myplugin
+
+["foo", "bar", "baz"]
+```
+
+## Related commands
+
+* [plugin create](plugin_create.md)
+* [plugin disable](plugin_disable.md)
+* [plugin enable](plugin_enable.md)
+* [plugin inspect](plugin_inspect.md)
+* [plugin install](plugin_install.md)
+* [plugin ls](plugin_ls.md)
+* [plugin push](plugin_push.md)
+* [plugin rm](plugin_rm.md)
+* [plugin upgrade](plugin_upgrade.md)
diff --git a/docs/reference/commandline/plugin_upgrade.md b/docs/reference/commandline/plugin_upgrade.md
new file mode 100644
index 0000000000..8c79ebdabf
--- /dev/null
+++ b/docs/reference/commandline/plugin_upgrade.md
@@ -0,0 +1,100 @@
+---
+title: "plugin upgrade"
+description: "the plugin upgrade command description and usage"
+keywords: "plugin, upgrade"
+---
+
+
+
+# plugin upgrade
+
+```markdown
+Usage:  docker plugin upgrade [OPTIONS] PLUGIN [REMOTE]
+
+Upgrade a plugin
+
+Options:
+      --disable-content-trust   Skip image verification (default true)
+      --grant-all-permissions   Grant all permissions necessary to run the plugin
+      --help                    Print usage
+      --skip-remote-check       Do not check if specified remote plugin matches existing plugin image
+```
+
+## Description
+
+Upgrades an existing plugin to the specified remote plugin image. If no remote
+is specified, Docker will re-pull the current image and use the updated version.
+All existing references to the plugin will continue to work.
+The plugin must be disabled before running the upgrade.
+
+## Examples
+
+The following example installs the `vieux/sshfs` plugin, uses it to create and
+use a volume, then upgrades the plugin.
+
+```bash
+$ docker plugin install vieux/sshfs DEBUG=1
+
+Plugin "vieux/sshfs:next" is requesting the following privileges:
+ - network: [host]
+ - device: [/dev/fuse]
+ - capabilities: [CAP_SYS_ADMIN]
+Do you grant the above permissions? [y/N] y
+vieux/sshfs:next
+
+$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume
+
+sshvolume
+
+$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello"
+
+$ docker plugin disable -f vieux/sshfs:next
+
+vieux/sshfs:next
+
+# Here docker volume ls doesn't show 'sshvolume', since the plugin is disabled
+$ docker volume ls
+
+DRIVER              VOLUME NAME
+
+$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next
+
+Plugin "vieux/sshfs:next" is requesting the following privileges:
+ - network: [host]
+ - device: [/dev/fuse]
+ - capabilities: [CAP_SYS_ADMIN]
+Do you grant the above permissions?
[y/N] y +Upgrade plugin vieux/sshfs:next to vieux/sshfs:next + +$ docker plugin enable vieux/sshfs:next + +viex/sshfs:next + +$ docker volume ls + +DRIVER VOLUME NAME +viuex/sshfs:next sshvolume + +$ docker run -it -v sshvolume:/data alpine sh -c "ls /data" + +hello +``` + +## Related commands + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) diff --git a/docs/reference/commandline/port.md b/docs/reference/commandline/port.md new file mode 100644 index 0000000000..c38763ea34 --- /dev/null +++ b/docs/reference/commandline/port.md @@ -0,0 +1,47 @@ +--- +title: "port" +description: "The port command description and usage" +keywords: "port, mapping, container" +--- + + + +# port + +```markdown +Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] + +List port mappings or a specific mapping for the container + +Options: + --help Print usage +``` + +## Examples + +### Show all mapped ports + +You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or +just a specific mapping: + +```bash +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test +$ docker port test +7890/tcp -> 0.0.0.0:4321 +9876/tcp -> 0.0.0.0:1234 +$ docker port test 7890/tcp +0.0.0.0:4321 +$ docker port test 7890/udp +2014/06/24 11:53:36 Error: No public port '7890/udp' published for test +$ docker port test 7890 +0.0.0.0:4321 +``` diff --git a/docs/reference/commandline/ps.md b/docs/reference/commandline/ps.md new file mode 100644 index 0000000000..51bab4834d --- /dev/null +++ b/docs/reference/commandline/ps.md @@ -0,0 +1,432 @@ +--- +title: "ps" +description: "The ps command description and usage" +keywords: "container, running, list" +--- + + + +# ps + +```markdown +Usage: docker ps [OPTIONS] + +List containers + +Options: + -a, --all Show all containers (default shows just running) + -f, --filter value Filter output based on conditions provided (default []) + - ancestor=([:tag]||) + containers created from an image or a descendant. + - before=(|) + - expose=([/]|/[]) + - exited= an exit code of + - health=(starting|healthy|unhealthy|none) + - id= a container's ID + - isolation=(`default`|`process`|`hyperv`) (Windows daemon only) + - is-task=(true|false) + - label= or label== + - name= a container's name + - network=(|) + - publish=([/]|/[]) + - since=(|) + - status=(created|restarting|removing|running|paused|exited) + - volume=(|) + --format string Pretty-print containers using a Go template + --help Print usage + -n, --last int Show n last created containers (includes all states) (default -1) + -l, --latest Show the latest created container (includes all states) + --no-trunc Don't truncate output + -q, --quiet Only display numeric IDs + -s, --size Display total file sizes +``` + +## Examples + +### Prevent truncating output + +Running `docker ps --no-trunc` showing 2 linked containers. 
+
+```bash
+$ docker ps
+
+CONTAINER ID        IMAGE                        COMMAND                CREATED              STATUS              PORTS               NAMES
+4c01db0b339c        ubuntu:12.04                 bash                   17 seconds ago       Up 16 seconds       3300-3310/tcp       webapp
+d7886598dbe2        crosbymichael/redis:latest   /redis-server --dir    33 minutes ago       Up 33 minutes       6379/tcp            redis,webapp/db
+```
+
+### Show both running and stopped containers
+
+The `docker ps` command only shows running containers by default. To see all
+containers, use the `-a` (or `--all`) flag:
+
+```bash
+$ docker ps -a
+```
+
+`docker ps` groups exposed ports into a single range if possible. E.g., a
+container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in
+the `PORTS` column.
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
+than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`)
+
+The currently supported filters are:
+
+* id (container's id)
+* label (`label=<key>` or `label=<key>=<value>`)
+* name (container's name)
+* exited (int - the code of exited containers. Only useful with `--all`)
+* status (`created|restarting|running|removing|paused|exited|dead`)
+* ancestor (`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - filters containers that were created from the given image or a descendant.
+* before (container's id or name) - filters containers created before given id or name
+* since (container's id or name) - filters containers created since given id or name
+* isolation (`default|process|hyperv`) (Windows daemon only)
+* volume (volume name or mount point) - filters containers that mount volumes.
+* network (network id or name) - filters containers connected to the provided network
+* health (starting|healthy|unhealthy|none) - filters containers based on healthcheck status
+* publish=(container's published port) - filters published ports by containers
+* expose=(container's exposed port) - filters exposed ports by containers
+
+#### label
+
+The `label` filter matches containers based on the presence of a `label` alone or a `label` and a
+value.
+
+The following filter matches containers with the `color` label regardless of its value.
+
+```bash
+$ docker ps --filter "label=color"
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+673394ef1d4c        busybox             "top"               47 seconds ago      Up 45 seconds                           nostalgic_shockley
+d85756f57265        busybox             "top"               52 seconds ago      Up 51 seconds                           high_albattani
+```
+
+The following filter matches containers with the `color` label with the `blue` value.
+
+```bash
+$ docker ps --filter "label=color=blue"
+
+CONTAINER ID        IMAGE               COMMAND             CREATED              STATUS              PORTS               NAMES
+d85756f57265        busybox             "top"               About a minute ago   Up About a minute                       high_albattani
+```
+
+#### name
+
+The `name` filter matches on all or part of a container's name.
+
+The following filter matches all containers with a name containing the `nostalgic_stallman` string.
+
+```bash
+$ docker ps --filter "name=nostalgic_stallman"
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+9b6247364a03        busybox             "top"               2 minutes ago       Up 2 minutes                            nostalgic_stallman
+```
+
+You can also filter for a substring in a name as this shows:
+
+```bash
+$ docker ps --filter "name=nostalgic"
+
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+715ebfcee040        busybox             "top"               3 seconds ago       Up 1 second                             i_am_nostalgic
+9b6247364a03        busybox             "top"               7 minutes ago       Up 7 minutes                            nostalgic_stallman
+673394ef1d4c        busybox             "top"               38 minutes ago      Up 38 minutes                           nostalgic_shockley
+```
+
+#### exited
+
+The `exited` filter matches containers by exit status code.
For example, to +filter for containers that have exited successfully: + +```bash +$ docker ps -a --filter 'exited=0' + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey +106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani +48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds +``` + +#### Filter by exit signal + +You can use a filter to locate containers that exited with status of `137` +meaning a `SIGKILL(9)` killed them. + +```none +$ docker ps -a --filter 'exited=137' + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski +a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande +``` + +Any of these events result in a `137` status: + +* the `init` process of the container is killed manually +* `docker kill` kills the container +* Docker daemon restarts which kills all running containers + +#### status + +The `status` filter matches containers by status. You can filter using +`created`, `restarting`, `running`, `removing`, `paused`, `exited` and `dead`. For example, +to filter for `running` containers: + +```bash +$ docker ps --filter status=running + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic +d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top +9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman +``` + +To filter for `paused` containers: + +```bash +$ docker ps --filter status=paused + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley +``` + +#### ancestor + +The `ancestor` filter matches containers based on its image or a descendant of +it. The filter supports the following image representation: + +- image +- image:tag +- image:tag@digest +- short-id +- full-id + +If you don't specify a `tag`, the `latest` tag is used. For example, to filter +for containers that use the latest `ubuntu` image: + +```bash +$ docker ps --filter ancestor=ubuntu + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace +5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet +82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose +bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath +``` + +Match containers based on the `ubuntu-c1` image which, in this case, is a child +of `ubuntu`: + +```bash +$ docker ps --filter ancestor=ubuntu-c1 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace +``` + +Match containers based on the `ubuntu` version `12.04.5` image: + +```bash +$ docker ps --filter ancestor=ubuntu:12.04.5 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose +``` + +The following matches containers based on the layer `d0e008c6cf02` or an image +that have this layer in its layer stack. 
+ +```bash +$ docker ps --filter ancestor=d0e008c6cf02 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose +``` + +#### Create time + +##### before + +The `before` filter shows only containers created before the container with +given id or name. For example, having these containers created: + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky +4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton +6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat +``` + +Filtering with `before` would give: + +```bash +$ docker ps -f before=9c3527ed70ce + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton +6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat +``` + +##### since + +The `since` filter shows only containers created since the container with given +id or name. For example, with the same containers as in `before` filter: + +```bash +$ docker ps -f since=6e63f6ff38b0 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky +4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton +``` + +#### volume + +The `volume` filter shows only containers that mount a specific volume or have +a volume mounted in a specific path: + +```bash +$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" +CONTAINER ID MOUNTS +9c3527ed70ce remote-volume + +$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" +CONTAINER ID MOUNTS +9c3527ed70ce remote-volume +``` + +#### network + +The `network` filter shows only containers that are connected to a network with +a given name or id. + +The following filter matches all containers that are connected to a network +with a name containing `net1`. + +```bash +$ docker run -d --net=net1 --name=test1 ubuntu top +$ docker run -d --net=net2 --name=test2 ubuntu top + +$ docker ps --filter network=net1 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 +``` + +The network filter matches on both the network's name and id. The following +example shows all containers that are attached to the `net1` network, using +the network id as a filter; + +```bash +$ docker network inspect --format "{{.ID}}" net1 + +8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + +$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 +``` + +#### publish and expose + +The `publish` and `expose` filters show only containers that have published or exposed port with a given port +number, port range, and/or protocol. The default protocol is `tcp` when not specified. 
+ +The following filter matches all containers that have published port of 80: + +```bash +$ docker run -d --publish=80 busybox top +$ docker run -d --expose=8080 busybox top + +$ docker ps -a + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9833437217a5 busybox "top" 5 seconds ago Up 4 seconds 8080/tcp dreamy_mccarthy +fc7e477723b7 busybox "top" 50 seconds ago Up 50 seconds 0.0.0.0:32768->80/tcp admiring_roentgen + +$ docker ps --filter publish=80 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +fc7e477723b7 busybox "top" About a minute ago Up About a minute 0.0.0.0:32768->80/tcp admiring_roentgen +``` + +The following filter matches all containers that have exposed TCP port in the range of `8000-8080`: +```bash +$ docker ps --filter expose=8000-8080/tcp + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9833437217a5 busybox "top" 21 seconds ago Up 19 seconds 8080/tcp dreamy_mccarthy +``` + +The following filter matches all containers that have exposed UDP port `80`: +```bash +$ docker ps --filter publish=80/udp + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +``` + +### Formatting + +The formatting option (`--format`) pretty-prints container output using a Go +template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +--------------|---------------------------------------------------------------------------------------------------- +`.ID` | Container ID +`.Image` | Image ID +`.Command` | Quoted command +`.CreatedAt` | Time when the container was created. +`.RunningFor` | Elapsed time since the container was started. +`.Ports` | Exposed ports. +`.Status` | Container status. +`.Size` | Container disk size. +`.Names` | Container names. +`.Labels` | All labels assigned to the container. +`.Label` | Value of a specific label for this container. For example `'{{.Label "com.docker.swarm.cpu"}}'` +`.Mounts` | Names of the volumes mounted in this container. +`.Networks` | Names of the networks attached to this container. + +When using the `--format` option, the `ps` command will either output the data +exactly as the template declares or, when using the `table` directive, includes +column headers as well. 
+
+The following example uses a template without headers and outputs the `ID` and
+`Command` entries separated by a colon for all running containers:
+
+```bash
+$ docker ps --format "{{.ID}}: {{.Command}}"
+
+a87ecb4f327c: /bin/sh -c #(nop) MA
+01946d9d34d8: /bin/sh -c #(nop) MA
+c1d3b0166030: /bin/sh -c yum -y up
+41d50ecd2f57: /bin/sh -c #(nop) MA
+```
+
+To list all running containers with their labels in a table format you can use:
+
+```bash
+$ docker ps --format "table {{.ID}}\t{{.Labels}}"
+
+CONTAINER ID        LABELS
+a87ecb4f327c        com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd
+01946d9d34d8
+c1d3b0166030        com.docker.swarm.node=debian,com.docker.swarm.cpu=6
+41d50ecd2f57        com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd
+```
diff --git a/docs/reference/commandline/pull.md b/docs/reference/commandline/pull.md
new file mode 100644
index 0000000000..7bf3df8363
--- /dev/null
+++ b/docs/reference/commandline/pull.md
@@ -0,0 +1,254 @@
+---
+title: "pull"
+description: "The pull command description and usage"
+keywords: "pull, image, hub, docker"
+---
+
+
+
+# pull
+
+```markdown
+Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST]
+
+Pull an image or a repository from a registry
+
+Options:
+  -a, --all-tags                Download all tagged images in the repository
+      --disable-content-trust   Skip image verification (default true)
+      --help                    Print usage
+```
+
+## Description
+
+Most of your images will be created on top of a base image from the
+[Docker Hub](https://hub.docker.com) registry.
+
+[Docker Hub](https://hub.docker.com) contains many pre-built images that you
+can `pull` and try without needing to define and configure your own.
+
+To download a particular image, or set of images (i.e., a repository),
+use `docker pull`.
+
+### Proxy configuration
+
+If you are behind an HTTP proxy server, for example in corporate settings, you
+may need to configure the Docker daemon's proxy settings before it can open a
+connection to the registry, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`
+environment variables. To set these environment variables on a host using
+`systemd`, refer to
+[control and configure Docker with systemd](https://docs.docker.com/engine/admin/systemd/#http-proxy).
+
+### Concurrent downloads
+
+By default the Docker daemon will pull three layers of an image at a time.
+If you are on a low bandwidth connection this may cause timeout issues and you may want to lower
+this via the `--max-concurrent-downloads` daemon option. See the
+[daemon documentation](dockerd.md) for more details.
+
+## Examples
+
+### Pull an image from Docker Hub
+
+To download a particular image, or set of images (i.e., a repository), use
+`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a
+default. This command pulls the `debian:latest` image:
+
+```bash
+$ docker pull debian
+
+Using default tag: latest
+latest: Pulling from library/debian
+fdd5d7827f33: Pull complete
+a3ed95caeb02: Pull complete
+Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa
+Status: Downloaded newer image for debian:latest
+```
+
+Docker images can consist of multiple layers. In the example above, the image
+consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`.
+
+Layers can be reused by images. For example, the `debian:jessie` image shares
+both layers with `debian:latest`.
Pulling the `debian:jessie` image therefore +only pulls its metadata, but not its layers, because all layers are already +present locally: + +```bash +$ docker pull debian:jessie + +jessie: Pulling from library/debian +fdd5d7827f33: Already exists +a3ed95caeb02: Already exists +Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e +Status: Downloaded newer image for debian:jessie +``` + +To see which images are present locally, use the [`docker images`](images.md) +command: + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +debian jessie f50f9524513f 5 days ago 125.1 MB +debian latest f50f9524513f 5 days ago 125.1 MB +``` + +Docker uses a content-addressable image store, and the image ID is a SHA256 +digest covering the image's configuration and layers. In the example above, +`debian:jessie` and `debian:latest` have the same image ID because they are +actually the *same* image tagged with different names. Because they are the +same image, their layers are stored only once and do not consume extra disk +space. + +For more information about images, layers, and the content-addressable store, +refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/). + + +### Pull an image by digest (immutable identifier) + +So far, you've pulled images by their name (and "tag"). Using names and tags is +a convenient way to work with images. When using tags, you can `docker pull` an +image again to make sure you have the most up-to-date version of that image. +For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu +14.04 image. + +In some cases you don't want images to be updated to newer versions, but prefer +to use a fixed version of an image. Docker enables you to pull an image by its +*digest*. When pulling an image by digest, you specify *exactly* which version +of an image to pull. Doing so, allows you to "pin" an image to that version, +and guarantee that the image you're using is always the same. + +To know the digest of an image, pull the image first. Let's pull the latest +`ubuntu:14.04` image from Docker Hub: + +```bash +$ docker pull ubuntu:14.04 + +14.04: Pulling from library/ubuntu +5a132a7e7af1: Pull complete +fd2731e4c50c: Pull complete +28a2f68d1120: Pull complete +a3ed95caeb02: Pull complete +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu:14.04 +``` + +Docker prints the digest of the image after the pull has finished. In the example +above, the digest of the image is: + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Docker also prints the digest of an image when *pushing* to a registry. This +may be useful if you want to pin to a version of the image you just pushed. 
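+
+If you no longer have the pull or push output at hand, you can also list the
+digests of images that are already present locally; a minimal sketch using the
+`--digests` flag of `docker images`:
+
+```bash
+$ docker images --digests ubuntu
+```
+
+The `DIGEST` column contains the same `sha256:...` value that is printed when
+the image is pulled or pushed.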
+ +A digest takes the place of the tag when pulling an image, for example, to +pull the above image by digest, run the following command: + +```bash +$ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu +5a132a7e7af1: Already exists +fd2731e4c50c: Already exists +28a2f68d1120: Already exists +a3ed95caeb02: Already exists +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +``` + +Digest can also be used in the `FROM` of a Dockerfile, for example: + +```Dockerfile +FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +MAINTAINER some maintainer +``` + +> **Note**: Using this feature "pins" an image to a specific version in time. +> Docker will therefore not pull updated versions of an image, which may include +> security updates. If you want to pull an updated image, you need to change the +> digest accordingly. + + +### Pull from a different registry + +By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to +manually specify the path of a registry to pull from. For example, if you have +set up a local registry, you can specify its path to pull from it. A registry +path is similar to a URL, but does not contain a protocol specifier (`https://`). + +The following command pulls the `testing/test-image` image from a local registry +listening on port 5000 (`myregistry.local:5000`): + +```bash +$ docker pull myregistry.local:5000/testing/test-image +``` + +Registry credentials are managed by [docker login](login.md). + +Docker uses the `https://` protocol to communicate with a registry, unless the +registry is allowed to be accessed over an insecure connection. Refer to the +[insecure registries](dockerd.md#insecure-registries) section for more information. + + +### Pull a repository with multiple images + +By default, `docker pull` pulls a *single* image from the registry. A repository +can contain multiple images. To pull all images from a repository, provide the +`-a` (or `--all-tags`) option when using `docker pull`. + +This command pulls all images from the `fedora` repository: + +```bash +$ docker pull --all-tags fedora + +Pulling repository fedora +ad57ef8d78d7: Download complete +105182bb5e8b: Download complete +511136ea3c5a: Download complete +73bd853d2ea5: Download complete +.... + +Status: Downloaded newer image for fedora +``` + +After the pull has completed use the `docker images` command to see the +images that were pulled. The example below shows all the `fedora` images +that are present locally: + +```bash +$ docker images fedora + +REPOSITORY TAG IMAGE ID CREATED SIZE +fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB +fedora 20 105182bb5e8b 5 days ago 372.7 MB +fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB +fedora latest 105182bb5e8b 5 days ago 372.7 MB +``` + +### Cancel a pull + +Killing the `docker pull` process, for example by pressing `CTRL-c` while it is +running in a terminal, will terminate the pull operation. 
+ +```bash +$ docker pull fedora + +Using default tag: latest +latest: Pulling from library/fedora +a3ed95caeb02: Pulling fs layer +236608c7b546: Pulling fs layer +^C +``` + +> **Note**: Technically, the Engine terminates a pull operation when the +> connection between the Docker Engine daemon and the Docker Engine client +> initiating the pull is lost. If the connection with the Engine daemon is +> lost for other reasons than a manual interaction, the pull is also aborted. diff --git a/docs/reference/commandline/push.md b/docs/reference/commandline/push.md new file mode 100644 index 0000000000..61c37139fd --- /dev/null +++ b/docs/reference/commandline/push.md @@ -0,0 +1,82 @@ +--- +title: "push" +description: "The push command description and usage" +keywords: "share, push, image" +--- + + + +# push + +```markdown +Usage: docker push [OPTIONS] NAME[:TAG] + +Push an image or a repository to a registry + +Options: + --disable-content-trust Skip image signing (default true) + --help Print usage +``` + +## Description + +Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) +registry or to a self-hosted one. + +Refer to the [`docker tag`](tag.md) reference for more information about valid +image and tag names. + +Killing the `docker push` process, for example by pressing `CTRL-c` while it is +running in a terminal, terminates the push operation. + +Progress bars are shown during docker push, which show the uncompressed size. The +actual amount of data that's pushed will be compressed before sending, so the uploaded + size will not be reflected by the progress bar. + +Registry credentials are managed by [docker login](login.md). + +### Concurrent uploads + +By default the Docker daemon will push five layers of an image at a time. +If you are on a low bandwidth connection this may cause timeout issues and you may want to lower +this via the `--max-concurrent-uploads` daemon option. See the +[daemon documentation](dockerd.md) for more details. + +## Examples + +### Push a new image to a registry + +First save the new image by finding the container ID (using [`docker ps`](ps.md)) +and then committing it to a new image name. Note that only `a-z0-9-_.` are +allowed when naming images: + +```bash +$ docker commit c16378f943fe rhel-httpd +``` + +Now, push the image to the registry using the image ID. In this example the +registry is on host named `registry-host` and listening on port `5000`. To do +this, tag the image with the host name or IP address, and the port of the +registry: + +```bash +$ docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd + +$ docker push registry-host:5000/myadmin/rhel-httpd +``` + +Check that this worked by running: + +```bash +$ docker images +``` + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. diff --git a/docs/reference/commandline/rename.md b/docs/reference/commandline/rename.md new file mode 100644 index 0000000000..90268a2a2c --- /dev/null +++ b/docs/reference/commandline/rename.md @@ -0,0 +1,35 @@ +--- +title: "rename" +description: "The rename command description and usage" +keywords: "rename, docker, container" +--- + + + +# rename + +```markdown +Usage: docker rename CONTAINER NEW_NAME + +Rename a container + +Options: + --help Print usage +``` + +## Description + +The `docker rename` command renames a container. 
+ +## Examples + +```bash +$ docker rename my_container my_new_container +``` diff --git a/docs/reference/commandline/restart.md b/docs/reference/commandline/restart.md new file mode 100644 index 0000000000..a2796afe33 --- /dev/null +++ b/docs/reference/commandline/restart.md @@ -0,0 +1,32 @@ +--- +title: "restart" +description: "The restart command description and usage" +keywords: "restart, container, Docker" +--- + + + +# restart + +```markdown +Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] + +Restart one or more containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing the container (default 10) +``` + +## Examples + +```bash +$ docker restart my_container +``` diff --git a/docs/reference/commandline/rm.md b/docs/reference/commandline/rm.md new file mode 100644 index 0000000000..8ee5b2874d --- /dev/null +++ b/docs/reference/commandline/rm.md @@ -0,0 +1,100 @@ +--- +title: "rm" +description: "The rm command description and usage" +keywords: "remove, Docker, container" +--- + + + +# rm + +```markdown +Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] + +Remove one or more containers + +Options: + -f, --force Force the removal of a running container (uses SIGKILL) + --help Print usage + -l, --link Remove the specified link + -v, --volumes Remove the volumes associated with the container +``` + +## Examples + +### Remove a container + +This will remove the container referenced under the link +`/redis`. + +```bash +$ docker rm /redis + +/redis +``` + +### Remove a link specified with `--link` on the default bridge network + +This will remove the underlying link between `/webapp` and the `/redis` +containers on the default bridge network, removing all network communication +between the two containers. This does not apply when `--link` is used with +user-specified networks. + +```bash +$ docker rm --link /webapp/redis + +/webapp/redis +``` + +### Force-remove a running container + +This command will force-remove a running container. + +```bash +$ docker rm --force redis + +redis +``` + +The main process inside the container referenced under the link `redis` will receive +`SIGKILL`, then the container will be removed. + +### Remove all stopped containers + +```bash +$ docker rm $(docker ps -a -q) +``` + +This command will delete all stopped containers. The command +`docker ps -a -q` will return all existing container IDs and pass them to +the `rm` command which will delete them. Any running containers will not be +deleted. + +### Remove a container and its volumes + +```bash +$ docker rm -v redis +redis +``` + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + +### Remove a container and selectively remove volumes + +```bash +$ docker create -v awesome:/foo -v /bar --name hello redis +hello +$ docker rm -v hello +``` + +In this example, the volume for `/foo` will remain intact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. diff --git a/docs/reference/commandline/rmi.md b/docs/reference/commandline/rmi.md new file mode 100644 index 0000000000..28e21d4398 --- /dev/null +++ b/docs/reference/commandline/rmi.md @@ -0,0 +1,105 @@ +--- +title: "rmi" +description: "The rmi command description and usage" +keywords: "remove, image, Docker" +--- + + + +# rmi + +```markdown +Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] 
+ +Remove one or more images + +Options: + -f, --force Force removal of the image + --help Print usage + --no-prune Do not delete untagged parents +``` + +## Examples + +You can remove an image using its short or long ID, its tag, or its digest. If +an image has one or more tag referencing it, you must remove all of them before +the image is removed. Digest references are removed automatically when an image +is removed by tag. + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) +test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) +test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + +$ docker rmi fd484f19954f + +Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force +2013/12/11 05:47:16 Error: failed to remove one or more images + +$ docker rmi test1 + +Untagged: test1:latest + +$ docker rmi test2 + +Untagged: test2:latest + + +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + +$ docker rmi test + +Untagged: test:latest +Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 +``` + +If you use the `-f` flag and specify the image's short or long ID, then this +command untags and removes all images that match the specified ID. + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) +test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) +test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + +$ docker rmi -f fd484f19954f + +Untagged: test1:latest +Untagged: test:latest +Untagged: test2:latest +Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 +``` + +An image pulled by digest has no tag associated with it: + +```bash +$ docker images --digests + +REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB +``` + +To remove an image using its digest: + +```bash +$ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +Deleted: 4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 +Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 +Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b +``` diff --git a/docs/reference/commandline/run.md b/docs/reference/commandline/run.md new file mode 100644 index 0000000000..ecb676dc20 --- /dev/null +++ b/docs/reference/commandline/run.md @@ -0,0 +1,777 @@ +--- +title: "run" +description: "The run command description and usage" +keywords: "run, command, container" +--- + + + +# run + +```markdown +Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
+ +Run a command in a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. + --cpu-percent int Limit percentage of CPU available for execution + by the container. Windows daemon only. + The processor resource controls are mutually + exclusive, the order of precedence is CPUCount + first, then CPUShares, and CPUPercent last. + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + -d, --detach Run container in background and print container ID + --detach-keys string Override the key sequence for detaching a container + --device value Add a host device to the container (default []) + --device-cgroup-rule value Add a rule to the cgroup allowed devices list + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + --init Run an init inside the container that forwards signals and reaps processes + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) + (Windows only). The format is ``. 
+ Unit is optional and can be `b` (bytes per second), + `k` (kilobytes per second), `m` (megabytes per second), + or `g` (gigabytes per second). If you omit the unit, + the system uses bytes per second. + --io-maxbandwidth and --io-maxiops are mutually exclusive options. + --io-maxiops uint Maximum IOps limit for the system drive (Windows only) + --ip string IPv4 address (e.g., 172.30.100.104) + --ip6 string IPv6 address (e.g., 2001:db8::33) + --ipc string IPC namespace to use + --isolation string Container isolation technology + --kernel-memory string Kernel memory limit + -l, --label value Set meta data on a container (default []) + --label-file value Read in a line delimited file of labels (default []) + --link value Add link to another container (default []) + --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) + --log-driver string Logging driver for the container + --log-opt value Log driver options (default []) + --mac-address string Container MAC address (e.g., 92:d0:c6:0a:29:33) + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) + --mount value Attach a filesystem mount to the container (default []) + --name string Assign a name to the container + --network-alias value Add network-scoped alias for the container (default []) + --network string Connect a container to a network + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-healthcheck Disable any container-specified HEALTHCHECK + --oom-kill-disable Disable OOM Killer + --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) + --pid string PID namespace to use + --pids-limit int Tune container pids limit (set -1 for unlimited) + --privileged Give extended privileges to this container + -p, --publish value Publish a container's port(s) to the host (default []) + -P, --publish-all Publish all exposed ports to random ports + --read-only Mount the container's root filesystem as read only + --restart string Restart policy to apply when a container exits (default "no") + Possible values are : no, on-failure[:max-retry], always, unless-stopped + --rm Automatically remove the container when it exits + --runtime string Runtime to use for this container + --security-opt value Security Options (default []) + --shm-size bytes Size of /dev/shm + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --sig-proxy Proxy received signals to the process (default true) + --stop-signal string Signal to stop a container (default "SIGTERM") + --stop-timeout=10 Timeout (in seconds) to stop a container + --storage-opt value Storage driver options for the container (default []) + --sysctl value Sysctl options (default map[]) + --tmpfs value Mount a tmpfs directory (default []) + -t, --tty Allocate a pseudo-TTY + --ulimit value Ulimit options (default []) + -u, --user string Username or UID (format: [:]) + --userns string User namespace to use + 'host': Use the Docker host user namespace + '': Use the Docker daemon user namespace specified by `--userns-remap` option. 
+ --uts string UTS namespace to use + -v, --volume value Bind mount a volume (default []). The format + is `[host-src:]container-dest[:]`. + The comma-delimited `options` are [rw|ro], + [z|Z], [[r]shared|[r]slave|[r]private], + [delegated|cached|consistent], and + [nocopy]. The 'host-src' is an absolute path + or a name value. + --volume-driver string Optional volume driver for the container + --volumes-from value Mount volumes from the specified container(s) (default []) + -w, --workdir string Working directory inside the container +``` + +## Description + +The `docker run` command first `creates` a writeable container layer over the +specified image, and then `starts` it using the specified command. That is, +`docker run` is equivalent to the API `/containers/create` then +`/containers/(id)/start`. A stopped container can be restarted with all its +previous changes intact using `docker start`. See `docker ps -a` to view a list +of all containers. + +The `docker run` command can be used in combination with `docker commit` to +[*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md). + +For information on connecting a container to a network, see the ["*Docker network overview*"](https://docs.docker.com/engine/userguide/networking/). + +## Examples + +### Assign name and allocate pseudo-TTY (--name, -it) + +```bash +$ docker run --name test -it debian + +root@d6c0fe130dba:/# exit 13 +$ echo $? +13 +$ docker ps -a | grep test +d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test +``` + +This example runs a container named `test` using the `debian:latest` +image. The `-it` instructs Docker to allocate a pseudo-TTY connected to +the container's stdin; creating an interactive `bash` shell in the container. +In the example, the `bash` shell is quit by entering +`exit 13`. This exit code is passed on to the caller of +`docker run`, and is recorded in the `test` container's metadata. + +### Capture container ID (--cidfile) + +```bash +$ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" +``` + +This will create a container and print `test` to the console. The `cidfile` +flag makes Docker attempt to create a new file and write the container ID to it. +If the file exists already, Docker will return an error. Docker will close this +file when `docker run` exits. + +### Full container capabilities (--privileged) + +```bash +$ docker run -t -i --rm ubuntu bash +root@bc338942ef20:/# mount -t tmpfs none /mnt +mount: permission denied +``` + +This will *not* work, because by default, most potentially dangerous kernel +capabilities are dropped; including `cap_sys_admin` (which is required to mount +filesystems). However, the `--privileged` flag will allow it to run: + +```bash +$ docker run -t -i --privileged ubuntu bash +root@50e3f57e16e6:/# mount -t tmpfs none /mnt +root@50e3f57e16e6:/# df -h +Filesystem Size Used Avail Use% Mounted on +none 1.9G 0 1.9G 0% /mnt +``` + +The `--privileged` flag gives *all* capabilities to the container, and it also +lifts all the limitations enforced by the `device` cgroup controller. In other +words, the container can then do almost everything that the host can do. This +flag exists to allow special use-cases, like running Docker within Docker. + +### Set working directory (-w) + +```bash +$ docker run -w /path/to/dir/ -i -t ubuntu pwd +``` + +The `-w` lets the command being executed inside directory given, here +`/path/to/dir/`. 
If the path does not exist, it is created inside the container.
+
+### Set storage driver options per container
+
+```bash
+$ docker run -it --storage-opt size=120G fedora /bin/bash
+```
+
+The `size` option sets the container rootfs size to 120G at creation time.
+This option is only available for the `devicemapper`, `btrfs`, `overlay2`,
+`windowsfilter` and `zfs` graph drivers.
+For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers,
+you cannot pass a size smaller than the default BaseFS size.
+For the `overlay2` storage driver, the size option is only available if the
+backing fs is `xfs` and mounted with the `pquota` mount option.
+Under these conditions, you can pass any size smaller than the backing fs size.
+
+### Mount tmpfs (--tmpfs)
+
+```bash
+$ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image
+```
+
+The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`,
+`noexec`, `nosuid`, `size=65536k` options.
+
+### Mount volume (-v, --read-only)
+
+```bash
+$ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
+```
+
+The `-v` flag mounts the current working directory into the container. The `-w`
+option then runs the command inside that directory by changing into the path
+returned by `pwd`. This combination therefore executes the command in the
+container, but inside the host's current working directory.
+
+```bash
+$ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash
+```
+
+When the host directory of a bind-mounted volume doesn't exist, Docker
+will automatically create this directory on the host for you. In the
+example above, Docker will create the `/doesnt/exist`
+folder before starting your container.
+
+```bash
+$ docker run --read-only -v /icanwrite busybox touch /icanwrite/here
+```
+
+Volumes can be used in combination with `--read-only` to control where
+a container writes files. The `--read-only` flag mounts the container's root
+filesystem as read only, prohibiting writes to locations other than the
+specified volumes for the container.
+
+```bash
+$ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh
+```
+
+By bind-mounting the Docker unix socket and a statically linked `docker`
+binary (refer to [get the linux binary](https://docs.docker.com/engine/installation/binaries/#/get-the-linux-binary)),
+you give the container full access to create and manipulate the host's
+Docker daemon.
+
+On Windows, the paths must be specified using Windows-style semantics.
+
+```powershell
+PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt
+Contents of file
+
+PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt
+Contents of file
+```
+
+The following examples will fail when using Windows-based containers, as the
+destination of a volume or bind-mount inside the container must be one of:
+a non-existing or empty directory; or a drive other than C:. Further, the source
+of a bind mount must be a local directory, not a file.
+
+```powershell
+net use z: \\remotemachine\share
+docker run -v z:\foo:c:\dest ...
+docker run -v \\uncpath\to\directory:c:\dest ...
+docker run -v c:\foo\somefile.txt:c:\dest ...
+docker run -v c:\foo:c: ...
+docker run -v c:\foo:c:\existing-directory-with-contents ...
+``` + +For in-depth information about volumes, refer to [manage data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/) + + +### Add bind-mounts or volumes using the --mount flag + +The `--mount` flag allows you to mount volumes, host-directories and `tmpfs` +mounts in a container. + +The `--mount` flag supports most options that are supported by the `-v` or the +`--volume` flag, but uses a different syntax. For in-depth information on the +`--mount` flag, and a comparison between `--volume` and `--mount`, refer to +the [service create command reference](service_create.md#add-bind-mounts-or-volumes). + +Even though there is no plan to deprecate `--volume`, usage of `--mount` is recommended. + +Examples: + +```bash +$ docker run --read-only --mount type=volume,target=/icanwrite busybox touch /icanwrite/here +``` + +```bash +$ docker run -t -i --mount type=bind,src=/data,dst=/data busybox sh +``` + +### Publish or expose port (-p, --expose) + +```bash +$ docker run -p 127.0.0.1:80:8080 ubuntu bash +``` + +This binds port `8080` of the container to port `80` on `127.0.0.1` of the host +machine. The [Docker User +Guide](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/) +explains in detail how to manipulate ports in Docker. + +```bash +$ docker run --expose 80 ubuntu bash +``` + +This exposes port `80` of the container without publishing the port to the host +system's interfaces. + +### Set environment variables (-e, --env, --env-file) + +```bash +$ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash +``` + +Use the `-e`, `--env`, and `--env-file` flags to set simple (non-array) +environment variables in the container you're running, or overwrite variables +that are defined in the Dockerfile of the image you're running. + +You can define the variable and its value when running the container: + +```bash +$ docker run --env VAR1=value1 --env VAR2=value2 ubuntu env | grep VAR +VAR1=value1 +VAR2=value2 +``` + +You can also use variables that you've exported to your local environment: + +```bash +export VAR1=value1 +export VAR2=value2 + +$ docker run --env VAR1 --env VAR2 ubuntu env | grep VAR +VAR1=value1 +VAR2=value2 +``` + +When running the command, the Docker CLI client checks the value the variable +has in your local environment and passes it to the container. +If no `=` is provided and that variable is not exported in your local +environment, the variable won't be set in the container. + +You can also load the environment variables from a file. This file should use +the syntax `=value` (which sets the variable to the given value) or +`` (which takes the value from the local environment), and `#` for comments. + +```bash +$ cat env.list +# This is a comment +VAR1=value1 +VAR2=value2 +USER + +$ docker run --env-file env.list ubuntu env | grep VAR +VAR1=value1 +VAR2=value2 +USER=denis +``` + +### Set metadata on container (-l, --label, --label-file) + +A label is a `key=value` pair that applies metadata to a container. To label a container with two labels: + +```bash +$ docker run -l my-label --label com.example.foo=bar ubuntu bash +``` + +The `my-label` key doesn't specify a value so the label defaults to an empty +string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`). + +The `key=value` must be unique to avoid overwriting the label value. If you +specify labels with identical keys but different values, each subsequent value +overwrites the previous. Docker uses the last `key=value` you supply. 
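+
+A quick way to see which value ends up on the container is to inspect its
+labels after it starts. A minimal sketch (the `foo` label and the container
+name `labeled` are hypothetical):
+
+```bash
+$ docker run -d --name labeled -l foo=a -l foo=b busybox top
+
+$ docker inspect --format '{{json .Config.Labels}}' labeled
+{"foo":"b"}
+```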
+
+Use the `--label-file` flag to load multiple labels from a file. Delimit each
+label in the file with an EOL mark. The example below loads labels from a
+labels file in the current directory:
+
+```bash
+$ docker run --label-file ./labels ubuntu bash
+```
+
+The label-file format is similar to the format for loading environment
+variables. (Unlike environment variables, labels are not visible to processes
+running inside a container.) The following example illustrates a label-file
+format:
+
+```none
+com.example.label1="a label"
+
+# this is a comment
+com.example.label2=another\ label
+com.example.label3
+```
+
+You can load multiple label-files by supplying multiple `--label-file` flags.
+
+For additional information on working with labels, see [*Labels - custom
+metadata in Docker*](https://docs.docker.com/engine/userguide/labels-custom-metadata/) in the Docker User
+Guide.
+
+### Connect a container to a network (--network)
+
+When you start a container, use the `--network` flag to connect it to a
+network. The following command adds the `busybox` container to the `my-net`
+network:
+
+```bash
+$ docker run -itd --network=my-net busybox
+```
+
+You can also choose the IP addresses for the container with the `--ip` and
+`--ip6` flags when you start the container on a user-defined network:
+
+```bash
+$ docker run -itd --network=my-net --ip=10.10.9.75 busybox
+```
+
+If you want to add a running container to a network, use the
+`docker network connect` subcommand.
+
+You can connect multiple containers to the same network. Once connected, the
+containers can communicate using only another container's IP address or name.
+For `overlay` networks or custom plugins that support multi-host connectivity,
+containers connected to the same multi-host network but launched from
+different Engines can also communicate in this way.
+
+> **Note**: Service discovery is unavailable on the default bridge network.
+> Containers can communicate via their IP addresses by default. To communicate
+> by name, they must be linked.
+
+You can disconnect a container from a network using the `docker network
+disconnect` command.
+
+### Mount volumes from container (--volumes-from)
+
+```bash
+$ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
+```
+
+The `--volumes-from` flag mounts all the defined volumes from the referenced
+containers. Containers can be specified by repetitions of the `--volumes-from`
+argument. The container ID may be optionally suffixed with `:ro` or `:rw` to
+mount the volumes in read-only or read-write mode, respectively. By default,
+the volumes are mounted in the same mode (read-write or read-only) as
+the reference container.
+
+Labeling systems like SELinux require that proper labels are placed on volume
+content mounted into a container. Without a label, the security system might
+prevent the processes running inside the container from using the content. By
+default, Docker does not change the labels set by the OS.
+
+To change the label in the container context, you can add either of two
+suffixes, `:z` or `:Z`, to the volume mount. These suffixes tell Docker to
+relabel file objects on the shared volumes. The `z` option tells Docker that
+two containers share the volume content. As a result, Docker labels the
+content with a shared content label. Shared volume labels allow all containers
+to read/write content. The `Z` option tells Docker to label the content with a
+private unshared label. Only the current container can use a private volume.
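+
+For example, on an SELinux-enabled host you can share a host directory between
+containers by adding the `z` suffix to the mount (the `/var/db` path is only
+illustrative):
+
+```bash
+$ docker run -d -v /var/db:/var/db:z redis redis-server
+```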
+ +### Attach to STDIN/STDOUT/STDERR (-a) + +The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` +or `STDERR`. This makes it possible to manipulate the output and input as +needed. + +```bash +$ echo "test" | docker run -i -a stdin ubuntu cat - +``` + +This pipes data into a container and prints the container's ID by attaching +only to the container's `STDIN`. + +```bash +$ docker run -a stderr ubuntu echo test +``` + +This isn't going to print anything unless there's an error because we've +only attached to the `STDERR` of the container. The container's logs +still store what's been written to `STDERR` and `STDOUT`. + +```bash +$ cat somefile | docker run -i -a stdin mybuilder dobuild +``` + +This is how piping a file into a container could be done for a build. +The container's ID will be printed after the build is done and the build +logs could be retrieved using `docker logs`. This is +useful if you need to pipe a file or something else into a container and +retrieve the container's ID once the container has finished running. + +### Add host device to container (--device) + +```bash +$ docker run --device=/dev/sdc:/dev/xvdc \ + --device=/dev/sdd --device=/dev/zero:/dev/nulo \ + -i -t \ + ubuntu ls -l /dev/{xvdc,sdd,nulo} + +brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc +brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd +crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo +``` + +It is often necessary to directly expose devices to a container. The `--device` +option enables that. For example, a specific block storage device or loop +device or audio device can be added to an otherwise unprivileged container +(without the `--privileged` flag) and have the application directly access it. + +By default, the container will be able to `read`, `write` and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` +flag: + +```bash +$ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + +Command (m for help): q +$ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc +You will not be able to write the partition table. + +Command (m for help): q + +$ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc + +Command (m for help): q + +$ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc +fdisk: unable to open /dev/xvdc: Operation not permitted +``` + +> **Note**: `--device` cannot be safely used with ephemeral devices. Block devices +> that may be removed should not be added to untrusted containers with +> `--device`. + +### Restart policies (--restart) + +Use Docker's `--restart` to specify a container's *restart policy*. A restart +policy controls whether the Docker daemon restarts a container after exit. +Docker supports the following restart policies: + +| Policy | Result | +|:----------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `no` | Do not automatically restart the container when it exits. This is the default. | +| `failure` | Restart only if the container exits with a non-zero exit status. Optionally, limit the number of restart retries the Docker daemon attempts. | +| `always` | Always restart the container regardless of the exit status. 
When you specify always, the Docker daemon will try to restart the container indefinitely. The container will also always start on daemon startup, regardless of the current state of the container. | + +```bash +$ docker run --restart=always redis +``` + +This will run the `redis` container with a restart policy of **always** +so that if the container exits, Docker will restart it. + +More detailed information on restart policies can be found in the +[Restart Policies (--restart)](../run.md#restart-policies-restart) +section of the Docker run reference page. + +### Add entries to container hosts file (--add-host) + +You can add other hosts into a container's `/etc/hosts` file by using one or +more `--add-host` flags. This example adds a static address for a host named +`docker`: + +```bash +$ docker run --add-host=docker:10.180.0.1 --rm -it debian + +root@f38c87f2a42d:/# ping docker +PING docker (10.180.0.1): 48 data bytes +56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms +56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms +^C--- docker ping statistics --- +2 packets transmitted, 2 packets received, 0% packet loss +round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms +``` + +Sometimes you need to connect to the Docker host from within your +container. To enable this, pass the Docker host's IP address to +the container using the `--add-host` flag. To find the host's address, +use the `ip addr show` command. + +The flags you pass to `ip addr show` depend on whether you are +using IPv4 or IPv6 networking in your containers. Use the following +flags for IPv4 address retrieval for a network device named `eth0`: + +```bash +$ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1` +$ docker run --add-host=docker:${HOSTIP} --rm -it debian +``` + +For IPv6 use the `-6` flag instead of the `-4` flag. For other network +devices, replace `eth0` with the correct device name (for example `docker0` +for the bridge device). + +### Set ulimits in container (--ulimit) + +Since setting `ulimit` settings in a container requires extra privileges not +available in the default container, you can set these using the `--ulimit` flag. +`--ulimit` is specified with a soft and hard limit as such: +`=[:]`, for example: + +```bash +$ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" +1024 +``` + +> **Note**: If you do not provide a `hard limit`, the `soft limit` will be used +> for both values. If no `ulimits` are set, they will be inherited from +> the default `ulimits` set on the daemon. `as` option is disabled now. +> In other words, the following script is not supported: +> +> ```bash +> $ docker run -it --ulimit as=1024 fedora /bin/bash` +> ``` + +The values are sent to the appropriate `syscall` as they are set. +Docker doesn't perform any byte conversion. Take this into account when setting the values. + +#### For `nproc` usage + +Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the +maximum number of processes available to a user, not to a container. For example, start four +containers with `daemon` user: + +```bash +$ docker run -d -u daemon --ulimit nproc=3 busybox top + +$ docker run -d -u daemon --ulimit nproc=3 busybox top + +$ docker run -d -u daemon --ulimit nproc=3 busybox top + +$ docker run -d -u daemon --ulimit nproc=3 busybox top +``` + +The 4th container fails and reports "[8] System error: resource temporarily unavailable" error. 
+
+This fails because the caller set `nproc=3`, so the first three containers use
+up the three-process quota set for the `daemon` user.
+
+### Stop container with signal (--stop-signal)
+
+The `--stop-signal` flag sets the system call signal that is sent to the
+container to make it exit. This signal can be a valid unsigned number that
+matches a position in the kernel's signal table, for instance `9`, or a signal
+name in the format `SIGNAME`, for instance `SIGKILL`.
+
+### Optional security options (--security-opt)
+
+On Windows, this flag can be used to specify the `credentialspec` option.
+The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`.
+
+### Stop container with timeout (--stop-timeout)
+
+The `--stop-timeout` flag sets the timeout (in seconds) to wait for the
+container to exit after it receives the pre-defined (see `--stop-signal`)
+system call signal. After the timeout elapses, the container is killed with
+`SIGKILL`.
+
+### Specify isolation technology for container (--isolation)
+
+This option is useful in situations where you are running Docker containers on
+Windows. The `--isolation` option sets a container's isolation technology.
+On Linux, the only supported value is `default`, which uses
+Linux namespaces. These two commands are equivalent on Linux:
+
+```bash
+$ docker run -d busybox top
+$ docker run -d --isolation default busybox top
+```
+
+On Windows, `--isolation` can take one of these values:
+
+| Value     | Description                                                                                 |
+|:----------|:--------------------------------------------------------------------------------------------|
+| `default` | Use the value specified by the Docker daemon's `--exec-opt` or system default (see below).  |
+| `process` | Shared-kernel namespace isolation (not supported on Windows client operating systems).      |
+| `hyperv`  | Hyper-V hypervisor partition-based isolation.                                               |
+
+The default isolation on Windows server operating systems is `process`. The
+default (and only supported) isolation on Windows client operating systems is
+`hyperv`. An attempt to start a container on a client operating system with
+`--isolation process` will fail.
+
+On Windows server, assuming the default configuration, these commands are
+equivalent and result in `process` isolation:
+
+```PowerShell
+PS C:\> docker run -d microsoft/nanoserver powershell echo process
+PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo process
+PS C:\> docker run -d --isolation process microsoft/nanoserver powershell echo process
+```
+
+If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, or
+are running against a Windows client-based daemon, these commands are equivalent and
+result in `hyperv` isolation:
+
+```PowerShell
+PS C:\> docker run -d microsoft/nanoserver powershell echo hyperv
+PS C:\> docker run -d --isolation default microsoft/nanoserver powershell echo hyperv
+PS C:\> docker run -d --isolation hyperv microsoft/nanoserver powershell echo hyperv
+```
+
+### Configure namespaced kernel parameters (sysctls) at runtime
+
+The `--sysctl` option sets namespaced kernel parameters (sysctls) in the
+container. For example, to turn on IP forwarding in the container's
+network namespace, run this command:
+
+```bash
+$ docker run --sysctl net.ipv4.ip_forward=1 someimage
+```
+
+> **Note**: Not all sysctls are namespaced. Docker does not support changing
+> sysctls inside a container that also modify the host system. As the kernel
+> evolves we expect to see more sysctls become namespaced.
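+
+You can check that a namespaced sysctl was applied inside the container by
+reading it back from `/proc`; a minimal sketch using `busybox`:
+
+```bash
+$ docker run --rm --sysctl net.ipv4.ip_forward=1 busybox cat /proc/sys/net/ipv4/ip_forward
+1
+```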
+ +#### Currently supported sysctls + +- `IPC Namespace`: + + ```none + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + ``` + + If you use the `--ipc=host` option these sysctls will not be allowed. + +- `Network Namespace`: + + Sysctls beginning with net.* + + If you use the `--network=host` option using these sysctls will not be allowed. diff --git a/docs/reference/commandline/save.md b/docs/reference/commandline/save.md new file mode 100644 index 0000000000..cba7385e11 --- /dev/null +++ b/docs/reference/commandline/save.md @@ -0,0 +1,62 @@ +--- +title: "save" +description: "The save command description and usage" +keywords: "tarred, repository, backup" +--- + + + +# save + +```markdown +Usage: docker save [OPTIONS] IMAGE [IMAGE...] + +Save one or more images to a tar archive (streamed to STDOUT by default) + +Options: + --help Print usage + -o, --output string Write to a file, instead of STDOUT +``` + +## Description + +Produces a tarred repository to the standard output stream. +Contains all parent layers, and all tags + versions, or specified `repo:tag`, for +each argument provided. + +## Examples + +### Create a backup that can then be used with `docker load`. + +```bash +$ docker save busybox > busybox.tar + +$ ls -sh busybox.tar + +2.7M busybox.tar + +$ docker save --output busybox.tar busybox + +$ ls -sh busybox.tar + +2.7M busybox.tar + +$ docker save -o fedora-all.tar fedora + +$ docker save -o fedora-latest.tar fedora:latest +``` + +### Cherry-pick particular tags + +You can even cherry-pick particular tags of an image repository. + +```bash +$ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy +``` diff --git a/docs/reference/commandline/search.md b/docs/reference/commandline/search.md new file mode 100644 index 0000000000..f645c78603 --- /dev/null +++ b/docs/reference/commandline/search.md @@ -0,0 +1,149 @@ +--- +title: "search" +description: "The search command description and usage" +keywords: "search, hub, images" +--- + + + +# search + +```markdown +Usage: docker search [OPTIONS] TERM + +Search the Docker Hub for images + +Options: + -f, --filter value Filter output based on conditions provided (default []) + - is-automated=(true|false) + - is-official=(true|false) + - stars= - image has at least 'number' stars + --help Print usage + --limit int Max number of search results (default 25) + --no-trunc Don't truncate output +``` + +## Description + +Search [Docker Hub](https://hub.docker.com) for images + +See [*Find Public Images on Docker Hub*](https://docs.docker.com/engine/tutorials/dockerrepos/#searching-for-images) for +more details on finding shared images from the command line. + +> **Note**: Search queries return a maximum of 25 results. + +## Examples + +### Search images by name + +This example displays images with a name containing 'busybox': + +```none +$ docker search busybox + +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +busybox Busybox base image. 316 [OK] +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] +odise/busybox-python 2 [OK] +azukiapp/busybox This image is meant to be used as the base... 2 [OK] +ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK] +shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 
1 [OK] +odise/busybox-curl 1 [OK] +ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK] +peelsky/zulu-openjdk-busybox 1 [OK] +skomma/busybox-data Docker image suitable for data volume cont... 1 [OK] +elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 [OK] +socketplane/busybox 1 [OK] +oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK] +ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK] +nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK] +openshift/busybox-http-app 0 [OK] +jllopis/busybox 0 [OK] +swyckoff/busybox 0 [OK] +powellquiring/busybox 0 [OK] +williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK] +simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK] +fhisamoto/busybox-java Busybox java 0 [OK] +scottabernethy/busybox 0 [OK] +marclop/busybox-solr +``` + +### Display non-truncated description (--no-trunc) + +This example displays images with a name containing 'busybox', +at least 3 stars and the description isn't truncated in the output: + +```bash +$ docker search --stars=3 --no-trunc busybox +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +busybox Busybox base image. 325 [OK] +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. Comes in git and cURL flavors. 8 [OK] +``` + +### Limit search results (--limit) + +The flag `--limit` is the maximum number of results returned by a search. This value could +be in the range between 1 and 100. The default value of `--limit` is 25. + + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* stars (int - number of stars the image has) +* is-automated (true|false) - is the image automated or not +* is-official (true|false) - is the image official or not + + +#### stars + +This example displays images with a name containing 'busybox' and at +least 3 stars: + +```bash +$ docker search --filter stars=3 busybox + +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +busybox Busybox base image. 325 [OK] +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] +``` + + +#### is-automated + +This example displays images with a name containing 'busybox' +and are automated builds: + +```bash +$ docker search --filter is-automated busybox + +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] +``` + +#### is-official + +This example displays images with a name containing 'busybox', at least +3 stars and are official builds: + +```bash +$ docker search --filter "is-official=true" --filter "stars=3" busybox + +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +progrium/busybox 50 [OK] +radial/busyboxplus Full-chain, Internet enabled, busybox made... 
8 [OK] +``` diff --git a/docs/reference/commandline/secret.md b/docs/reference/commandline/secret.md new file mode 100644 index 0000000000..50734407ab --- /dev/null +++ b/docs/reference/commandline/secret.md @@ -0,0 +1,45 @@ +--- +title: "secret" +description: "The secret command description and usage" +keywords: "secret" +--- + + + +# secret + +```markdown +Usage: docker secret COMMAND + +Manage Docker secrets + +Options: + --help Print usage + +Commands: + create Create a secret from a file or STDIN as content + inspect Display detailed information on one or more secrets + ls List secrets + rm Remove one or more secrets + +Run 'docker secret COMMAND --help' for more information on a command. + +``` + +## Description + +Manage secrets. + +## Related commands + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret list](secret_list.md) +* [secret rm](secret_rm.md) diff --git a/docs/reference/commandline/secret_create.md b/docs/reference/commandline/secret_create.md new file mode 100644 index 0000000000..e534dde553 --- /dev/null +++ b/docs/reference/commandline/secret_create.md @@ -0,0 +1,99 @@ +--- +title: "secret create" +description: "The secret create command description and usage" +keywords: ["secret, create"] +--- + + + +# secret create + +```Markdown +Usage: docker secret create [OPTIONS] SECRET file|- + +Create a secret from a file or STDIN as content + +Options: + --help Print usage + -l, --label list Secret labels (default []) +``` + +## Description + +Creates a secret using standard input or from a file for the secret content. You must run this command on a manager node. + +For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). + +## Examples + +### Create a secret + +```bash +$ echo | docker secret create my_secret - + +onakdyv307se2tl7nl20anokv + +$ docker secret ls + +ID NAME CREATED UPDATED +onakdyv307se2tl7nl20anokv my_secret 6 seconds ago 6 seconds ago +``` + +### Create a secret with a file + +```bash +$ docker secret create my_secret ./secret.json + +dg426haahpi5ezmkkj5kyl3sn + +$ docker secret ls + +ID NAME CREATED UPDATED +dg426haahpi5ezmkkj5kyl3sn my_secret 7 seconds ago 7 seconds ago +``` + +### Create a secret with labels + +```bash +$ docker secret create --label env=dev \ + --label rev=20170324 \ + my_secret ./secret.json + +eo7jnzguqgtpdah3cm5srfb97 +``` + +```none +$ docker secret inspect my_secret + +[ + { + "ID": "eo7jnzguqgtpdah3cm5srfb97", + "Version": { + "Index": 17 + }, + "CreatedAt": "2017-03-24T08:15:09.735271783Z", + "UpdatedAt": "2017-03-24T08:15:09.735271783Z", + "Spec": { + "Name": "my_secret", + "Labels": { + "env": "dev", + "rev": "20170324" + } + } + } +] +``` + + +## Related commands + +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) +* [secret rm](secret_rm.md) diff --git a/docs/reference/commandline/secret_inspect.md b/docs/reference/commandline/secret_inspect.md new file mode 100644 index 0000000000..cecf3c1ddc --- /dev/null +++ b/docs/reference/commandline/secret_inspect.md @@ -0,0 +1,95 @@ +--- +title: "secret inspect" +description: "The secret inspect command description and usage" +keywords: ["secret, inspect"] +--- + + + +# secret inspect + +```Markdown +Usage: docker secret inspect [OPTIONS] SECRET [SECRET...] 
+ +Display detailed information on one or more secrets + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Inspects the specified secret. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). + +## Examples + +### Inspect a secret by name or ID + +You can inspect a secret, either by its *name*, or *ID* + +For example, given the following secret: + +```bash +$ docker secret ls + +ID NAME CREATED UPDATED +eo7jnzguqgtpdah3cm5srfb97 my_secret 3 minutes ago 3 minutes ago +``` + +```none +$ docker secret inspect secret.json + +[ + { + "ID": "eo7jnzguqgtpdah3cm5srfb97", + "Version": { + "Index": 17 + }, + "CreatedAt": "2017-03-24T08:15:09.735271783Z", + "UpdatedAt": "2017-03-24T08:15:09.735271783Z", + "Spec": { + "Name": "my_secret", + "Labels": { + "env": "dev", + "rev": "20170324" + } + } + } +] +``` + +### Formatting + +You can use the --format option to obtain specific information about a +secret. The following example command outputs the creation time of the +secret. + +```bash +$ docker secret inspect --format='{{.CreatedAt}}' eo7jnzguqgtpdah3cm5srfb97 + +2017-03-24 08:15:09.735271783 +0000 UTC +``` + + +## Related commands + +* [secret create](secret_create.md) +* [secret ls](secret_ls.md) +* [secret rm](secret_rm.md) diff --git a/docs/reference/commandline/secret_ls.md b/docs/reference/commandline/secret_ls.md new file mode 100644 index 0000000000..9b60227b8e --- /dev/null +++ b/docs/reference/commandline/secret_ls.md @@ -0,0 +1,157 @@ +--- +title: "secret ls" +description: "The secret ls command description and usage" +keywords: ["secret, ls"] +--- + + + +# secret ls + +```Markdown +Usage: docker secret ls [OPTIONS] + +List secrets + +Aliases: + ls, list + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print secrets using a Go template + --help Print usage + -q, --quiet Only display IDs +``` + +## Description + +Run this command on a manager node to list the secrets in the swarm. + +For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). + +## Examples + +```bash +$ docker secret ls + +ID NAME CREATED UPDATED +6697bflskwj1998km1gnnjr38 q5s5570vtvnimefos1fyeo2u2 6 weeks ago 6 weeks ago +9u9hk4br2ej0wgngkga6rp4hq my_secret 5 weeks ago 5 weeks ago +mem02h8n73mybpgqjf0kfi1n0 test_secret 3 seconds ago 3 seconds ago +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](secret_ls.md#id) (secret's ID) +* [label](secret_ls.md#label) (`label=` or `label==`) +* [name](secret_ls.md#name) (secret's name) + +#### id + +The `id` filter matches all or prefix of a secret's id. 
+ +```bash +$ docker secret ls -f "id=6697bflskwj1998km1gnnjr38" + +ID NAME CREATED UPDATED +6697bflskwj1998km1gnnjr38 q5s5570vtvnimefos1fyeo2u2 6 weeks ago 6 weeks ago +``` + +#### label + +The `label` filter matches secrets based on the presence of a `label` alone or +a `label` and a value. + +The following filter matches all secrets with a `project` label regardless of +its value: + +```bash +$ docker secret ls --filter label=project + +ID NAME CREATED UPDATED +mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago +``` + +The following filter matches only services with the `project` label with the +`project-a` value. + +```bash +$ docker service ls --filter label=project=test + +ID NAME CREATED UPDATED +mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago +``` + +#### name + +The `name` filter matches on all or prefix of a secret's name. + +The following filter matches secret with a name containing a prefix of `test`. + +```bash +$ docker secret ls --filter name=test_secret + +ID NAME CREATED UPDATED +mem02h8n73mybpgqjf0kfi1n0 test_secret About an hour ago About an hour ago +``` + +### Format the output + +The formatting option (`--format`) pretty prints secrets output +using a Go template. + +Valid placeholders for the Go template are listed below: + +| Placeholder | Description | +| ------------ | ------------------------------------------------------------------------------------ | +| `.ID` | Secret ID | +| `.Name` | Secret name | +| `.CreatedAt` | Time when the secret was created | +| `.UpdatedAt` | Time when the secret was updated | +| `.Labels` | All labels assigned to the secret | +| `.Label` | Value of a specific label for this secret. For example `{{.Label "secret.ssh.key"}}` | + +When using the `--format` option, the `secret ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, will include column headers as well. + +The following example uses a template without headers and outputs the +`ID` and `Name` entries separated by a colon for all images: + +```bash +$ docker secret ls --format "{{.ID}}: {{.Name}}" + +77af4d6b9913: secret-1 +b6fa739cedf5: secret-2 +78a85c484f71: secret-3 +``` + +To list all secrets with their name and created date in a table format you +can use: + +```bash +$ docker secret ls --format "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}" + +ID NAME CREATED +77af4d6b9913 secret-1 5 minutes ago +b6fa739cedf5 secret-2 3 hours ago +78a85c484f71 secret-3 10 days ago +``` + +## Related commands + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret rm](secret_rm.md) diff --git a/docs/reference/commandline/secret_rm.md b/docs/reference/commandline/secret_rm.md new file mode 100644 index 0000000000..1e10350f96 --- /dev/null +++ b/docs/reference/commandline/secret_rm.md @@ -0,0 +1,54 @@ +--- +title: "secret rm" +description: "The secret rm command description and usage" +keywords: ["secret, rm"] +--- + + + +# secret rm + +```Markdown +Usage: docker secret rm SECRET [SECRET...] + +Remove one or more secrets + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +## Description + +Removes the specified secrets from the swarm. This command has to be run +targeting a manager node. + +For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/). 
+ +## Examples + +This example removes a secret: + +```bash +$ docker secret rm secret.json +sapth4csdo5b6wz2p5uimh5xg +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a secret. + + +## Related commands + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) diff --git a/docs/reference/commandline/service.md b/docs/reference/commandline/service.md new file mode 100644 index 0000000000..2c12c67648 --- /dev/null +++ b/docs/reference/commandline/service.md @@ -0,0 +1,42 @@ +--- +title: "service" +description: "The service command description and usage" +keywords: "service" +--- + + + +# service + +```markdown +Usage: docker service COMMAND + +Manage services + +Options: + --help Print usage + +Commands: + create Create a new service + inspect Display detailed information on one or more services + logs Fetch the logs of a service or task + ls List services + ps List the tasks of one or more services + rm Remove one or more services + scale Scale one or multiple replicated services + update Update a service + +Run 'docker service COMMAND --help' for more information on a command. +``` + +## Description + +Manage services. + diff --git a/docs/reference/commandline/service_create.md b/docs/reference/commandline/service_create.md new file mode 100644 index 0000000000..78faa98bf7 --- /dev/null +++ b/docs/reference/commandline/service_create.md @@ -0,0 +1,846 @@ +--- +title: "service create" +description: "The service create command description and usage" +keywords: "service, create" +--- + + + +# service create + +```Markdown +Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new service + +Options: + --constraint list Placement constraints + --container-label list Container labels + -d, --detach Exit immediately instead of waiting for the service to converge (default true) + --dns list Set custom DNS servers + --dns-option list Set DNS options + --dns-search list Set custom DNS search domains + --endpoint-mode string Endpoint mode (vip or dnsrr) (default "vip") + --entrypoint command Overwrite the default ENTRYPOINT of the image + -e, --env list Set environment variables + --env-file list Read in a file of environment variables + --group list Set one or more supplementary user groups for the container + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ms|s|m|h) + --health-timeout duration Maximum time to allow one check to run (ms|s|m|h) + --help Print usage + --host list Set one or more custom host-to-IP mappings (host:ip) + --hostname string Container hostname + -l, --label list Service labels + --limit-cpu decimal Limit CPUs + --limit-memory bytes Limit Memory + --log-driver string Logging driver for service + --log-opt list Logging driver options + --mode string Service mode (replicated or global) (default "replicated") + --mount mount Attach a filesystem mount to the service + --name string Service name + --network list Network attachments + --no-healthcheck Disable any container-specified HEALTHCHECK + --placement-pref pref Add a placement preference + -p, --publish port Publish a port as a node port + -q, --quiet Suppress progress output + --read-only Mount the container's root filesystem as read only 
+ --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs + --reserve-memory bytes Reserve Memory + --restart-condition string Restart when condition is met ("none"|"on-failure"|"any") (default "any") + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) (default 5s) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --rollback-delay duration Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s) + --rollback-failure-action string Action on rollback failure ("pause"|"continue") (default "pause") + --rollback-max-failure-ratio float Failure rate to tolerate during a rollback (default 0) + --rollback-monitor duration Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s) + --rollback-order string Rollback order ("start-first"|"stop-first") (default "stop-first") + --rollback-parallelism uint Maximum number of tasks rolled back simultaneously (0 to roll back all at once) (default 1) + --secret secret Specify secrets to expose to the service + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s) + --stop-signal string Signal to stop the container + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) + --update-failure-action string Action on update failure ("pause"|"continue"|"rollback") (default "pause") + --update-max-failure-ratio float Failure rate to tolerate during an update (default 0) + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 5s) + --update-order string Update order ("start-first"|"stop-first") (default "stop-first") + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +## Description + +Creates a service as described by the specified parameters. You must run this +command on a manager node. + +## Examples + +### Create a service + +```bash +$ docker service create --name redis redis:3.0.6 + +dmu1ept4cxcfe8k8lhtux3ro3 + +$ docker service create --mode global --name redis2 redis:3.0.6 + +a8q9dasaafudfs8q8w32udass + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 1/1 redis:3.0.6 +a8q9dasaafud redis2 global 1/1 redis:3.0.6 +``` + +### Create a service with 5 replica tasks (--replicas) + +Use the `--replicas` flag to set the number of replica tasks for a replicated +service. The following command creates a `redis` service with `5` replica tasks: + +```bash +$ docker service create --name redis --replicas=5 redis:3.0.6 + +4cdgfyky7ozwh3htjfw0d12qv +``` + +The above command sets the *desired* number of tasks for the service. Even +though the command returns immediately, actual scaling of the service may take +some time. The `REPLICAS` column shows both the *actual* and *desired* number +of replica tasks for the service. 
+ +In the following example the desired state is `5` replicas, but the current +number of `RUNNING` tasks is `3`: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +4cdgfyky7ozw redis replicated 3/5 redis:3.0.7 +``` + +Once all the tasks are created and `RUNNING`, the actual number of tasks is +equal to the desired number: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +4cdgfyky7ozw redis replicated 5/5 redis:3.0.7 +``` + +### Create a service with secrets + +Use the `--secret` flag to give a container access to a +[secret](secret_create.md). + +Create a service specifying a secret: + +```bash +$ docker service create --name redis --secret secret.json redis:3.0.6 + +4cdgfyky7ozwh3htjfw0d12qv +``` + +Create a service specifying the secret, target, user/group ID and mode: + +```bash +$ docker service create --name redis \ + --secret source=ssh-key,target=ssh \ + --secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \ + redis:3.0.6 + +4cdgfyky7ozwh3htjfw0d12qv +``` + +Secrets are located in `/run/secrets` in the container. If no target is +specified, the name of the secret will be used as the in memory file in the +container. If a target is specified, that will be the filename. In the +example above, two files will be created: `/run/secrets/ssh` and +`/run/secrets/app` for each of the secret targets specified. + +### Create a service with a rolling update policy + +```bash +$ docker service create \ + --replicas 10 \ + --name redis \ + --update-delay 10s \ + --update-parallelism 2 \ + redis:3.0.6 +``` + +When you run a [service update](service_update.md), the scheduler updates a +maximum of 2 tasks at a time, with `10s` between updates. For more information, +refer to the [rolling updates +tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/). + +### Set environment variables (-e, --env) + +This sets environmental variables for all tasks in a service. For example: + +```bash +$ docker service create --name redis_2 --replicas 5 --env MYVAR=foo redis:3.0.6 +``` + +### Create a service with specific hostname (--hostname) + +This option sets the docker service containers hostname to a specific string. +For example: + +```bash +$ docker service create --name redis --hostname myredis redis:3.0.6 +``` + +### Set metadata on a service (-l, --label) + +A label is a `key=value` pair that applies metadata to a service. To label a +service with two labels: + +```bash +$ docker service create \ + --name redis_2 \ + --label com.example.foo="bar" + --label bar=baz \ + redis:3.0.6 +``` + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +### Add bind-mounts or volumes + +Docker supports two different kinds of mounts, which allow containers to read to +or write from files or directories on other containers or the host operating +system. These types are _data volumes_ (often referred to simply as volumes) and +_bind-mounts_. + +Additionally, Docker supports `tmpfs` mounts. + +A **bind-mount** makes a file or directory on the host available to the +container it is mounted within. A bind-mount may be either read-only or +read-write. For example, a container might share its host's DNS information by +means of a bind-mount of the host's `/etc/resolv.conf` or a container might +write logs to its host's `/var/log/myContainerLogs` directory. 
If you use +bind-mounts and your host and containers have different notions of permissions, +access controls, or other such details, you will run into portability issues. + +A **named volume** is a mechanism for decoupling persistent data needed by your +container from the image used to create the container and from the host machine. +Named volumes are created and managed by Docker, and a named volume persists +even when no container is currently using it. Data in named volumes can be +shared between a container and the host machine, as well as between multiple +containers. Docker uses a _volume driver_ to create, manage, and mount volumes. +You can back up or restore volumes using Docker commands. + +A **tmpfs** mounts a tmpfs inside a container for volatile data. + +Consider a situation where your image starts a lightweight web server. You could +use that image as a base image, copy in your website's HTML files, and package +that into another image. Each time your website changed, you'd need to update +the new image and redeploy all of the containers serving your website. A better +solution is to store the website in a named volume which is attached to each of +your web server containers when they start. To update the website, you just +update the named volume. + +For more information about named volumes, see +[Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/). + +The following table describes options which apply to both bind-mounts and named +volumes in a service: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Option | Required | Description |
+|--------|----------|-------------|
+| `type` |  | The type of mount, can be either `volume`, `bind`, or `tmpfs`. Defaults to `volume` if no type is specified.<br>• `volume`: mounts a [managed volume](volume_create.md) into the container.<br>• `bind`: bind-mounts a directory or file from the host into the container.<br>• `tmpfs`: mounts a tmpfs in the container. |
+| `src` or `source` | for `type=bind` only | • `type=volume`: `src` is an optional way to specify the name of the volume (for example, `src=my-volume`). If the named volume does not exist, it is automatically created. If no `src` is specified, the volume is assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide. A randomly-named volume has the same lifecycle as its container and is destroyed when the container is destroyed (which is upon service update, or when scaling or re-balancing the service).<br>• `type=bind`: `src` is required, and specifies an absolute path to the file or directory to bind-mount (for example, `src=/path/on/host/`). An error is produced if the file or directory does not exist.<br>• `type=tmpfs`: `src` is not supported. |
+| `dst` or `destination` or `target` | yes | Mount path inside the container, for example `/some/path/in/container/`. If the path does not exist in the container's filesystem, the Engine creates a directory at the specified location before mounting the volume or bind-mount. |
+| `readonly` or `ro` |  | The Engine mounts binds and volumes read-write unless the `readonly` option is given when mounting the bind or volume.<br>• `true` or `1` or no value: Mounts the bind or volume read-only.<br>• `false` or `0`: Mounts the bind or volume read-write. |
+| `consistency` |  | The consistency requirements for the mount; one of:<br>• `default`: Equivalent to `consistent`.<br>• `consistent`: Full consistency. The container runtime and the host maintain an identical view of the mount at all times.<br>• `cached`: The host's view of the mount is authoritative. There may be delays before updates made on the host are visible within a container.<br>• `delegated`: The container runtime's view of the mount is authoritative. There may be delays before updates made in a container are visible on the host. |
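+As a quick illustration of the options in the table above, the following sketch
+(the service name and paths are hypothetical) creates a service with a
+read-only bind-mount:
+
+```bash
+$ docker service create \
+  --name my-web \
+  --mount type=bind,src=/path/on/host,dst=/path/in/container,readonly \
+  nginx:alpine
+```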
+ +#### Bind Propagation + +Bind propagation refers to whether or not mounts created within a given +bind-mount or named volume can be propagated to replicas of that mount. Consider +a mount point `/mnt`, which is also mounted on `/tmp`. The propation settings +control whether a mount on `/tmp/a` would also be available on `/mnt/a`. Each +propagation setting has a recursive counterpoint. In the case of recursion, +consider that `/tmp/a` is also mounted as `/foo`. The propagation settings +control whether `/mnt/a` and/or `/tmp/a` would exist. + +The `bind-propagation` option defaults to `rprivate` for both bind-mounts and +volume mounts, and is only configurable for bind-mounts. In other words, named +volumes do not support bind propagation. + +- **`shared`**: Sub-mounts of the original mount are exposed to replica mounts, + and sub-mounts of replica mounts are also propagated to the + original mount. +- **`slave`**: similar to a shared mount, but only in one direction. If the + original mount exposes a sub-mount, the replica mount can see it. + However, if the replica mount exposes a sub-mount, the original + mount cannot see it. +- **`private`**: The mount is private. Sub-mounts within it are not exposed to + replica mounts, and sub-mounts of replica mounts are not + exposed to the original mount. +- **`rshared`**: The same as shared, but the propagation also extends to and from + mount points nested within any of the original or replica mount + points. +- **`rslave`**: The same as `slave`, but the propagation also extends to and from + mount points nested within any of the original or replica mount + points. +- **`rprivate`**: The default. The same as `private`, meaning that no mount points + anywhere within the original or replica mount points propagate + in either direction. + +For more information about bind propagation, see the +[Linux kernel documentation for shared subtree](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + +#### Options for Named Volumes + +The following options can only be used for named volumes (`type=volume`); + + + + + + + + + + + + + + + + + + + + + + + +
+| Option | Description |
+|--------|-------------|
+| `volume-driver` | Name of the volume-driver plugin to use for the volume. Defaults to `"local"`, to use the local volume driver to create the volume if the volume does not exist. |
+| `volume-label` | One or more custom metadata ("labels") to apply to the volume upon creation. For example, `volume-label=mylabel=hello-world,my-other-label=hello-mars`. For more information about labels, refer to [apply custom metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). |
+| `volume-nocopy` | By default, if you attach an empty volume to a container, and files or directories already existed at the mount-path in the container (`dst`), the Engine copies those files and directories into the volume, allowing the host to access them. Set `volume-nocopy` to disable copying files from the container's filesystem to the volume and mount the empty volume.<br>A value is optional:<br>• `true` or `1`: Default if you do not provide a value. Disables copying.<br>• `false` or `0`: Enables copying. |
+| `volume-opt` | Options specific to a given volume driver, which will be passed to the driver when creating the volume. Options are provided as a comma-separated list of key/value pairs, for example, `volume-opt=some-option=some-value,volume-opt=some-other-option=some-other-value`. For available options for a given driver, refer to that driver's documentation. |
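+Tying these options together, a hypothetical sketch (the service and volume
+names are placeholders) that mounts a named volume using the default `local`
+driver and disables copying of pre-existing container files:
+
+```bash
+$ docker service create \
+  --name my-service \
+  --mount type=volume,src=my-volume,dst=/path/in/container,volume-driver=local,volume-nocopy \
+  nginx:alpine
+```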
+ + +#### Options for tmpfs + +The following options can only be used for tmpfs mounts (`type=tmpfs`); + + + + + + + + + + + + + + + +
+| Option | Description |
+|--------|-------------|
+| `tmpfs-size` | Size of the tmpfs mount in bytes. Unlimited by default in Linux. |
+| `tmpfs-mode` | File mode of the tmpfs in octal. (e.g. `"700"` or `"0700"`.) Defaults to `"1777"` in Linux. |
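+As an illustrative sketch (the mount path and size are arbitrary), a tmpfs
+mount capped at 64 MB (67108864 bytes) with a restricted file mode could be
+requested like this:
+
+```bash
+$ docker service create \
+  --name my-service \
+  --mount type=tmpfs,dst=/tmp/scratch,tmpfs-size=67108864,tmpfs-mode=1770 \
+  nginx:alpine
+```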
+ + +#### Differences between "--mount" and "--volume" + +The `--mount` flag supports most options that are supported by the `-v` +or `--volume` flag for `docker run`, with some important exceptions: + +- The `--mount` flag allows you to specify a volume driver and volume driver + options *per volume*, without creating the volumes in advance. In contrast, + `docker run` allows you to specify a single volume driver which is shared + by all volumes, using the `--volume-driver` flag. + +- The `--mount` flag allows you to specify custom metadata ("labels") for a volume, + before the volume is created. + +- When you use `--mount` with `type=bind`, the host-path must refer to an *existing* + path on the host. The path will not be created for you and the service will fail + with an error if the path does not exist. + +- The `--mount` flag does not allow you to relabel a volume with `Z` or `z` flags, + which are used for `selinux` labeling. + +#### Create a service using a named volume + +The following example creates a service that uses a named volume: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \ + nginx:alpine +``` + +For each replica of the service, the engine requests a volume named "my-volume" +from the default ("local") volume driver where the task is deployed. If the +volume does not exist, the engine creates a new volume and applies the "color" +and "shape" labels. + +When the task is started, the volume is mounted on `/path/in/container/` inside +the container. + +Be aware that the default ("local") volume is a locally scoped volume driver. +This means that depending on where a task is deployed, either that task gets a +*new* volume named "my-volume", or shares the same "my-volume" with other tasks +of the same service. Multiple containers writing to a single shared volume can +cause data corruption if the software running inside the container is not +designed to handle concurrent processes writing to the same location. Also take +into account that containers can be re-scheduled by the Swarm orchestrator and +be deployed on a different node. + +#### Create a service that uses an anonymous volume + +The following command creates a service with three replicas with an anonymous +volume on `/path/in/container`: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,destination=/path/in/container \ + nginx:alpine +``` + +In this example, no name (`source`) is specified for the volume, so a new volume +is created for each task. This guarantees that each task gets its own volume, +and volumes are not shared between tasks. Anonymous volumes are removed after +the task using them is complete. + +#### Create a service that uses a bind-mounted host directory + +The following example bind-mounts a host directory at `/path/in/container` in +the containers backing the service: + +```bash +$ docker service create \ + --name my-service \ + --mount type=bind,source=/path/on/host,destination=/path/in/container \ + nginx:alpine +``` + +### Set service mode (--mode) + +The service mode determines whether this is a _replicated_ service or a _global_ +service. A replicated service runs as many tasks as specified, while a global +service runs on each active node in the swarm. 
+ +The following command creates a global service: + +```bash +$ docker service create \ + --name redis_2 \ + --mode global \ + redis:3.0.6 +``` + +### Specify service constraints (--constraint) + +You can limit the set of nodes where a task can be scheduled by defining +constraint expressions. Multiple constraints find nodes that satisfy every +expression (AND match). Constraints can match node or Docker Engine labels as +follows: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| node attribute  | matches                  | example                                        |
+|-----------------|--------------------------|------------------------------------------------|
+| `node.id`       | Node ID                  | `node.id == 2ivku8v2gvtg4`                      |
+| `node.hostname` | Node hostname            | `node.hostname != node-2`                       |
+| `node.role`     | Node role                | `node.role == manager`                          |
+| `node.labels`   | user defined node labels | `node.labels.security == high`                  |
+| `engine.labels` | Docker Engine's labels   | `engine.labels.operatingsystem == ubuntu 14.04` |
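+Because a task is only scheduled on nodes that satisfy every expression,
+constraints can be combined to narrow placement further. A hypothetical sketch
+that restricts tasks to worker nodes labeled for high security:
+
+```bash
+$ docker service create \
+  --name redis_2 \
+  --constraint 'node.role == worker' \
+  --constraint 'node.labels.security == high' \
+  redis:3.0.6
+```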
+ + +`engine.labels` apply to Docker Engine labels like operating system, +drivers, etc. Swarm administrators add `node.labels` for operational purposes by +using the [`docker node update`](node_update.md) command. + +For example, the following limits tasks for the redis service to nodes where the +node type label equals queue: + +```bash +$ docker service create \ + --name redis_2 \ + --constraint 'node.labels.type == queue' \ + redis:3.0.6 +``` + +### Specify service placement preferences (--placement-pref) + +You can set up the service to divide tasks evenly over different categories of +nodes. One example of where this can be useful is to balance tasks over a set +of datacenters or availability zones. The example below illustrates this: + +```bash +$ docker service create \ + --replicas 9 \ + --name redis_2 \ + --placement-pref 'spread=node.labels.datacenter' \ + redis:3.0.6 +``` + +This uses `--placement-pref` with a `spread` strategy (currently the only +supported strategy) to spread tasks evenly over the values of the `datacenter` +node label. In this example, we assume that every node has a `datacenter` node +label attached to it. If there are three different values of this label among +nodes in the swarm, one third of the tasks will be placed on the nodes +associated with each value. This is true even if there are more nodes with one +value than another. For example, consider the following set of nodes: + +- Three nodes with `node.labels.datacenter=east` +- Two nodes with `node.labels.datacenter=south` +- One node with `node.labels.datacenter=west` + +Since we are spreading over the values of the `datacenter` label and the +service has 9 replicas, 3 replicas will end up in each datacenter. There are +three nodes associated with the value `east`, so each one will get one of the +three replicas reserved for this value. There are two nodes with the value +`south`, and the three replicas for this value will be divided between them, +with one receiving two replicas and another receiving just one. Finally, `west` +has a single node that will get all three replicas reserved for `west`. + +If the nodes in one category (for example, those with +`node.labels.datacenter=south`) can't handle their fair share of tasks due to +constraints or resource limitations, the extra tasks will be assigned to other +nodes instead, if possible. + +Both engine labels and node labels are supported by placement preferences. The +example above uses a node label, because the label is referenced with +`node.labels.datacenter`. To spread over the values of an engine label, use +`--placement-pref spread=engine.labels.`. + +It is possible to add multiple placement preferences to a service. This +establishes a hierarchy of preferences, so that tasks are first divided over +one category, and then further divided over additional categories. One example +of where this may be useful is dividing tasks fairly between datacenters, and +then splitting the tasks within each datacenter over a choice of racks. To add +multiple placement preferences, specify the `--placement-pref` flag multiple +times. The order is significant, and the placement preferences will be applied +in the order given when making scheduling decisions. + +The following example sets up a service with multiple placement preferences. 
+Tasks are spread first over the various datacenters, and then over racks +(as indicated by the respective labels): + +```bash +$ docker service create \ + --replicas 9 \ + --name redis_2 \ + --placement-pref 'spread=node.labels.datacenter' \ + --placement-pref 'spread=node.labels.rack' \ + redis:3.0.6 +``` + +When updating a service with `docker service update`, `--placement-pref-add` +appends a new placement preference after all existing placement preferences. +`--placement-pref-rm` removes an existing placement preference that matches the +argument. + +### Attach a service to an existing network (--network) + +You can use overlay networks to connect one or more services within the swarm. + +First, create an overlay network on a manager node the docker network create +command: + +```bash +$ docker network create --driver overlay my-network + +etjpu59cykrptrgw0z0hk5snf +``` + +After you create an overlay network in swarm mode, all manager nodes have +access to the network. + +When you create a service and pass the --network flag to attach the service to +the overlay network: + +```bash +$ docker service create \ + --replicas 3 \ + --network my-network \ + --name my-web \ + nginx + +716thylsndqma81j6kkkb5aus +``` + +The swarm extends my-network to each node running the service. + +Containers on the same network can access each other using +[service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery). + +### Publish service ports externally to the swarm (-p, --publish) + +You can publish service ports to make them available externally to the swarm +using the `--publish` flag: + +```bash +$ docker service create --publish : nginx +``` + +For example: + +```bash +$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx +``` + +When you publish a service port, the swarm routing mesh makes the service +accessible at the target port on every node regardless if there is a task for +the service running on the node. For more information refer to +[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/). + +### Publish a port for TCP only or UDP only + +By default, when you publish a port, it is a TCP port. You can +specifically publish a UDP port instead of or in addition to a TCP port. When +you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to +add the suffix `/tcp` for TCP ports. Otherwise it is optional. + +#### TCP only + +The following two commands are equivalent. + +```bash +$ docker service create --name dns-cache -p 53:53 dns-cache + +$ docker service create --name dns-cache -p 53:53/tcp dns-cache +``` + +#### TCP and UDP + +```bash +$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache +``` + +#### UDP only + +```bash +$ docker service create --name dns-cache -p 53:53/udp dns-cache +``` + +### Create services using templates + +You can use templates for some flags of `service create`, using the syntax +provided by the Go's [text/template](http://golang.org/pkg/text/template/) package. + +The supported flags are the following : + +- `--hostname` +- `--mount` +- `--env` + +Valid placeholders for the Go template are listed below: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Placeholder       | Description    |
+|-------------------|----------------|
+| `.Service.ID`     | Service ID     |
+| `.Service.Name`   | Service name   |
+| `.Service.Labels` | Service labels |
+| `.Node.ID`        | Node ID        |
+| `.Task.ID`        | Task ID        |
+| `.Task.Name`      | Task name      |
+| `.Task.Slot`      | Task slot      |
+ + +#### Template example + +In this example, we are going to set the template of the created containers based on the +service's name and the node's ID where it sits. + +```bash +$ docker service create --name hosttempl \ + --hostname="{{.Node.ID}}-{{.Service.Name}}"\ + busybox top + +va8ew30grofhjoychbr6iot8c + +$ docker service ps va8ew30grofhjoychbr6iot8c + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +wo41w8hg8qan hosttempl.1 busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 2e7a8a9c4da2 Running Running about a minute ago + +$ docker inspect --format="{{.Config.Hostname}}" hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj + +x3ti0erg11rjpg64m75kej2mz-hosttempl +``` + +## Related commands + +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) + + diff --git a/docs/reference/commandline/service_inspect.md b/docs/reference/commandline/service_inspect.md new file mode 100644 index 0000000000..24c593cec9 --- /dev/null +++ b/docs/reference/commandline/service_inspect.md @@ -0,0 +1,170 @@ +--- +title: "service inspect" +description: "The service inspect command description and usage" +keywords: "service, inspect" +--- + + + +# service inspect + +```Markdown +Usage: docker service inspect [OPTIONS] SERVICE [SERVICE...] + +Display detailed information on one or more services + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format +``` + +## Description + +Inspects the specified service. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Inspect a service by name or ID + +You can inspect a service, either by its *name*, or *ID* + +For example, given the following service; + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +``` + +Both `docker service inspect redis`, and `docker service inspect dmu1ept4cxcf` +produce the same result: + +```none +$ docker service inspect redis + +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + "CreatedAt": "2016-06-17T18:44:02.558012087Z", + "UpdatedAt": "2016-06-17T18:44:02.558012087Z", + "Spec": { + "Name": "redis", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis:3.0.6" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": {}, + "EndpointSpec": { + "Mode": "vip" + } + }, + "Endpoint": { + "Spec": {} + } + } +] +``` + +```bash +$ docker service inspect dmu1ept4cxcf + +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + ... 
+ } +] +``` + +### Formatting + +You can print the inspect output in a human-readable format instead of the default +JSON output, by using the `--pretty` option: + +```bash +$ docker service inspect --pretty frontend + +ID: c8wgl7q4ndfd52ni6qftkvnnp +Name: frontend +Labels: + - org.example.projectname=demo-app +Service Mode: REPLICATED + Replicas: 5 +Placement: +UpdateConfig: + Parallelism: 0 + On failure: pause + Max failure ratio: 0 +ContainerSpec: + Image: nginx:alpine +Resources: +Networks: net1 +Endpoint Mode: vip +Ports: + PublishedPort = 4443 + Protocol = tcp + TargetPort = 443 + PublishMode = ingress +``` + +You can also use `--format pretty` for the same effect. + + +#### Find the number of tasks running as part of a service + +The `--format` option can be used to obtain specific information about a +service. For example, the following command outputs the number of replicas +of the "redis" service. + +```bash +$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis + +10 +``` + + +## Related commands + +* [service create](service_create.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/docs/reference/commandline/service_logs.md b/docs/reference/commandline/service_logs.md new file mode 100644 index 0000000000..fd328d0f6d --- /dev/null +++ b/docs/reference/commandline/service_logs.md @@ -0,0 +1,85 @@ +--- +title: "service logs" +description: "The service logs command description and usage" +keywords: "service, task, logs" +--- + + + +# service logs + +```Markdown +Usage: docker service logs [OPTIONS] SERVICE|TASK + +Fetch the logs of a service or task + +Options: + -f, --follow Follow log output + --help Print usage + --no-resolve Do not map IDs to Names in output + --no-task-ids Do not include task IDs in output + --no-trunc Do not truncate output + --since string Show logs since timestamp + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +## Description + +The `docker service logs` command batch-retrieves logs present at the time of execution. + +The `docker service logs` command can be used with either the name or ID of a +service, or with the ID of a task. If a service is passed, it will display logs +for all of the containers in that service. If a task is passed, it will only +display logs from that particular task. + +> **Note**: This command is only functional for services that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker service logs --follow` command will continue streaming the new output from +the service's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. + +The `docker service logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. 
+ +The `docker service logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +service. + +The `--since` option shows only the service logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date +format you may also use RFC3339Nano, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. You can combine the +`--since` option with either or both of the `--follow` or `--tail` options. + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/docs/reference/commandline/service_ls.md b/docs/reference/commandline/service_ls.md new file mode 100644 index 0000000000..c222c04857 --- /dev/null +++ b/docs/reference/commandline/service_ls.md @@ -0,0 +1,164 @@ +--- +title: "service ls" +description: "The service ls command description and usage" +keywords: "service, ls" +--- + + + +# service ls + +```Markdown +Usage: docker service ls [OPTIONS] + +List services + +Aliases: + ls, list + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print services using a Go template + --help Print usage + -q, --quiet Only display IDs +``` + +## Description + +This command when run targeting a manager, lists services are running in the +swarm. + +## Examples + +On a manager node: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +c8wgl7q4ndfd frontend replicated 5/5 nginx:alpine +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +iwe3278osahj mongo global 7/7 mongo:3.3 +``` + +The `REPLICAS` column shows both the *actual* and *desired* number of tasks for +the service. + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](service_ls.md#id) +* [label](service_ls.md#label) +* [mode](service_ls.md#mode) +* [name](service_ls.md#name) + +#### id + +The `id` filter matches all or part of a service's id. + +```bash +$ docker service ls -f "id=0bcjw" +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +#### label + +The `label` filter matches services based on the presence of a `label` alone or +a `label` and a value. + +The following filter matches all services with a `project` label regardless of +its value: + +```bash +$ docker service ls --filter label=project +ID NAME MODE REPLICAS IMAGE +01sl1rp6nj5u frontend2 replicated 1/1 nginx:alpine +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +The following filter matches only services with the `project` label with the +`project-a` value. 
+ +```bash +$ docker service ls --filter label=project=project-a +ID NAME MODE REPLICAS IMAGE +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +#### mode + +The `mode` filter matches on the mode (either `replicated` or `global`) of a service. + +The following filter matches only `global` services. + +```bash +$ docker service ls --filter mode=global +ID NAME MODE REPLICAS IMAGE +w7y0v2yrn620 top global 1/1 busybox +``` + +#### name + +The `name` filter matches on all or part of a service's name. + +The following filter matches services with a name containing `redis`. + +```bash +$ docker service ls --filter name=redis +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +### Formatting + +The formatting options (`--format`) pretty-prints services output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------|------------------------------------------------------------------------------------------ +`.ID` | Service ID +`.Name` | Service name +`.Mode` | Service mode (replicated, global) +`.Replicas` | Service replicas +`.Image` | Service image +`.Ports` | Service ports published in ingress mode + +When using the `--format` option, the `service ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID`, `Mode`, and `Replicas` entries separated by a colon for all services: + +```bash +$ docker service ls --format "{{.ID}}: {{.Mode}} {{.Replicas}}" + +0zmvwuiu3vue: replicated 10/10 +fm6uf97exkul: global 5/5 +``` + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/docs/reference/commandline/service_ps.md b/docs/reference/commandline/service_ps.md new file mode 100644 index 0000000000..51e8604c7e --- /dev/null +++ b/docs/reference/commandline/service_ps.md @@ -0,0 +1,194 @@ +--- +title: "service ps" +description: "The service ps command description and usage" +keywords: "service, tasks, ps" +aliases: ["/engine/reference/commandline/service_tasks/"] +--- + + + +# service ps + +```Markdown +Usage: docker service ps [OPTIONS] SERVICE [SERVICE...] + +List the tasks of one or more services + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print tasks using a Go template + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +## Description + +Lists the tasks that are running as part of the specified services. This command +has to be run targeting a manager node. 
+ +## Examples + +### List the tasks that are part of a service + +The following command shows all the tasks that are part of the `redis` service: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.5 manager1 Running Running 8 seconds +bk658fpbex0d redis.2 redis:3.0.5 worker2 Running Running 9 seconds +5ls5s5fldaqg redis.3 redis:3.0.5 worker1 Running Running 9 seconds +8ryt076polmc redis.4 redis:3.0.5 worker1 Running Running 9 seconds +1x0v8yomsncd redis.5 redis:3.0.5 manager1 Running Running 8 seconds +71v7je3el7rr redis.6 redis:3.0.5 worker2 Running Running 9 seconds +4l3zm9b7tfr7 redis.7 redis:3.0.5 worker2 Running Running 9 seconds +9tfpyixiy2i7 redis.8 redis:3.0.5 worker1 Running Running 9 seconds +3w1wu13yupln redis.9 redis:3.0.5 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.5 manager1 Running Running 8 seconds +``` + +In addition to _running_ tasks, the output also shows the task history. For +example, after updating the service to use the `redis:3.0.6` image, the output +may look like this: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxk redis.1 redis:3.0.6 manager1 Running Running 6 seconds ago +ky2re9oz86r9 \_ redis.1 redis:3.0.5 manager1 Shutdown Shutdown 8 seconds ago +3s46te2nzl4i redis.2 redis:3.0.6 worker2 Running Running less than a second ago +nvjljf7rmor4 \_ redis.2 redis:3.0.6 worker2 Shutdown Rejected 23 seconds ago "No such image: redis@sha256:6…" +vtiuz2fpc0yb \_ redis.2 redis:3.0.5 worker2 Shutdown Shutdown 1 second ago +jnarweeha8x4 redis.3 redis:3.0.6 worker1 Running Running 3 seconds ago +vs448yca2nz4 \_ redis.3 redis:3.0.5 worker1 Shutdown Shutdown 4 seconds ago +jf1i992619ir redis.4 redis:3.0.6 worker1 Running Running 10 seconds ago +blkttv7zs8ee \_ redis.4 redis:3.0.5 worker1 Shutdown Shutdown 11 seconds ago +``` + +The number of items in the task history is determined by the +`--task-history-limit` option that was set when initializing the swarm. You can +change the task history retention limit using the +[`docker swarm update`](swarm_update.md) command. + +When deploying a service, docker resolves the digest for the service's +image, and pins the service to that digest. The digest is not shown by +default, but is printed if `--no-trunc` is used. The `--no-trunc` option +also shows the non-truncated task ID, and error-messages, as can be seen below; + +```bash +$ docker service ps --no-trunc redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxksi9w2a704wkp7 redis.1 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 manager1 Running Running 5 minutes ago +ky2re9oz86r9556i2szb8a8af \_ redis.1 redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e worker2 Shutdown Shutdown 5 minutes ago +bk658fpbex0d57cqcwoe3jthu redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Running Running 5 seconds +nvjljf7rmor4htv7l8rwcx7i7 \_ redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Shutdown Rejected 5 minutes ago "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842" +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). 
+Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* [id](#id) +* [name](#name) +* [node](#node) +* [desired-state](#desired-state) + + +#### id + +The `id` filter matches on all or a prefix of a task's ID. + +```bash +$ docker service ps -f "id=8" redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +8ryt076polmc redis.4 redis:3.0.6 worker1 Running Running 9 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + +#### name + +The `name` filter matches on task names. + +```bash +$ docker service ps -f "name=redis.1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +qihejybwf1x5 redis.1 redis:3.0.6 manager1 Running Running 8 seconds +``` + + +#### node + +The `node` filter matches on a node name or a node ID. + +```bash +$ docker service ps -f "node=manager1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.6 manager1 Running Running 8 seconds +1x0v8yomsncd redis.5 redis:3.0.6 manager1 Running Running 8 seconds +3w1wu13yupln redis.9 redis:3.0.6 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. + +### Formatting + +The formatting options (`--format`) pretty-prints tasks output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +----------------|------------------------------------------------------------------------------------------ +`.ID` | Task ID +`.Name` | Task name +`.Image` | Task image +`.Node` | Node ID +`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`) +`.CurrentState` | Current state of the task +`.Error` | Error +`.Ports` | Task published ports + +When using the `--format` option, the `service ps` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`Name` and `Image` entries separated by a colon for all tasks: + +```bash +$ docker service ps --format "{{.Name}}: {{.Image}}" top +top.1: busybox +top.2: busybox +top.3: busybox +``` + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service update](service_update.md) diff --git a/docs/reference/commandline/service_rm.md b/docs/reference/commandline/service_rm.md new file mode 100644 index 0000000000..448f2c3b24 --- /dev/null +++ b/docs/reference/commandline/service_rm.md @@ -0,0 +1,60 @@ +--- +title: "service rm" +description: "The service rm command description and usage" +keywords: "service, rm" +--- + + + +# service rm + +```Markdown +Usage: docker service rm SERVICE [SERVICE...] + +Remove one or more services + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +## Description + +Removes the specified services from the swarm. This command has to be run +targeting a manager node. 
+ +## Examples + +Remove the `redis` service: + +```bash +$ docker service rm redis + +redis + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a running service. + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/docs/reference/commandline/service_scale.md b/docs/reference/commandline/service_scale.md new file mode 100644 index 0000000000..a3aef5fd34 --- /dev/null +++ b/docs/reference/commandline/service_scale.md @@ -0,0 +1,104 @@ +--- +title: "service scale" +description: "The service scale command description and usage" +keywords: "service, scale" +--- + + + +# service scale + +```markdown +Usage: docker service scale SERVICE=REPLICAS [SERVICE=REPLICAS...] + +Scale one or multiple replicated services + +Options: + --help Print usage +``` + +## Description + +The scale command enables you to scale one or more replicated services either up +or down to the desired number of replicas. This command cannot be applied on +services which are global mode. The command will return immediately, but the +actual scaling of the service may take some time. To stop all replicas of a +service while keeping the service active in the swarm you can set the scale to 0. + +## Examples + +### Scale a single service + +The following command scales the "frontend" service to 50 tasks. + +```bash +$ docker service scale frontend=50 + +frontend scaled to 50 +``` + +The following command tries to scale a global service to 10 tasks and returns an error. + +```bash +$ docker service create --mode global --name backend backend:latest + +b4g08uwuairexjub6ome6usqh + +$ docker service scale backend=10 + +backend: scale can only be used with replicated mode +``` + +Directly afterwards, run `docker service ls`, to see the actual number of +replicas. + +```bash +$ docker service ls --filter name=frontend + +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 15/50 nginx:alpine +``` + +You can also scale a service using the [`docker service update`](service_update.md) +command. The following commands are equivalent: + +```bash +$ docker service scale frontend=50 +$ docker service update --replicas=50 frontend +``` + +### Scale multiple services + +The `docker service scale` command allows you to set the desired number of +tasks for multiple services at once. 
The following example scales both the +backend and frontend services: + +```bash +$ docker service scale backend=3 frontend=5 + +backend scaled to 3 +frontend scaled to 5 + +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/docs/reference/commandline/service_update.md b/docs/reference/commandline/service_update.md new file mode 100644 index 0000000000..93c5750eee --- /dev/null +++ b/docs/reference/commandline/service_update.md @@ -0,0 +1,265 @@ +--- +title: "service update" +description: "The service update command description and usage" +keywords: "service, update" +--- + + + +# service update + +```Markdown +Usage: docker service update [OPTIONS] SERVICE + +Update a service + +Options: + --args command Service command args + --constraint-add list Add or update a placement constraint + --constraint-rm list Remove a constraint + --container-label-add list Add or update a container label + --container-label-rm list Remove a container label by its key + -d, --detach Exit immediately instead of waiting for the service to converge (default true) + --dns-add list Add or update a custom DNS server + --dns-option-add list Add or update a DNS option + --dns-option-rm list Remove a DNS option + --dns-rm list Remove a custom DNS server + --dns-search-add list Add or update a custom DNS search domain + --dns-search-rm list Remove a DNS search domain + --endpoint-mode string Endpoint mode (vip or dnsrr) + --entrypoint command Overwrite the default ENTRYPOINT of the image + --env-add list Add or update an environment variable + --env-rm list Remove an environment variable + --force Force update even if no changes require it + --group-add list Add an additional supplementary user group to the container + --group-rm list Remove a previously added supplementary user group from the container + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-start-period duration Start period for the container to initialize before counting retries towards unstable (ms|s|m|h) + --health-timeout duration Maximum time to allow one check to run (ms|s|m|h) + --help Print usage + --host-add list Add or update a custom host-to-IP mapping (host:ip) + --host-rm list Remove a custom host-to-IP mapping (host:ip) + --hostname string Container hostname + --image string Service image tag + --label-add list Add or update a service label + --label-rm list Remove a label by its key + --limit-cpu decimal Limit CPUs + --limit-memory bytes Limit Memory + --log-driver string Logging driver for service + --log-opt list Logging driver options + --mount-add mount Add or update a mount on a service + --mount-rm list Remove a mount by its target path + --network-add list Add a network + --network-rm list Remove a network + --no-healthcheck Disable any container-specified HEALTHCHECK + --placement-pref-add pref Add a placement preference + --placement-pref-rm pref Remove a placement preference + --publish-add port Add or update a published port + --publish-rm port Remove a published port by its target port + -q, 
--quiet Suppress progress output + --read-only Mount the container's root filesystem as read only + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs + --reserve-memory bytes Reserve Memory + --restart-condition string Restart when condition is met ("none"|"on-failure"|"any") + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --rollback Rollback to previous specification + --rollback-delay duration Delay between task rollbacks (ns|us|ms|s|m|h) + --rollback-failure-action string Action on rollback failure ("pause"|"continue") + --rollback-max-failure-ratio float Failure rate to tolerate during a rollback + --rollback-monitor duration Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) + --rollback-order string Rollback order ("start-first"|"stop-first") (default "stop-first") + --rollback-parallelism uint Maximum number of tasks rolled back simultaneously (0 to roll back all at once) + --secret-add secret Add or update a secret on a service + --secret-rm list Remove a secret + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) + --stop-signal string Signal to stop the container + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) + --update-failure-action string Action on update failure ("pause"|"continue"|"rollback") + --update-max-failure-ratio float Failure rate to tolerate during an update + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) + --update-order string Update order ("start-first"|"stop-first") + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +## Description + +Updates a service as described by the specified parameters. This command has to be run targeting a manager node. +The parameters are the same as [`docker service create`](service_create.md). Please look at the description there +for further information. + +Normally, updating a service will only cause the service's tasks to be replaced with new ones if a change to the +service requires recreating the tasks for it to take effect. For example, only changing the +`--update-parallelism` setting will not recreate the tasks, because the individual tasks are not affected by this +setting. However, the `--force` flag will cause the tasks to be recreated anyway. This can be used to perform a +rolling restart without any changes to the service parameters. + +## Examples + +### Update a service + +```bash +$ docker service update --limit-cpu 2 redis +``` + +### Perform a rolling restart with no parameter changes + +```bash +$ docker service update --force --update-parallelism 1 --update-delay 30s redis +``` + +In this example, the `--force` flag causes the service's tasks to be shut down +and replaced with new ones even though none of the other parameters would +normally cause that to happen. The `--update-parallelism 1` setting ensures +that only one task is replaced at a time (this is the default behavior). 
The
+`--update-delay 30s` setting introduces a 30 second delay between tasks, so
+that the rolling restart happens gradually.
+
+### Add or remove mounts
+
+Use the `--mount-add` or `--mount-rm` options to add or remove a service's
+bind-mounts or volumes.
+
+The following example creates a service which mounts the `test-data` volume to
+`/somewhere`. The next step updates the service to also mount the `other-volume`
+volume to `/somewhere-else`. The last step unmounts the `/somewhere` mount
+point, effectively removing the `test-data` volume. Each command returns the
+service name.
+
+- The `--mount-add` flag takes the same parameters as the `--mount` flag on
+  `service create`. Refer to the [volumes and
+  bind-mounts](service_create.md#volumes-and-bind-mounts-mount) section in the
+  `service create` reference for details.
+
+- The `--mount-rm` flag takes the `target` path of the mount.
+
+```bash
+$ docker service create \
+    --name=myservice \
+    --mount \
+      type=volume,source=test-data,target=/somewhere \
+    nginx:alpine
+
+myservice
+
+$ docker service update \
+    --mount-add \
+      type=volume,source=other-volume,target=/somewhere-else \
+    myservice
+
+myservice
+
+$ docker service update --mount-rm /somewhere myservice
+
+myservice
+```
+
+### Rolling back to the previous version of a service
+
+Use the `--rollback` option to roll back to the previous version of the service.
+
+This will revert the service to the configuration that was in place before the most recent `docker service update` command.
+
+The following example updates the number of replicas for the service from 4 to 5, and then rolls back to the previous configuration.
+
+```bash
+$ docker service update --replicas=5 web
+
+web
+
+$ docker service ls
+
+ID            NAME  MODE        REPLICAS  IMAGE
+80bvrzp6vxf3  web   replicated  0/5       nginx:alpine
+```
+
+Roll back the `web` service...
+
+```bash
+$ docker service update --rollback web
+
+web
+
+$ docker service ls
+
+ID            NAME  MODE        REPLICAS  IMAGE
+80bvrzp6vxf3  web   replicated  0/4       nginx:alpine
+```
+
+Other options can be combined with `--rollback` as well, for example, `--update-delay 0s` to execute the rollback without a delay between tasks:
+
+```bash
+$ docker service update \
+  --rollback \
+  --update-delay 0s \
+  web
+
+web
+```
+
+Services can also be set up to roll back to the previous version automatically
+when an update fails. To set up a service for automatic rollback, use
+`--update-failure-action=rollback`. A rollback will be triggered if the fraction
+of the tasks which failed to update successfully exceeds the value given with
+`--update-max-failure-ratio`.
+
+The rate, parallelism, and other parameters of a rollback operation are
+determined by the values passed with the following flags:
+
+- `--rollback-delay`
+- `--rollback-failure-action`
+- `--rollback-max-failure-ratio`
+- `--rollback-monitor`
+- `--rollback-parallelism`
+
+For example, a service set up with `--update-parallelism 1 --rollback-parallelism 3`
+will update one task at a time during a normal update, but during a rollback, 3
+tasks at a time will get rolled back. These rollback parameters are respected both
+during automatic rollbacks and for rollbacks initiated manually using `--rollback`.
+
+### Add or remove secrets
+
+Use the `--secret-add` or `--secret-rm` options to add or remove a service's
+secrets.
+ +The following example adds a secret named `ssh-2` and removes `ssh-1`: + +```bash +$ docker service update \ + --secret-add source=ssh-2,target=ssh-2 \ + --secret-rm ssh-1 \ + myservice +``` + +### Update services using templates + +Some flags of `service update` support the use of templating. +See [`service create`](./service_create.md#templating) for the reference. + +## Related commands + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service ps](service_ps.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) diff --git a/docs/reference/commandline/stack.md b/docs/reference/commandline/stack.md new file mode 100644 index 0000000000..94e3e252f9 --- /dev/null +++ b/docs/reference/commandline/stack.md @@ -0,0 +1,39 @@ +--- +title: "stack" +description: "The stack command description and usage" +keywords: "stack" +--- + + + +# stack + +```markdown +Usage: docker stack COMMAND + +Manage Docker stacks + +Options: + --help Print usage + +Commands: + deploy Deploy a new stack or update an existing stack + ls List stacks + ps List the tasks in the stack + rm Remove the stack + services List the services in the stack + +Run 'docker stack COMMAND --help' for more information on a command. +``` + +## Description + +Manage stacks. + diff --git a/docs/reference/commandline/stack_deploy.md b/docs/reference/commandline/stack_deploy.md new file mode 100644 index 0000000000..d57ef0f76c --- /dev/null +++ b/docs/reference/commandline/stack_deploy.md @@ -0,0 +1,107 @@ +--- +title: "stack deploy" +description: "The stack deploy command description and usage" +keywords: "stack, deploy, up" +--- + + + +# stack deploy + +```markdown +Usage: docker stack deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + -c, --compose-file string Path to a Compose file + --help Print usage + --prune Prune services that are no longer referenced + --with-registry-auth Send registry authentication details to Swarm agents +``` + +## Description + +Create and update a stack from a `compose` or a `dab` file on the swarm. This command +has to be run targeting a manager node. + +## Examples + +### Compose file + +The `deploy` command supports compose file version `3.0` and above." 
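+
+For reference, the following is only a minimal sketch of a version `3` Compose file;
+the `web` service, its image, and the port mapping are illustrative and unrelated to
+the `vossibility` example shown below:
+
+```bash
+# Write a hypothetical minimal docker-compose.yml using the version 3 format
+cat > docker-compose.yml <<'EOF'
+version: "3"
+services:
+  web:
+    image: nginx:alpine
+    ports:
+      - "8080:80"
+EOF
+```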
+ +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility + +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +### DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility + +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify that the services were correctly created: + +```bash +$ docker service ls + +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related commands + +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/docs/reference/commandline/stack_ls.md b/docs/reference/commandline/stack_ls.md new file mode 100644 index 0000000000..91349b6947 --- /dev/null +++ b/docs/reference/commandline/stack_ls.md @@ -0,0 +1,76 @@ +--- +title: "stack ls" +description: "The stack ls command description and usage" +keywords: "stack, ls" +--- + + + +# stack ls + +```markdown +Usage: docker stack ls + +List stacks + +Aliases: + ls, list + +Options: + --help Print usage + --format string Pretty-print stacks using a Go template +``` + +## Description + +Lists the stacks. 
+ +## Examples + +The following command shows all stacks and some additional information: + +```bash +$ docker stack ls + +ID SERVICES +vossibility-stack 6 +myapp 2 +``` + +### Formatting + +The formatting option (`--format`) pretty-prints stacks using a Go template. + +Valid placeholders for the Go template are listed below: + +| Placeholder | Description | +| ----------- | ------------------ | +| `.Name` | Stack name | +| `.Services` | Number of services | + +When using the `--format` option, the `stack ls` command either outputs +the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`Name` and `Services` entries separated by a colon for all stacks: + +```bash +$ docker stack ls --format "{{.Name}}: {{.Services}}" +web-server: 1 +web-cache: 4 +``` + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/docs/reference/commandline/stack_ps.md b/docs/reference/commandline/stack_ps.md new file mode 100644 index 0000000000..901b46b22d --- /dev/null +++ b/docs/reference/commandline/stack_ps.md @@ -0,0 +1,230 @@ +--- +title: "stack ps" +description: "The stack ps command description and usage" +keywords: "stack, ps" +--- + + + +# stack ps + +```markdown +Usage: docker stack ps [OPTIONS] STACK + +List the tasks in the stack + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print tasks using a Go template + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +## Description + +Lists the tasks that are running as part of the specified stack. This +command has to be run targeting a manager node. + +## Examples + +### List the tasks that are part of a stack + +The following command shows all the tasks that are part of the `voting` stack: + +```bash +$ docker stack ps voting +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +xim5bcqtgk1b voting_worker.1 dockersamples/examplevotingapp_worker:latest node2 Running Running 2 minutes ago +q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 2 minutes ago +rx5yo0866nfx voting_vote.1 dockersamples/examplevotingapp_vote:before node3 Running Running 2 minutes ago +tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 2 minutes ago +w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 3 minutes ago +6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 2 minutes ago +kqgdmededccb voting_vote.2 dockersamples/examplevotingapp_vote:before node2 Running Running 2 minutes ago +t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 3 minutes ago +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* [id](#id) +* [name](#name) +* [node](#node) +* [desired-state](#desired-state) + +#### id + +The `id` filter matches on all or a prefix of a task's ID. 
+ +```bash +$ docker stack ps -f "id=t" voting +ID NAME IMAGE NODE DESIRED STATE CURRENTSTATE ERROR PORTS +tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 14 minutes ago +t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 14 minutes ago +``` + +#### name + +The `name` filter matches on task names. + +```bash +$ docker stack ps -f "name=voting_redis" voting +ID NAME IMAGE NODE DESIRED STATE CURRENTSTATE ERROR PORTS +w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 17 minutes ago +t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 17 minutes ago +``` + +#### node + +The `node` filter matches on a node name or a node ID. + +```bash +$ docker stack ps -f "node=node1" voting +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 18 minutes ago +tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 18 minutes ago +6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 18 minutes ago +``` + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, or `accepted`. + +```bash +$ docker stack ps -f "desired-state=running" voting +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +xim5bcqtgk1b voting_worker.1 dockersamples/examplevotingapp_worker:latest node2 Running Running 21 minutes ago +q7yik0ks1in6 voting_result.1 dockersamples/examplevotingapp_result:before node1 Running Running 21 minutes ago +rx5yo0866nfx voting_vote.1 dockersamples/examplevotingapp_vote:before node3 Running Running 21 minutes ago +tz6j82jnwrx7 voting_db.1 postgres:9.4 node1 Running Running 21 minutes ago +w48spazhbmxc voting_redis.1 redis:alpine node2 Running Running 21 minutes ago +6jj1m02freg1 voting_visualizer.1 dockersamples/visualizer:stable node1 Running Running 21 minutes ago +kqgdmededccb voting_vote.2 dockersamples/examplevotingapp_vote:before node2 Running Running 21 minutes ago +t72q3z038jeh voting_redis.2 redis:alpine node3 Running Running 21 minutes ago +``` + +### Formatting + +The formatting options (`--format`) pretty-prints tasks output using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +----------------|------------------------------------------------------------------------------------------ +`.ID` | Task ID +`.Name` | Task name +`.Image` | Task image +`.Node` | Node ID +`.DesiredState` | Desired state of the task (`running`, `shutdown`, or `accepted`) +`.CurrentState` | Current state of the task +`.Error` | Error +`.Ports` | Task published ports + +When using the `--format` option, the `stack ps` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
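+
+As a sketch, the `table` directive derives the column headers from the placeholder
+names (the tasks and states below are illustrative):
+
+```bash
+$ docker stack ps --format "table {{.Name}}\t{{.Node}}\t{{.CurrentState}}" voting
+
+NAME              NODE    CURRENT STATE
+voting_worker.1   node2   Running 5 minutes ago
+voting_redis.1    node2   Running 5 minutes ago
+```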
+ +The following example uses a template without headers and outputs the +`Name` and `Image` entries separated by a colon for all tasks: + +```bash +$ docker stack ps --format "{{.Name}}: {{.Image}}" voting +voting_worker.1: dockersamples/examplevotingapp_worker:latest +voting_result.1: dockersamples/examplevotingapp_result:before +voting_vote.1: dockersamples/examplevotingapp_vote:before +voting_db.1: postgres:9.4 +voting_redis.1: redis:alpine +voting_visualizer.1: dockersamples/visualizer:stable +voting_vote.2: dockersamples/examplevotingapp_vote:before +voting_redis.2: redis:alpine +``` + +### Do not map IDs to Names + +The `--no-resolve` option shows IDs for task name, without mapping IDs to Names. + +```bash +$ docker stack ps --no-resolve voting +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +xim5bcqtgk1b 10z9fjfqzsxnezo4hb81p8mqg.1 dockersamples/examplevotingapp_worker:latest qaqt4nrzo775jrx6detglho01 Running Running 30 minutes ago +q7yik0ks1in6 hbxltua1na7mgqjnidldv5m65.1 dockersamples/examplevotingapp_result:before mxpaef1tlh23s052erw88a4w5 Running Running 30 minutes ago +rx5yo0866nfx qyprtqw1g5nrki557i974ou1d.1 dockersamples/examplevotingapp_vote:before kanqcxfajd1r16wlnqcblobmm Running Running 31 minutes ago +tz6j82jnwrx7 122f0xxngg17z52be7xspa72x.1 postgres:9.4 mxpaef1tlh23s052erw88a4w5 Running Running 31 minutes ago +w48spazhbmxc tg61x8myx563ueo3urmn1ic6m.1 redis:alpine qaqt4nrzo775jrx6detglho01 Running Running 31 minutes ago +6jj1m02freg1 8cqlyi444kzd3panjb7edh26v.1 dockersamples/visualizer:stable mxpaef1tlh23s052erw88a4w5 Running Running 31 minutes ago +kqgdmededccb qyprtqw1g5nrki557i974ou1d.2 dockersamples/examplevotingapp_vote:before qaqt4nrzo775jrx6detglho01 Running Running 31 minutes ago +t72q3z038jeh tg61x8myx563ueo3urmn1ic6m.2 redis:alpine kanqcxfajd1r16wlnqcblobmm Running Running 31 minutes ago +``` + +### Do not truncate output + +When deploying a service, docker resolves the digest for the service's +image, and pins the service to that digest. The digest is not shown by +default, but is printed if `--no-trunc` is used. 
The `--no-trunc` option +also shows the non-truncated task IDs, and error-messages, as can be seen below: + +```bash +$ docker stack ps --no-trunc voting +ID NAME IMAGE NODE DESIRED STATE CURREN STATE ERROR PORTS +xim5bcqtgk1bxqz91jzo4a1s5 voting_worker.1 dockersamples/examplevotingapp_worker:latest@sha256:3e4ddf59c15f432280a2c0679c4fc5a2ee5a797023c8ef0d3baf7b1385e9fed node2 Running Runnin 32 minutes ago +q7yik0ks1in6kv32gg6y6yjf7 voting_result.1 dockersamples/examplevotingapp_result:before@sha256:83b56996e930c292a6ae5187fda84dd6568a19d97cdb933720be15c757b7463 node1 Running Runnin 32 minutes ago +rx5yo0866nfxc58zf4irsss6n voting_vote.1 dockersamples/examplevotingapp_vote:before@sha256:8e64b182c87de902f2b72321c89b4af4e2b942d76d0b772532ff27ec4c6ebf6 node3 Running Runnin 32 minutes ago +tz6j82jnwrx7n2offljp3mn03 voting_db.1 postgres:9.4@sha256:6046af499eae34d2074c0b53f9a8b404716d415e4a03e68bc1d2f8064f2b027 node1 Running Runnin 32 minutes ago +w48spazhbmxcmbjfi54gs7x90 voting_redis.1 redis:alpine@sha256:9cd405cd1ec1410eaab064a1383d0d8854d1ef74a54e1e4a92fb4ec7bdc3ee7 node2 Running Runnin 32 minutes ago +6jj1m02freg1n3z9n1evrzsbl voting_visualizer.1 dockersamples/visualizer:stable@sha256:f924ad66c8e94b10baaf7bdb9cd491ef4e982a1d048a56a17e02bf5945401e5 node1 Running Runnin 32 minutes ago +kqgdmededccbhz2wuc0e9hx7g voting_vote.2 dockersamples/examplevotingapp_vote:before@sha256:8e64b182c87de902f2b72321c89b4af4e2b942d76d0b772532ff27ec4c6ebf6 node2 Running Runnin 32 minutes ago +t72q3z038jehe1wbh9gdum076 voting_redis.2 redis:alpine@sha256:9cd405cd1ec1410eaab064a1383d0d8854d1ef74a54e1e4a92fb4ec7bdc3ee7 node3 Running Runnin 32 minutes ago +``` + +### Only display task IDs + +The `-q ` or `--quiet` option only shows IDs of the tasks in the stack. +This example outputs all task IDs of the "voting" stack; + +```bash +$ docker stack ps -q voting +xim5bcqtgk1b +q7yik0ks1in6 +rx5yo0866nfx +tz6j82jnwrx7 +w48spazhbmxc +6jj1m02freg1 +kqgdmededccb +t72q3z038jeh +``` + +This option can be used to perform batch operations. For example, you can use +the task IDs as input for other commands, such as `docker inspect`. The +following example inspects all tasks of the "voting" stack; + +```bash +$ docker inspect $(docker stack ps -q voting) + +[ + { + "ID": "xim5bcqtgk1b1gk0krq1", + "Version": { +(...) +``` + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/docs/reference/commandline/stack_rm.md b/docs/reference/commandline/stack_rm.md new file mode 100644 index 0000000000..a1854ae6f0 --- /dev/null +++ b/docs/reference/commandline/stack_rm.md @@ -0,0 +1,78 @@ +--- +title: "stack rm" +description: "The stack rm command description and usage" +keywords: "stack, rm, remove, down" +--- + + + +# stack rm + +```markdown +Usage: docker stack rm STACK [STACK...] + +Remove one or more stacks + +Aliases: + rm, remove, down + +Options: + --help Print usage +``` + +## Description + +Remove the stack from the swarm. This command has to be run targeting +a manager node. + +## Examples + +### Remove a stack + +This will remove the stack with the name `myapp`. Services, networks, and secrets associated with the stack will be removed. 
+ +```bash +$ docker stack rm myapp + +Removing service myapp_redis +Removing service myapp_web +Removing service myapp_lb +Removing network myapp_default +Removing network myapp_frontend +``` + +### Remove multiple stacks + +This will remove all the specified stacks, `myapp` and `vossibility`. Services, networks, and secrets associated with all the specified stacks will be removed. + +```bash +$ docker stack rm myapp vossibility + +Removing service myapp_redis +Removing service myapp_web +Removing service myapp_lb +Removing network myapp_default +Removing network myapp_frontend +Removing service vossibility_nsqd +Removing service vossibility_logstash +Removing service vossibility_elasticsearch +Removing service vossibility_kibana +Removing service vossibility_ghollector +Removing service vossibility_lookupd +Removing network vossibility_default +Removing network vossibility_vossibility +``` + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack services](stack_services.md) diff --git a/docs/reference/commandline/stack_services.md b/docs/reference/commandline/stack_services.md new file mode 100644 index 0000000000..b45047d409 --- /dev/null +++ b/docs/reference/commandline/stack_services.md @@ -0,0 +1,105 @@ +--- +title: "stack services" +description: "The stack services command description and usage" +keywords: "stack, services" +advisory: "experimental" +--- + + + +# stack services (experimental) + +```markdown +Usage: docker stack services [OPTIONS] STACK + +List the services in the stack + +Options: + -f, --filter filter Filter output based on conditions provided + --format string Pretty-print services using a Go template + --help Print usage + -q, --quiet Only display IDs +``` + +## Description + +Lists the services that are running as part of the specified stack. This +command has to be run targeting a manager node. + +## Examples + +The following command shows all services in the `myapp` stack: + +```bash +$ docker stack services myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. + +The following command shows both the `web` and `db` services: + +```bash +$ docker stack services --filter name=myapp_web --filter name=myapp_db myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +The currently supported filters are: + +* id / ID (`--filter id=7be5ei6sqeye`, or `--filter ID=7be5ei6sqeye`) +* name (`--filter name=myapp_web`) +* label (`--filter label=key=value`) + +### Formatting + +The formatting options (`--format`) pretty-prints services output +using a Go template. 
+ +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------|------------------------------------------------------------------------------------------ +`.ID` | Service ID +`.Name` | Service name +`.Mode` | Service mode (replicated, global) +`.Replicas` | Service replicas +`.Image` | Service image + +When using the `--format` option, the `stack services` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. + +The following example uses a template without headers and outputs the +`ID`, `Mode`, and `Replicas` entries separated by a colon for all services: + +```bash +$ docker stack services --format "{{.ID}}: {{.Mode}} {{.Replicas}}" + +0zmvwuiu3vue: replicated 10/10 +fm6uf97exkul: global 5/5 +``` + + +## Related commands + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) diff --git a/docs/reference/commandline/start.md b/docs/reference/commandline/start.md new file mode 100644 index 0000000000..aa672289ec --- /dev/null +++ b/docs/reference/commandline/start.md @@ -0,0 +1,34 @@ +--- +title: "start" +description: "The start command description and usage" +keywords: "Start, container, stopped" +--- + + + +# start + +```markdown +Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] + +Start one or more stopped containers + +Options: + -a, --attach Attach STDOUT/STDERR and forward signals + --detach-keys string Override the key sequence for detaching a container + --help Print usage + -i, --interactive Attach container's STDIN +``` + +## Examples + +```bash +$ docker start my_container +``` diff --git a/docs/reference/commandline/stats.md b/docs/reference/commandline/stats.md new file mode 100644 index 0000000000..f5c058524d --- /dev/null +++ b/docs/reference/commandline/stats.md @@ -0,0 +1,140 @@ +--- +title: "stats" +description: "The stats command description and usage" +keywords: "container, resource, statistics" +--- + + + +# stats + +```markdown +Usage: docker stats [OPTIONS] [CONTAINER...] + +Display a live stream of container(s) resource usage statistics + +Options: + -a, --all Show all containers (default shows just running) + --format string Pretty-print images using a Go template + --help Print usage + --no-stream Disable streaming stats and only pull the first result +``` + +## Description + +The `docker stats` command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data. + +If you want more detailed information about a container's resource usage, use the `/containers/(id)/stats` API endpoint. + +## Examples + +Running `docker stats` on all running containers against a Linux daemon. + +```bash +$ docker stats +CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O +1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB +9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B +d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B +``` + +Running `docker stats` on multiple containers by name and id against a Linux daemon. 
+ +```bash +$ docker stats fervent_panini 5acfcb1b4fd1 +CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O +5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B +fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B +``` + +Running `docker stats` with customized format on all (Running and Stopped) containers. + +```bash +$ docker stats --all --format "table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}" +CONTAINER ID NAME CPU % MEM USAGE / LIMIT +c9dfa83f0317f87637d5b7e67aa4223337d947215c5a9947e697e4f7d3e0f834 ecstatic_noether 0.00% 56KiB / 15.57GiB +8f92d01cf3b29b4f5fca4cd33d907e05def7af5a3684711b20a2369d211ec67f stoic_goodall 0.07% 32.86MiB / 15.57GiB +38dd23dba00f307d53d040c1d18a91361bbdcccbf592315927d56cf13d8b7343 drunk_visvesvaraya 0.00% 0B / 0B +5a8b07ec4cc52823f3cbfdb964018623c1ba307bce2c057ccdbde5f4f6990833 big_heisenberg 0.00% 0B / 0B +``` + +`drunk_visvesvaraya` and `big_heisenberg` are stopped containers in the above example. + +Running `docker stats` on all running containers against a Windows daemon. + +```powershell +PS E:\> docker stats +CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O +09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB +9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB +3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB +``` + +Running `docker stats` on multiple containers by name and id against a Windows daemon. + +```powershell +PS E:\> docker ps -a +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +3f214c61ad1d nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky +9db7aa4d986d windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson +09d3bb5b1604 windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley + +PS E:\> docker stats 3f214c61ad1d mad_wilson +CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O +3f214c61ad1d 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB +mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB +``` + +### Formatting + +The formatting option (`--format`) pretty prints container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------ | -------------------------------------------- +`.Container` | Container name or ID (user input) +`.Name` | Container name +`.ID` | Container ID +`.CPUPerc` | CPU percentage +`.MemUsage` | Memory usage +`.NetIO` | Network IO +`.BlockIO` | Block IO +`.MemPerc` | Memory percentage (Not available on Windows) +`.PIDs` | Number of PIDs (Not available on Windows) + + +When using the `--format` option, the `stats` command either +outputs the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
+ +The following example uses a template without headers and outputs the +`Container` and `CPUPerc` entries separated by a colon for all images: + +```bash +$ docker stats --format "{{.Container}}: {{.CPUPerc}}" + +09d3bb5b1604: 6.61% +9db7aa4d986d: 9.19% +3f214c61ad1d: 0.00% +``` + +To list all containers statistics with their name, CPU percentage and memory +usage in a table format you can use: + +```bash +$ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" + +CONTAINER CPU % PRIV WORKING SET +1285939c1fd3 0.07% 796 KiB / 64 MiB +9c76f7834ae2 0.07% 2.746 MiB / 64 MiB +d1ea048f04e4 0.03% 4.583 MiB / 64 MiB +``` diff --git a/docs/reference/commandline/stop.md b/docs/reference/commandline/stop.md new file mode 100644 index 0000000000..dc00b38af5 --- /dev/null +++ b/docs/reference/commandline/stop.md @@ -0,0 +1,37 @@ +--- +title: "stop" +description: "The stop command description and usage" +keywords: "stop, SIGKILL, SIGTERM" +--- + + + +# stop + +```markdown +Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] + +Stop one or more running containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing it (default 10) +``` + +## Description + +The main process inside the container will receive `SIGTERM`, and after a grace +period, `SIGKILL`. + +## Examples + +```bash +$ docker stop my_container +``` diff --git a/docs/reference/commandline/swarm.md b/docs/reference/commandline/swarm.md new file mode 100644 index 0000000000..e8a8224f84 --- /dev/null +++ b/docs/reference/commandline/swarm.md @@ -0,0 +1,41 @@ +--- +title: "swarm" +description: "The swarm command description and usage" +keywords: "swarm" +--- + + + +# swarm + +```markdown +Usage: docker swarm COMMAND + +Manage Swarm + +Options: + --help Print usage + +Commands: + ca Manage root CA + init Initialize a swarm + join Join a swarm as a node and/or manager + join-token Manage join tokens + leave Leave the swarm + unlock Unlock swarm + unlock-key Manage the unlock key + update Update the swarm + +Run 'docker swarm COMMAND --help' for more information on a command. +``` + +## Description + +Manage the swarm. diff --git a/docs/reference/commandline/swarm_ca.md b/docs/reference/commandline/swarm_ca.md new file mode 100644 index 0000000000..4a9010c896 --- /dev/null +++ b/docs/reference/commandline/swarm_ca.md @@ -0,0 +1,122 @@ +--- +title: "swarm ca" +description: "The swarm ca command description and usage" +keywords: "swarm, ca" +--- + + + +# swarm ca + +```markdown +Usage: docker swarm ca [OPTIONS] + +Manage root CA + +Options: + --ca-cert pem-file Path to the PEM-formatted root CA certificate to use for the new cluster + --ca-key pem-file Path to the PEM-formatted root CA key to use for the new cluster + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + -d, --detach Exit immediately instead of waiting for the root rotation to converge + --external-ca external-ca Specifications of one or more certificate signing endpoints + --help Print usage + -q, --quiet Suppress progress output + --rotate Rotate the swarm CA - if no certificate or key are provided, new ones will be generated +``` + +## Description + +View or rotate the current swarm CA certificate. This command must target a manager node. + +## Examples + +Run the `docker swarm ca` command without any options to view the current root CA certificate +in PEM format. 
+
+```bash
+$ docker swarm ca
+-----BEGIN CERTIFICATE-----
+MIIBazCCARCgAwIBAgIUJPzo67QC7g8Ebg2ansjkZ8CbmaswCgYIKoZIzj0EAwIw
+EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTAzMTcxMDAwWhcNMzcwNDI4MTcx
+MDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+A0IABKL6/C0sihYEb935wVPRA8MqzPLn3jzou0OJRXHsCLcVExigrMdgmLCC+Va4
++sJ+SLVO1eQbvLHH8uuDdF/QOU6jQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBSfUy5bjUnBAx/B0GkOBKp91XvxzjAKBggqhkjO
+PQQDAgNJADBGAiEAnbvh0puOS5R/qvy1PMHY1iksYKh2acsGLtL/jAIvO4ACIQCi
+lIwQqLkJ48SQqCjG1DBTSBsHmMSRT+6mE2My+Z3GKA==
+-----END CERTIFICATE-----
+```
+
+Pass the `--rotate` flag (and optionally a `--ca-cert`, along with a `--ca-key` or
+`--external-ca` parameter flag), in order to rotate the current swarm root CA.
+
+```
+$ docker swarm ca --rotate
+desired root digest: sha256:05da740cf2577a25224c53019e2cce99bcc5ba09664ad6bb2a9425d9ebd1b53e
+  rotated TLS certificates:  [=========================>                         ] 1/2 nodes
+  rotated CA certificates:   [>                                                  ] 0/2 nodes
+```
+
+Once the rotation is finished (all the progress bars have completed), the
+now-current CA certificate will be printed:
+
+```
+$ docker swarm ca --rotate
+desired root digest: sha256:05da740cf2577a25224c53019e2cce99bcc5ba09664ad6bb2a9425d9ebd1b53e
+  rotated TLS certificates:  [==================================================>] 2/2 nodes
+  rotated CA certificates:   [==================================================>] 2/2 nodes
+-----BEGIN CERTIFICATE-----
+MIIBazCCARCgAwIBAgIUFynG04h5Rrl4lKyA4/E65tYKg8IwCgYIKoZIzj0EAwIw
+EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNTE2MDAxMDAwWhcNMzcwNTExMDAx
+MDAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+A0IABC2DuNrIETP7C7lfiEPk39tWaaU0I2RumUP4fX4+3m+87j0DU0CsemUaaOG6
++PxHhGu2VXQ4c9pctPHgf7vWeVajQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBSEL02z6mCI3SmMDmITMr12qCRY2jAKBggqhkjO
+PQQDAgNJADBGAiEA263Eb52+825EeNQZM0AME+aoH1319Zp9/J5ijILW+6ACIQCg
+gyg5u9Iliel99l7SuMhNeLkrU7fXs+Of1nTyyM73ig==
+-----END CERTIFICATE-----
+```
+
+### `--rotate`
+
+Root CA rotation is recommended if one or more of the swarm managers have been
+compromised, so that those managers can no longer connect to or be trusted by
+any other node in the cluster.
+
+Alternatively, root CA rotation can be used to give control of the swarm CA
+to an external CA, or to take control back from an external CA.
+
+The `--rotate` flag does not require any parameters to do a rotation, but you can
+optionally specify a certificate and key, or a certificate and external CA URL,
+and those will be used instead of an automatically-generated certificate/key pair.
+
+Because the root CA key should be kept secret, if provided it will not be visible
+when viewing any swarm information via the CLI or API.
+
+The root CA rotation will not be completed until all registered nodes have
+rotated their TLS certificates. If the rotation is not completing within a
+reasonable amount of time, try running
+`docker node ls --format '{{.ID}} {{.Hostname}} {{.Status}} {{.TLSStatus}}'` to
+see if any nodes are down or otherwise unable to rotate TLS certificates.
+
+
+### `--detach`
+
+Initiate the root CA rotation, but do not wait for the completion of or display the
+progress of the rotation.
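+
+For illustration, a rotation can be started in the background with `--detach` and the
+certificate inspected later by running `docker swarm ca` again:
+
+```bash
+# Start a root CA rotation without waiting for it to converge
+$ docker swarm ca --rotate --detach
+
+# Later, print the current root CA certificate in PEM format
+$ docker swarm ca
+```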
+ +## Related commands + +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm join-token](swarm_join_token.md) +* [swarm leave](swarm_leave.md) +* [swarm unlock](swarm_unlock.md) +* [swarm unlock-key](swarm_unlock_key.md) diff --git a/docs/reference/commandline/swarm_init.md b/docs/reference/commandline/swarm_init.md new file mode 100644 index 0000000000..f011758a3a --- /dev/null +++ b/docs/reference/commandline/swarm_init.md @@ -0,0 +1,168 @@ +--- +title: "swarm init" +description: "The swarm init command description and usage" +keywords: "swarm, init" +--- + + + +# swarm init + +```markdown +Usage: docker swarm init [OPTIONS] + +Initialize a swarm + +Options: + --advertise-addr string Advertised address (format: [:port]) + --autolock Enable manager autolocking (requiring an unlock key to start a stopped manager) + --availability string Availability of the node ("active"|"pause"|"drain") (default "active") + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --data-path-addr string Address or interface to use for data path traffic (format: ) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --force-new-cluster Force create a new cluster from current state + --help Print usage + --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +## Description + +Initialize a swarm. The docker engine targeted by this command becomes a manager +in the newly created single-node swarm. + +## Examples + +```bash +$ docker swarm init --advertise-addr 192.168.99.121 +Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager. + +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ + 172.17.0.2:2377 + +To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. +``` + +`docker swarm init` generates two random tokens, a worker token and a manager token. When you join +a new node to the swarm, the node joins as a worker or manager node based upon the token you pass +to [swarm join](swarm_join.md). + +After you create the swarm, you can display or rotate the token using +[swarm join-token](swarm_join_token.md). + +### `--autolock` + +This flag enables automatic locking of managers with an encryption key. The +private keys and data stored by all managers will be protected by the +encryption key printed in the output, and will not be accessible without it. +Thus, it is very important to store this key in order to activate a manager +after it restarts. The key can be passed to `docker swarm unlock` to reactivate +the manager. Autolock can be disabled by running +`docker swarm update --autolock=false`. After disabling it, the encryption key +is no longer required to start the manager, and it will start up on its own +without user intervention. + +### `--cert-expiry` + +This flag sets the validity period for node certificates. + +### `--dispatcher-heartbeat` + +This flag sets the frequency with which nodes are told to use as a +period to report their health. 
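+
+As a sketch (the durations are illustrative), the certificate validity period and the
+dispatcher heartbeat can both be set when initializing the swarm:
+
+```bash
+$ docker swarm init \
+  --cert-expiry 48h \
+  --dispatcher-heartbeat 10s
+```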
+ +### `--external-ca` + +This flag sets up the swarm to use an external CA to issue node certificates. The value takes +the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used +to send signing requests to the external CA. Currently, the only supported value is `cfssl`. +The URL specifies the endpoint where signing requests should be submitted. + +### `--force-new-cluster` + +This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data. + +### `--listen-addr` + +The node listens for inbound swarm manager traffic on this address. The default is to listen on +0.0.0.0:2377. It is also possible to specify a network interface to listen on that interface's +address; for example `--listen-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address or interface +name, the default port 2377 will be used. + +### `--advertise-addr` + +This flag specifies the address that will be advertised to other members of the +swarm for API access and overlay networking. If unspecified, Docker will check +if the system has a single IP address, and use that IP address with the +listening port (see `--listen-addr`). If the system has multiple IP addresses, +`--advertise-addr` must be specified so that the correct address is chosen for +inter-manager communication and overlay networking. + +It is also possible to specify a network interface to advertise that interface's address; +for example `--advertise-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address or interface +name, the default port 2377 will be used. + +### `--data-path-addr` + +This flag specifies the address that global scope network drivers will publish towards +other nodes in order to reach the containers running on this node. +Using this parameter it is then possible to separate the container's data traffic from the +management traffic of the cluster. +If unspecified, Docker will use the same IP address or interface that is used for the +advertise address. + +### `--task-history-limit` + +This flag sets up task history retention limit. + +### `--max-snapshots` + +This flag sets the number of old Raft snapshots to retain in addition to the +current Raft snapshots. By default, no old snapshots are retained. This option +may be used for debugging, or to store old snapshots of the swarm state for +disaster recovery purposes. + +### `--snapshot-interval` + +This flag specifies how many log entries to allow in between Raft snapshots. +Setting this to a higher number will trigger snapshots less frequently. +Snapshots compact the Raft log and allow for more efficient transfer of the +state to new managers. However, there is a performance cost to taking snapshots +frequently. + +### `--availability` + +This flag specifies the availability of the node at the time the node joins a master. +Possible availability values are `active`, `pause`, or `drain`. + +This flag is useful in certain situations. For example, a cluster may want to have +dedicated manager nodes that are not served as worker nodes. This could be achieved +by passing `--availability=drain` to `docker swarm init`. 
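+
+For example (the address is illustrative), a node that should act only as a manager
+could be initialized with:
+
+```bash
+$ docker swarm init --advertise-addr 192.168.99.121 --availability=drain
+```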
+ + +## Related commands + +* [swarm ca](swarm_ca.md) +* [swarm join](swarm_join.md) +* [swarm join-token](swarm_join_token.md) +* [swarm leave](swarm_leave.md) +* [swarm unlock](swarm_unlock.md) +* [swarm unlock-key](swarm_unlock_key.md) +* [swarm update](swarm_update.md) diff --git a/docs/reference/commandline/swarm_join.md b/docs/reference/commandline/swarm_join.md new file mode 100644 index 0000000000..8d2dfe6d00 --- /dev/null +++ b/docs/reference/commandline/swarm_join.md @@ -0,0 +1,130 @@ +--- +title: "swarm join" +description: "The swarm join command description and usage" +keywords: "swarm, join" +--- + + + +# swarm join + +```markdown +Usage: docker swarm join [OPTIONS] HOST:PORT + +Join a swarm as a node and/or manager + +Options: + --advertise-addr string Advertised address (format: [:port]) + --availability string Availability of the node ("active"|"pause"|"drain") (default "active") + --data-path-addr string Address or interface to use for data path traffic (format: ) + --help Print usage + --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) + --token string Token for entry into the swarm +``` + +## Description + +Join a node to a swarm. The node joins as a manager node or worker node based upon the token you +pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you +pass a worker token, the node joins as a worker. + +## Examples + +### Join a node to swarm as a manager + +The example below demonstrates joining a manager node using a manager token. + +```bash +$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377 +This node joined a swarm as a manager. +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +dkp8vy1dq1kxleu9g4u78tlag * manager2 Ready Active Reachable +dvfxp4zseq4s0rih1selh0d20 manager1 Ready Active Leader +``` + +A cluster should only have 3-7 managers at most, because a majority of managers must be available +for the cluster to function. Nodes that aren't meant to participate in this management quorum +should join as workers instead. Managers should be stable hosts that have static IP addresses. + +### Join a node to swarm as a worker + +The example below demonstrates joining a worker node using a worker token. + +```bash +$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377 +This node joined a swarm as a worker. +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active +dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable +dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader +``` + +### `--listen-addr value` + +If the node is a manager, it will listen for inbound swarm manager traffic on this +address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a +network interface to listen on that interface's address; for example `--listen-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address, or interface +name, the default port 2377 will be used. + +This flag is generally not necessary when joining an existing swarm. + +### `--advertise-addr value` + +This flag specifies the address that will be advertised to other members of the +swarm for API access. If unspecified, Docker will check if the system has a +single IP address, and use that IP address with the listening port (see +`--listen-addr`). 
If the system has multiple IP addresses, `--advertise-addr` +must be specified so that the correct address is chosen for inter-manager +communication and overlay networking. + +It is also possible to specify a network interface to advertise that interface's address; +for example `--advertise-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address, or interface +name, the default port 2377 will be used. + +This flag is generally not necessary when joining an existing swarm. + +### `--data-path-addr` + +This flag specifies the address that global scope network drivers will publish towards +other nodes in order to reach the containers running on this node. +Using this parameter it is then possible to separate the container's data traffic from the +management traffic of the cluster. +If unspecified, Docker will use the same IP address or interface that is used for the +advertise address. + +### `--token string` + +Secret value required for nodes to join the swarm + +### `--availability` + +This flag specifies the availability of the node at the time the node joins a master. +Possible availability values are `active`, `pause`, or `drain`. + +This flag is useful in certain situations. For example, a cluster may want to have +dedicated manager nodes that are not served as worker nodes. This could be achieved +by passing `--availability=drain` to `docker swarm join`. + + +## Related commands + +* [swarm ca](swarm_ca.md) +* [swarm init](swarm_init.md) +* [swarm join-token](swarm_join_token.md) +* [swarm leave](swarm_leave.md) +* [swarm unlock](swarm_unlock.md) +* [swarm unlock-key](swarm_unlock_key.md) +* [swarm update](swarm_update.md) diff --git a/docs/reference/commandline/swarm_join_token.md b/docs/reference/commandline/swarm_join_token.md new file mode 100644 index 0000000000..44d9cf3ec7 --- /dev/null +++ b/docs/reference/commandline/swarm_join_token.md @@ -0,0 +1,115 @@ +--- +title: "swarm join-token" +description: "The swarm join-token command description and usage" +keywords: "swarm, join-token" +--- + + + +# swarm join-token + +```markdown +Usage: docker swarm join-token [OPTIONS] (worker|manager) + +Manage join tokens + +Options: + --help Print usage + -q, --quiet Only display token + --rotate Rotate join token +``` + +## Description + +Join tokens are secrets that allow a node to join the swarm. There are two +different join tokens available, one for the worker role and one for the manager +role. You pass the token using the `--token` flag when you run +[swarm join](swarm_join.md). Nodes use the join token only when they join the +swarm. + +## Examples + +You can view or rotate the join tokens using `swarm join-token`. + +As a convenience, you can pass `worker` or `manager` as an argument to +`join-token` to print the full `docker swarm join` command to join a new node to +the swarm: + +```bash +$ docker swarm join-token worker +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ + 172.17.0.2:2377 + +$ docker swarm join-token manager +To add a manager to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \ + 172.17.0.2:2377 +``` + +Use the `--rotate` flag to generate a new join token for the specified role: + +```bash +$ docker swarm join-token --rotate worker +Successfully rotated worker join token. 
+ +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \ + 172.17.0.2:2377 +``` + +After using `--rotate`, only the new token will be valid for joining with the specified role. + +The `-q` (or `--quiet`) flag only prints the token: + +```bash +$ docker swarm join-token -q worker + +SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t +``` + +### `--rotate` + +Because tokens allow new nodes to join the swarm, you should keep them secret. +Be particularly careful with manager tokens since they allow new manager nodes +to join the swarm. A rogue manager has the potential to disrupt the operation of +your swarm. + +Rotate your swarm's join token if a token gets checked-in to version control, +stolen, or a node is compromised. You may also want to periodically rotate the +token to ensure any unknown token leaks do not allow a rogue node to join +the swarm. + +To rotate the join token and print the newly generated token, run +`docker swarm join-token --rotate` and pass the role: `manager` or `worker`. + +Rotating a join-token means that no new nodes will be able to join the swarm +using the old token. Rotation does not affect existing nodes in the swarm +because the join token is only used for authorizing new nodes joining the swarm. + +### `--quiet` + +Only print the token. Do not print a complete command for joining. + +## Related commands + +* [swarm ca](swarm_ca.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm leave](swarm_leave.md) +* [swarm unlock](swarm_unlock.md) +* [swarm unlock-key](swarm_unlock_key.md) +* [swarm update](swarm_update.md) diff --git a/docs/reference/commandline/swarm_leave.md b/docs/reference/commandline/swarm_leave.md new file mode 100644 index 0000000000..1c898ea9a5 --- /dev/null +++ b/docs/reference/commandline/swarm_leave.md @@ -0,0 +1,72 @@ +--- +title: "swarm leave" +description: "The swarm leave command description and usage" +keywords: "swarm, leave" +--- + + + +# swarm leave + +```markdown +Usage: docker swarm leave [OPTIONS] + +Leave the swarm + +Options: + -f, --force Force this node to leave the swarm, ignoring warnings + --help Print usage +``` + +## Description + +When you run this command on a worker, that worker leaves the swarm. + +You can use the `--force` option on a manager to remove it from the swarm. +However, this does not reconfigure the swarm to ensure that there are enough +managers to maintain a quorum in the swarm. The safe way to remove a manager +from a swarm is to demote it to a worker and then direct it to leave the quorum +without using `--force`. Only use `--force` in situations where the swarm will +no longer be used after the manager leaves, such as in a single-node swarm. + +## Examples + +Consider the following swarm, as seen from the manager: + +```bash +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active +dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active +dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader +``` + +To remove `worker2`, issue the following command from `worker2` itself: + +```bash +$ docker swarm leave +Node left the default swarm. +``` + +The node will still appear in the node list, and marked as `down`. It no longer +affects swarm operation, but a long list of `down` nodes can clutter the node +list. 
To remove an inactive node from the list, use the [`node rm`](node_rm.md) +command. + +## Related commands + +* [swarm ca](swarm_ca.md) +* [node rm](node_rm.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm join-token](swarm_join_token.md) +* [swarm unlock](swarm_unlock.md) +* [swarm unlock-key](swarm_unlock_key.md) +* [swarm update](swarm_update.md) diff --git a/docs/reference/commandline/swarm_unlock.md b/docs/reference/commandline/swarm_unlock.md new file mode 100644 index 0000000000..d2f5fc4954 --- /dev/null +++ b/docs/reference/commandline/swarm_unlock.md @@ -0,0 +1,49 @@ +--- +title: "swarm unlock" +description: "The swarm unlock command description and usage" +keywords: "swarm, unlock" +--- + + + +# swarm unlock + +```markdown +Usage: docker swarm unlock + +Unlock swarm + +Options: + --help Print usage +``` + +## Description + +Unlocks a locked manager using a user-supplied unlock key. This command must be +used to reactivate a manager after its Docker daemon restarts if the autolock +setting is turned on. The unlock key is printed at the time when autolock is +enabled, and is also available from the `docker swarm unlock-key` command. + +## Examples + +```bash +$ docker swarm unlock +Please enter unlock key: +``` + +## Related commands + +* [swarm ca](swarm_ca.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm join-token](swarm_join_token.md) +* [swarm leave](swarm_leave.md) +* [swarm unlock-key](swarm_unlock_key.md) +* [swarm update](swarm_update.md) diff --git a/docs/reference/commandline/swarm_unlock_key.md b/docs/reference/commandline/swarm_unlock_key.md new file mode 100644 index 0000000000..c0efd04922 --- /dev/null +++ b/docs/reference/commandline/swarm_unlock_key.md @@ -0,0 +1,92 @@ +--- +title: "swarm unlock-key" +description: "The swarm unlock-keycommand description and usage" +keywords: "swarm, unlock-key" +--- + + + +# swarm unlock-key + +```markdown +Usage: docker swarm unlock-key [OPTIONS] + +Manage the unlock key + +Options: + --help Print usage + -q, --quiet Only display token + --rotate Rotate unlock key +``` + +## Description + +An unlock key is a secret key needed to unlock a manager after its Docker daemon +restarts. These keys are only used when the autolock feature is enabled for the +swarm. + +You can view or rotate the unlock key using `swarm unlock-key`. To view the key, +run the `docker swarm unlock-key` command without any arguments: + +## Examples + +```bash +$ docker swarm unlock-key + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +Use the `--rotate` flag to rotate the unlock key to a new, randomly-generated +key: + +```bash +$ docker swarm unlock-key --rotate +Successfully rotated manager unlock key. + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +The `-q` (or `--quiet`) flag only prints the key: + +```bash +$ docker swarm unlock-key -q +SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 +``` + +### `--rotate` + +This flag rotates the unlock key, replacing it with a new randomly-generated +key. 
The old unlock key will no longer be accepted. + +### `--quiet` + +Only print the unlock key, without instructions. + +## Related commands + +* [swarm ca](swarm_ca.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm join-token](swarm_join_token.md) +* [swarm leave](swarm_leave.md) +* [swarm unlock](swarm_unlock.md) +* [swarm update](swarm_update.md) diff --git a/docs/reference/commandline/swarm_update.md b/docs/reference/commandline/swarm_update.md new file mode 100644 index 0000000000..544ef381c0 --- /dev/null +++ b/docs/reference/commandline/swarm_update.md @@ -0,0 +1,52 @@ +--- +title: "swarm update" +description: "The swarm update command description and usage" +keywords: "swarm, update" +--- + + + +# swarm update + +```markdown +Usage: docker swarm update [OPTIONS] + +Update the swarm + +Options: + --autolock Change manager autolocking setting (true|false) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --help Print usage + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +## Description + +Updates a swarm with new parameter values. This command must target a manager node. + +## Examples + +```bash +$ docker swarm update --cert-expiry 720h +``` + +## Related commands + +* [swarm ca](swarm_ca.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm join-token](swarm_join_token.md) +* [swarm leave](swarm_leave.md) +* [swarm unlock](swarm_unlock.md) +* [swarm unlock-key](swarm_unlock_key.md) diff --git a/docs/reference/commandline/system.md b/docs/reference/commandline/system.md new file mode 100644 index 0000000000..2484a4a987 --- /dev/null +++ b/docs/reference/commandline/system.md @@ -0,0 +1,37 @@ +--- +title: "system" +description: "The system command description and usage" +keywords: "system" +--- + + + +# system + +```markdown +Usage: docker system COMMAND + +Manage Docker + +Options: + --help Print usage + +Commands: + df Show docker disk usage + events Get real time events from the server + info Display system-wide information + prune Remove unused data + +Run 'docker system COMMAND --help' for more information on a command. +``` + +## Description + +Manage Docker. diff --git a/docs/reference/commandline/system_df.md b/docs/reference/commandline/system_df.md new file mode 100644 index 0000000000..aedd9779e4 --- /dev/null +++ b/docs/reference/commandline/system_df.md @@ -0,0 +1,140 @@ +--- +title: "system df" +description: "The system df command description and usage" +keywords: "system, data, usage, disk" +--- + + + +# system df + +```markdown +Usage: docker system df [OPTIONS] + +Show docker filesystem usage + +Options: + --format string Pretty-print images using a Go template + --help Print usage + -v, --verbose Show detailed information on space usage +``` + +## Description + +The `docker system df` command displays information regarding the +amount of disk space used by the docker daemon. 
+ +## Examples + +By default the command will just show a summary of the data used: + +```bash +$ docker system df + +TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 5 2 16.43 MB 11.63 MB (70%) +Containers 2 0 212 B 212 B (100%) +Local Volumes 2 1 36 B 0 B (0%) +``` + +A more detailed view can be requested using the `-v, --verbose` flag: + +```bash +$ docker system df -v + +Images space usage: + +REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS +my-curl latest b2789dd875bf 6 minutes ago 11 MB 11 MB 5 B 0 +my-jq latest ae67841be6d0 6 minutes ago 9.623 MB 8.991 MB 632.1 kB 0 + a0971c4015c1 6 minutes ago 11 MB 11 MB 0 B 0 +alpine latest 4e38e38c8ce0 9 weeks ago 4.799 MB 0 B 4.799 MB 1 +alpine 3.3 47cf20d8c26c 9 weeks ago 4.797 MB 4.797 MB 0 B 1 + +Containers space usage: + +CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES +4a7f7eebae0f alpine:latest "sh" 1 0 B 16 minutes ago Exited (0) 5 minutes ago hopeful_yalow +f98f9c2aa1ea alpine:3.3 "sh" 1 212 B 16 minutes ago Exited (0) 48 seconds ago anon-vol + +Local Volumes space usage: + +NAME LINKS SIZE +07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e 2 36 B +my-named-vol 0 0 B +``` + +* `SHARED SIZE` is the amount of space that an image shares with another one (i.e. their common data) +* `UNIQUE SIZE` is the amount of space that is only used by a given image +* `SIZE` is the virtual size of the image, it is the sum of `SHARED SIZE` and `UNIQUE SIZE` + +> **Note**: Network information is not shown because it doesn't consume the disk +> space. + +## Performance + +The `system df` command can be very resource-intensive. It traverses the +filesystem of every image, container, and volume in the system. You should be +careful running this command in systems with lots of images, containers, or +volumes or in systems where some images, containers, or volumes have very large +filesystems with many files. You should also be careful not to run this command +in systems where performance is critical. + +## Format the output + +The formatting option (`--format`) pretty prints the disk usage output +using a Go template. + +Valid placeholders for the Go template are listed below: + +| Placeholder | Description | +| -------------- | ------------------------------------------ | +| `.Type` | `Images`, `Containers` and `Local Volumes` | +| `.TotalCount` | Total number of items | +| `.Active` | Number of active items | +| `.Size` | Available size | +| `.Reclaimable` | Reclaimable size | + +When using the `--format` option, the `system df` command outputs +the data exactly as the template declares or, when using the +`table` directive, will include column headers as well. + +The following example uses a template without headers and outputs the +`Type` and `TotalCount` entries separated by a colon: + +```bash +$ docker system df --format "{{.Type}}: {{.TotalCount}}" + +Images: 2 +Containers: 4 +Local Volumes: 1 +``` + +To list the disk usage with size and reclaimable size in a table format you +can use: + +```bash +$ docker system df --format "table {{.Type}}\t{{.Size}}\t{{.Reclaimable}}" + +TYPE SIZE RECLAIMABLE +Images 2.547 GB 2.342 GB (91%) +Containers 0 B 0 B +Local Volumes 150.3 MB 150.3 MB (100%) + +``` + +**Note** the format option is meaningless when verbose is true. 
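+
+As a further illustration (not part of the original examples; the `grep`
+pipeline is only one way to do it), the formatted output combines well with
+standard shell tools. The following extracts just the reclaimable space for
+images, with values following the summary table above:
+
+```bash
+$ docker system df --format '{{.Type}}: {{.Reclaimable}}' | grep '^Images'
+
+Images: 11.63 MB (70%)
+```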
+ +## Related commands +* [system prune](system_prune.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) diff --git a/docs/reference/commandline/system_events.md b/docs/reference/commandline/system_events.md new file mode 100644 index 0000000000..76ef4de8f6 --- /dev/null +++ b/docs/reference/commandline/system_events.md @@ -0,0 +1,345 @@ +--- +title: "system events" +description: "The system events command description and usage" +keywords: "system, events, container, report" +--- + + + +# system events + +```markdown +Usage: docker system events [OPTIONS] + +Get real time events from the server + +Options: + -f, --filter value Filter output based on conditions provided (default []) + --format string Format the output using the given Go template + --help Print usage + --since string Show all events created since timestamp + --until string Stream events until this timestamp +``` + +## Description + +Use `docker system events` to get real-time events from the server. These +events differ per Docker object type. + +### Object types + +#### Containers + +Docker containers report the following events: + +- `attach` +- `commit` +- `copy` +- `create` +- `destroy` +- `detach` +- `die` +- `exec_create` +- `exec_detach` +- `exec_start` +- `export` +- `health_status` +- `kill` +- `oom` +- `pause` +- `rename` +- `resize` +- `restart` +- `start` +- `stop` +- `top` +- `unpause` +- `update` + +#### Images + +Docker images report the following events: + +- `delete` +- `import` +- `load` +- `pull` +- `push` +- `save` +- `tag` +- `untag` + +#### Plugins + +Docker plugins report the following events: + +- `install` +- `enable` +- `disable` +- `remove` + +#### Volumes + +Docker volumes report the following events: + +- `create` +- `mount` +- `unmount` +- `destroy` + +#### Networks + +Docker networks report the following events: + +- `create` +- `connect` +- `disconnect` +- `destroy` + +#### Daemons + +Docker daemons report the following events: + +- `reload` + +### Limiting, filtering, and formatting the output + +#### Limit events by time + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine’s time. If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +#### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". 
If you would
+like to use multiple filters, pass multiple flags (e.g.,
+`--filter "foo=bar" --filter "bif=baz"`).
+
+Using the same filter multiple times will be handled as an *OR*; for example
+`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display
+events for container 588a23dac085 *OR* container a8f7720b8c22.
+
+Using multiple filters will be handled as an *AND*; for example
+`--filter container=588a23dac085 --filter event=start` will display events for
+container 588a23dac085 *AND* the event type is *start*.
+
+The currently supported filters are:
+
+* container (`container=<name or id>`)
+* daemon (`daemon=<name or id>`)
+* event (`event=<event action>`)
+* image (`image=<repository or tag>`)
+* label (`label=<key>` or `label=<key>=<value>`)
+* network (`network=<name or id>`)
+* plugin (`plugin=<name or id>`)
+* type (`type=<container or image or volume or network or daemon or plugin>`)
+* volume (`volume=<name or id>`)
+
+#### Format
+
+If a format (`--format`) is specified, the given template will be executed
+instead of the default format. Go's
+[text/template](http://golang.org/pkg/text/template/) package describes all
+the details of the format.
+
+If a format is set to `{{json .}}`, the events are streamed as valid JSON
+Lines. For information about JSON Lines, please refer to http://jsonlines.org/ .
+
+## Examples
+
+### Basic example
+
+You'll need two shells for this example.
+
+**Shell 1: Listening for events:**
+
+```bash
+$ docker system events
+```
+
+**Shell 2: Start and Stop containers:**
+
+```bash
+$ docker create --name test alpine:latest top
+$ docker start test
+$ docker stop test
+```
+
+**Shell 1: (Again, now showing events):**
+
+```none
+2017-01-05T00:35:58.859401177+08:00 container create 0fdb48addc82871eb34eb23a847cfd033dedd1a0a37bef2e6d9eb3870fc7ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:04.703631903+08:00 network connect e2e1f5ceda09d4300f3a846f0acfaa9a8bb0d89e775eb744c5acecd60e0529e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test)
+2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15)
+2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test)
+2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge)
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test)
+```
+
+To exit the `docker system events` command, use `CTRL+C`.
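+
+The `--until` flag listed in the options above is not used in the examples
+below, but it accepts the same timestamp formats as `--since`. As a sketch
+(the timestamps are chosen only to bracket the events shown above), you can
+replay a bounded window instead of streaming live events:
+
+```bash
+$ docker system events --since '2017-01-05T00:35:00' --until '2017-01-05T00:37:00'
+```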
+ +### Filter events by time + +You can filter the output by an absolute timestamp or relative time on the host +machine, using the following different time syntaxes: + +```bash +$ docker system events --since 1483283804 +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker system events --since '2017-01-05' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker system events --since '2013-09-03T15:49:29' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker system events --since '10m' +2017-01-05T00:35:41.241772953+08:00 volume create testVol (driver=local) +2017-01-05T00:35:58.859401177+08:00 container create d9cd...4d70 (image=alpine:latest, name=test) +2017-01-05T00:36:04.703631903+08:00 network connect e2e1...29e2 (container=0fdb...ff37, name=bridge, type=bridge) +2017-01-05T00:36:04.795031609+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:36:09.830268747+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:36:09.840186338+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:36:09.880113663+08:00 network disconnect e2e...29e2 (container=0fdb...ff37, name=bridge, type=bridge) 
+2017-01-05T00:36:09.890214053+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) +``` + +### Filter events by criteria + +The following commands show several different ways to filter the `docker event` +output. + +```bash +$ docker system events --filter 'event=stop' + +2017-01-05T00:40:22.880175420+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:41:17.888104182+08:00 container stop 2a8f...4e78 (image=alpine, name=kickass_brattain) + +$ docker system events --filter 'image=alpine' + +2017-01-05T00:41:55.784240236+08:00 container create d9cd...4d70 (image=alpine, name=happy_meitner) +2017-01-05T00:41:55.913156783+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) +2017-01-05T00:42:01.106875249+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=15) +2017-01-05T00:42:11.111934041+08:00 container kill d9cd...4d70 (image=alpine, name=happy_meitner, signal=9) +2017-01-05T00:42:11.119578204+08:00 container die d9cd...4d70 (exitCode=137, image=alpine, name=happy_meitner) +2017-01-05T00:42:11.173276611+08:00 container stop d9cd...4d70 (image=alpine, name=happy_meitner) + +$ docker system events --filter 'container=test' + +2017-01-05T00:43:00.139719934+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:43:09.259951086+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=15) +2017-01-05T00:43:09.270102715+08:00 container die 0fdb...ff37 (exitCode=143, image=alpine:latest, name=test) +2017-01-05T00:43:09.312556440+08:00 container stop 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker system events --filter 'container=test' --filter 'container=d9cdb1525ea8' + +2017-01-05T00:44:11.517071981+08:00 container start 0fdb...ff37 (image=alpine:latest, name=test) +2017-01-05T00:44:17.685870901+08:00 container start d9cd...4d70 (image=alpine, name=happy_meitner) +2017-01-05T00:44:29.757658470+08:00 container kill 0fdb...ff37 (image=alpine:latest, name=test, signal=9) +2017-01-05T00:44:29.767718510+08:00 container die 0fdb...ff37 (exitCode=137, image=alpine:latest, name=test) +2017-01-05T00:44:29.815798344+08:00 container destroy 0fdb...ff37 (image=alpine:latest, name=test) + +$ docker system events --filter 'container=test' --filter 'event=stop' + +2017-01-05T00:46:13.664099505+08:00 container stop a9d1...e130 (image=alpine, name=test) + +$ docker system events --filter 'type=volume' + +2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) +2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562f...5025, destination=/foo, driver=local, propagation=rprivate) +2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562f...5025, driver=local) +2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + +$ docker system events --filter 'type=network' + +2015-12-23T21:38:24.705709133Z network create 8b11...2c5b (name=test-event-network-local, type=bridge) +2015-12-23T21:38:25.119625123Z network connect 8b11...2c5b (name=test-event-network-local, container=b4be...c54e, type=bridge) + +$ docker system events --filter 'container=container_1' --filter 'container=container_2' + +2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) +2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) +2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) 
+2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + +$ docker system events --filter 'type=volume' + +2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) +2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) +2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) +2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + +$ docker system events --filter 'type=network' + +2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) +2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) + +$ docker system events --filter 'type=plugin' + +2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) +2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) +``` + +### Format the output + +```bash +$ docker system events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + +Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 +``` + +#### Format as JSON + +```none + $ docker system events --format '{{json .}}' + + {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. + {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. + {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. +``` diff --git a/docs/reference/commandline/system_prune.md b/docs/reference/commandline/system_prune.md new file mode 100644 index 0000000000..e09fec4d7f --- /dev/null +++ b/docs/reference/commandline/system_prune.md @@ -0,0 +1,110 @@ +--- +title: "system prune" +description: "Remove unused data" +keywords: "system, prune, delete, remove" +--- + + + +# system prune + +```markdown +Usage: docker system prune [OPTIONS] + +Delete unused data + +Options: + -a, --all Remove all unused images not just dangling ones + --filter filter Provide filter values (e.g. 'until=') + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Description + +Remove all unused containers, volumes, networks and images (both dangling and unreferenced). + +## Examples + +```bash +$ docker system prune -a + +WARNING! 
This will remove:
+        - all stopped containers
+        - all volumes not used by at least one container
+        - all networks not used by at least one container
+        - all images without at least one container associated to them
+Are you sure you want to continue? [y/N] y
+Deleted Containers:
+0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b
+73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d
+
+Deleted Volumes:
+named-vol
+
+Deleted Images:
+untagged: my-curl:latest
+deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d
+deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b
+untagged: alpine:3.3
+deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f
+untagged: alpine:latest
+deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96
+deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f
+deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab
+deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3
+untagged: my-jq:latest
+deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1
+deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f
+deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548
+
+Total reclaimed space: 13.5 MB
+```
+
+### Filtering
+
+The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more
+than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
+
+The currently supported filters are:
+
+* until (`<timestamp>`) - only remove containers, images, and networks created before the given timestamp
+* label (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) - only remove containers, images, networks, and volumes with (or without, in case `label!=...` is used) the specified labels.
+
+The `until` filter can be Unix timestamps, date formatted
+timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed
+relative to the daemon machine’s time. Supported formats for date
+formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`,
+`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local
+timezone on the daemon will be used if you do not provide either a `Z` or a
+`+-00:00` timezone offset at the end of the timestamp. When providing Unix
+timestamps enter seconds[.nanoseconds], where seconds is the number of seconds
+that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap
+seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a
+fraction of a second no more than nine digits long.
+
+The `label` filter accepts two formats. One is the `label=...` (`label=<key>` or `label=<key>=<value>`),
+which removes containers, images, networks, and volumes with the specified labels. The other
+format is the `label!=...` (`label!=<key>` or `label!=<key>=<value>`), which removes
+containers, images, networks, and volumes without the specified labels.
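+
+As an illustration of the filters described above (these exact commands are
+not part of the original examples, and `keep` is only a hypothetical label
+name), the first command removes unused data created more than 24 hours ago,
+and the second removes unused data that does not carry a `keep` label:
+
+```bash
+$ docker system prune --force --filter "until=24h"
+
+$ docker system prune --force --filter "label!=keep"
+```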
+ +## Related commands + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/docs/reference/commandline/tag.md b/docs/reference/commandline/tag.md new file mode 100644 index 0000000000..5f9defd8a9 --- /dev/null +++ b/docs/reference/commandline/tag.md @@ -0,0 +1,84 @@ +--- +title: "tag" +description: "The tag command description and usage" +keywords: "tag, name, image" +--- + + + +# tag + +```markdown +Usage: docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] + +Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE + +Options: + --help Print usage +``` + +## Description + +An image name is made up of slash-separated name components, optionally prefixed +by a registry hostname. The hostname must comply with standard DNS rules, but +may not contain underscores. If a hostname is present, it may optionally be +followed by a port number in the format `:8080`. If not present, the command +uses Docker's public registry located at `registry-1.docker.io` by default. Name +components may contain lowercase letters, digits and separators. A separator +is defined as a period, one or two underscores, or one or more dashes. A name +component may not start or end with a separator. + +A tag name must be valid ASCII and may contain lowercase and uppercase letters, +digits, underscores, periods and dashes. A tag name may not start with a +period or a dash and may contain a maximum of 128 characters. + +You can group your images together using names and tags, and then upload them +to [*Share Images via Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). + +## Examples + +### Tag an image referenced by ID + +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": + +```bash +$ docker tag 0e5574283393 fedora/httpd:version1.0 +``` + +### Tag an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + +```bash +$ docker tag httpd fedora/httpd:version1.0 +``` + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. + +### Tag an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + +```bash +$ docker tag httpd:test fedora/httpd:version1.0.test +``` + +### Tag an image for a private repository + +To push an image to a private registry and not the central Docker +registry you must tag it with the registry hostname and port (if needed). 
+ +```bash +$ docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 +``` diff --git a/docs/reference/commandline/top.md b/docs/reference/commandline/top.md new file mode 100644 index 0000000000..0a04828775 --- /dev/null +++ b/docs/reference/commandline/top.md @@ -0,0 +1,25 @@ +--- +title: "top" +description: "The top command description and usage" +keywords: "container, running, processes" +--- + + + +# top + +```markdown +Usage: docker top CONTAINER [ps OPTIONS] + +Display the running processes of a container + +Options: + --help Print usage +``` diff --git a/docs/reference/commandline/unpause.md b/docs/reference/commandline/unpause.md new file mode 100644 index 0000000000..8915a43b40 --- /dev/null +++ b/docs/reference/commandline/unpause.md @@ -0,0 +1,44 @@ +--- +title: "unpause" +description: "The unpause command description and usage" +keywords: "cgroups, suspend, container" +--- + + + +# unpause + +```markdown +Usage: docker unpause CONTAINER [CONTAINER...] + +Unpause all processes within one or more containers + +Options: + --help Print usage +``` + +## Description + +The `docker unpause` command un-suspends all processes in the specified containers. +On Linux, it does this using the cgroups freezer. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. + +## Examples + +```bash +$ docker unpause my_container +``` + +## Related commands + +* [pause](pause.md) diff --git a/docs/reference/commandline/update.md b/docs/reference/commandline/update.md new file mode 100644 index 0000000000..935dc9bf36 --- /dev/null +++ b/docs/reference/commandline/update.md @@ -0,0 +1,123 @@ +--- +title: "update" +description: "The update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +## update + +```markdown +Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] + +Update configuration of one or more containers + +Options: + --blkio-weight uint16 Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0) + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + -c, --cpu-shares int CPU shares (relative weight) + --cpus decimal Number of CPUs (default 0.000) + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --help Print usage + --kernel-memory string Kernel memory limit + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --restart string Restart policy to apply when a container exits +``` + +## Description + +The `docker update` command dynamically updates container configuration. +You can use this command to prevent containers from consuming too many +resources from their Docker host. With a single command, you can place +limits on a single container or on many. To specify more than one container, +provide space-separated list of container names or IDs. + +With the exception of the `--kernel-memory` option, you can specify these +options on a running or a stopped container. On kernel version older than +4.6, you can only update `--kernel-memory` on a stopped container or on +a running container with kernel memory initialized. 
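+
+Before and after changing limits, you can confirm what the daemon currently
+records for a container with `docker inspect`. A minimal sketch (the container
+name `my_container` is only a placeholder):
+
+```bash
+$ docker inspect --format 'memory={{.HostConfig.Memory}} restart={{.HostConfig.RestartPolicy.Name}}' my_container
+```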
+ +## Examples + +The following sections illustrate ways to use this command. + +### Update a container's cpu-shares + +To limit a container's cpu-shares to 512, first identify the container +name or ID. You can use `docker ps` to find these values. You can also +use the ID returned from the `docker run` command. Then, do the following: + +```bash +$ docker update --cpu-shares 512 abebf7571666 +``` + +### Update a container with cpu-shares and memory + +To update multiple resource configurations for multiple containers: + +```bash +$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse +``` + +### Update a container's kernel memory constraints + +You can update a container's kernel memory limit using the `--kernel-memory` +option. On kernel version older than 4.6, this option can be updated on a +running container only if the container was started with `--kernel-memory`. +If the container was started *without* `--kernel-memory` you need to stop +the container before updating kernel memory. + +For example, if you started a container with this command: + +```bash +$ docker run -dit --name test --kernel-memory 50M ubuntu bash +``` + +You can update kernel memory while the container is running: + +```bash +$ docker update --kernel-memory 80M test +``` + +If you started a container *without* kernel memory initialized: + +```bash +$ docker run -dit --name test2 --memory 300M ubuntu bash +``` + +Update kernel memory of running container `test2` will fail. You need to stop +the container before updating the `--kernel-memory` setting. The next time you +start it, the container uses the new value. + +Kernel version newer than (include) 4.6 does not have this limitation, you +can use `--kernel-memory` the same way as other options. + +### Update a container's restart policy + +You can change a container's restart policy on a running container. The new +restart policy takes effect instantly after you run `docker update` on a +container. + +To update restart policy for one or more containers: + +```bash +$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse +``` + +Note that if the container is started with "--rm" flag, you cannot update the restart +policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the +container. diff --git a/docs/reference/commandline/version.md b/docs/reference/commandline/version.md new file mode 100644 index 0000000000..b15d13b97f --- /dev/null +++ b/docs/reference/commandline/version.md @@ -0,0 +1,74 @@ +--- +title: "version" +description: "The version command description and usage" +keywords: "version, architecture, api" +--- + + + +# version + +```markdown +Usage: docker version [OPTIONS] + +Show the Docker version information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +By default, this will render all version information in an easy to read +layout. If a format is specified, the given template will be executed instead. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. 
+ +## Examples + +### Default output + +```bash +$ docker version + +Client: +Version: 1.8.0 +API version: 1.20 +Go version: go1.4.2 +Git commit: f5bae0a +Built: Tue Jun 23 17:56:00 UTC 2015 +OS/Arch: linux/amd64 + +Server: +Version: 1.8.0 +API version: 1.20 +Go version: go1.4.2 +Git commit: f5bae0a +Built: Tue Jun 23 17:56:00 UTC 2015 +OS/Arch: linux/amd64 +``` + +### Get the server version + +```bash +$ docker version --format '{{.Server.Version}}' + +1.8.0 +``` + +### Dump raw JSON data + +```bash +$ docker version --format '{{json .}}' + +{"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} +``` diff --git a/docs/reference/commandline/volume.md b/docs/reference/commandline/volume.md new file mode 100644 index 0000000000..d5dd9c592f --- /dev/null +++ b/docs/reference/commandline/volume.md @@ -0,0 +1,48 @@ +--- +title: "volume" +description: "The volume command description and usage" +keywords: "volume" +--- + + + +# volume + +```markdown +Usage: docker volume COMMAND + +Manage volumes + +Options: + --help Print usage + +Commands: + create Create a volume + inspect Display detailed information on one or more volumes + ls List volumes + prune Remove all unused volumes + rm Remove one or more volumes + +Run 'docker volume COMMAND --help' for more information on a command. +``` + +## Description + +Manage volumes. You can use subcommands to create, inspect, list, remove, or +prune volumes. + +## Related commands + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume list](volume_list.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/docs/reference/commandline/volume_create.md b/docs/reference/commandline/volume_create.md new file mode 100644 index 0000000000..b1eed37b5c --- /dev/null +++ b/docs/reference/commandline/volume_create.md @@ -0,0 +1,125 @@ +--- +title: "volume create" +description: "The volume create command description and usage" +keywords: "volume, create" +--- + + + +# volume create + +```markdown +Usage: docker volume create [OPTIONS] [VOLUME] + +Create a volume + +Options: + -d, --driver string Specify volume driver name (default "local") + --help Print usage + --label value Set metadata for a volume (default []) + -o, --opt value Set driver specific options (default map[]) +``` + +## Description + +Creates a new volume that containers can consume and store data in. If a name is +not specified, Docker generates a random name. + +## Examples + +Create a volume and then configure the container to use it: + +```bash +$ docker volume create hello + +hello + +$ docker run -d -v hello:/world busybox ls /world +``` + +The mount is created inside the container's `/world` directory. Docker does not +support relative paths for mount points inside the container. + +Multiple containers can use the same volume in the same time period. This is +useful if two containers need access to shared data. For example, if one +container writes and the other reads the data. + +Volume names must be unique among drivers. This means you cannot use the same +volume name with two different drivers. 
If you attempt this `docker` returns an +error: + +```none +A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. +``` + +If you specify a volume name already in use on the current driver, Docker +assumes you want to re-use the existing volume and does not return an error. + +### Driver-specific options + +Some volume drivers may take options to customize the volume creation. Use the +`-o` or `--opt` flags to pass driver options: + +```bash +$ docker volume create --driver fake \ + --opt tardis=blue \ + --opt timey=wimey \ + foo +``` + +These options are passed directly to the volume driver. Options for +different volume drivers may do different things (or nothing at all). + +The built-in `local` driver on Windows does not support any options. + +The built-in `local` driver on Linux accepts options similar to the linux +`mount` command. You can provide multiple options by passing the `--opt` flag +multiple times. Some `mount` options (such as the `o` option) can take a +comma-separated list of options. Complete list of available mount options can be +found [here](http://man7.org/linux/man-pages/man8/mount.8.html). + +For example, the following creates a `tmpfs` volume called `foo` with a size of +100 megabyte and `uid` of 1000. + +```bash +$ docker volume create --driver local \ + --opt type=tmpfs \ + --opt device=tmpfs \ + --opt o=size=100m,uid=1000 \ + foo +``` + +Another example that uses `btrfs`: + +```bash +$ docker volume create --driver local \ + --opt type=btrfs \ + --opt device=/dev/sda2 \ + foo +``` + +Another example that uses `nfs` to mount the `/path/to/dir` in `rw` mode from +`192.168.1.1`: + +```bash +$ docker volume create --driver local \ + --opt type=nfs \ + --opt o=addr=192.168.1.1,rw \ + --opt device=:/path/to/dir \ + foo +``` + +## Related commands + +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/docs/reference/commandline/volume_inspect.md b/docs/reference/commandline/volume_inspect.md new file mode 100644 index 0000000000..bbdc6bd3ee --- /dev/null +++ b/docs/reference/commandline/volume_inspect.md @@ -0,0 +1,61 @@ +--- +title: "volume inspect" +description: "The volume inspect command description and usage" +keywords: "volume, inspect" +--- + + + +# volume inspect + +```markdown +Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] + +Display detailed information on one or more volumes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +## Description + +Returns information about a volume. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. 
+ +## Examples + +```bash +$ docker volume create +85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +$ docker volume inspect 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +[ + { + "Name": "85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data", + "Status": null + } +] + +$ docker volume inspect --format '{{ .Mountpoint }}' 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d +/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data +``` + +## Related commands + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/docs/reference/commandline/volume_ls.md b/docs/reference/commandline/volume_ls.md new file mode 100644 index 0000000000..713922d60f --- /dev/null +++ b/docs/reference/commandline/volume_ls.md @@ -0,0 +1,199 @@ +--- +title: "volume ls" +description: "The volume ls command description and usage" +keywords: "volume, list" +--- + + + +# volume ls + +```markdown +Usage: docker volume ls [OPTIONS] + +List volumes + +Aliases: + ls, list + +Options: + -f, --filter value Provide filter values (e.g. 'dangling=true') (default []) + - dangling= a volume if referenced or not + - driver= a volume's driver name + - label= or label== + - name= a volume's name + --format string Pretty-print volumes using a Go template + --help Print usage + -q, --quiet Only display volume names +``` + +## Description + +List all the volumes known to Docker. You can filter using the `-f` or +`--filter` flag. Refer to the [filtering](#filtering) section for more +information about available filter options. + +## Examples + +### Create a volume +```bash +$ docker volume create rosemary + +rosemary + +$ docker volume create tyler + +tyler + +$ docker volume ls + +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +### Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* dangling (boolean - true or false, 0 or 1) +* driver (a volume driver's name) +* label (`label=` or `label==`) +* name (a volume's name) + +#### dangling + +The `dangling` filter matches on all volumes not referenced by any containers + +```bash +$ docker run -d -v tyler:/tmpwork busybox + +f86a7dd02898067079c99ceacd810149060a70528eff3754d0b0f1a93bd0af18 +$ docker volume ls -f dangling=true +DRIVER VOLUME NAME +local rosemary +``` + +#### driver + +The `driver` filter matches volumes based on their driver. + +The following example matches volumes that are created with the `local` driver: + +```bash +$ docker volume ls -f driver=local + +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +#### label + +The `label` filter matches volumes based on the presence of a `label` alone or +a `label` and a value. + +First, let's create some volumes to illustrate this; + +```bash +$ docker volume create the-doctor --label is-timelord=yes + +the-doctor +$ docker volume create daleks --label is-timelord=no + +daleks +``` + +The following example filter matches volumes with the `is-timelord` label +regardless of its value. 
+
+```bash
+$ docker volume ls --filter label=is-timelord
+
+DRIVER              VOLUME NAME
+local               daleks
+local               the-doctor
+```
+
+As the above example demonstrates, both volumes with `is-timelord=yes` and
+`is-timelord=no` are returned.
+
+Filtering on both `key` *and* `value` of the label produces the expected result:
+
+```bash
+$ docker volume ls --filter label=is-timelord=yes
+
+DRIVER              VOLUME NAME
+local               the-doctor
+```
+
+Specifying multiple label filters produces an "and" search; all conditions
+must be met:
+
+```bash
+$ docker volume ls --filter label=is-timelord=yes --filter label=is-timelord=no
+
+DRIVER              VOLUME NAME
+```
+
+#### name
+
+The `name` filter matches on all or part of a volume's name.
+
+The following filter matches all volumes with a name containing the `rose` string.
+
+```bash
+$ docker volume ls -f name=rose
+
+DRIVER              VOLUME NAME
+local               rosemary
+```
+
+### Formatting
+
+The formatting option (`--format`) pretty-prints volumes output
+using a Go template.
+
+Valid placeholders for the Go template are listed below:
+
+Placeholder   | Description
+--------------|--------------------------------------------------------------------------------------
+`.Name`       | Volume name
+`.Driver`     | Volume driver
+`.Scope`      | Volume scope (local, global)
+`.Mountpoint` | Path on the host where the volume is mounted
+`.Labels`     | All labels assigned to the volume.
+`.Label`      | Value of a specific label for this volume. For example `{{.Label "project.version"}}`
+
+When using the `--format` option, the `volume ls` command will either
+output the data exactly as the template declares or, when using the
+`table` directive, include column headers as well.
+
+The following example uses a template without headers and outputs the
+`Name` and `Driver` entries separated by a colon for all volumes:
+
+```bash
+$ docker volume ls --format "{{.Name}}: {{.Driver}}"
+
+vol1: local
+vol2: local
+vol3: local
+```
+
+## Related commands
+
+* [volume create](volume_create.md)
+* [volume inspect](volume_inspect.md)
+* [volume rm](volume_rm.md)
+* [volume prune](volume_prune.md)
+* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/)
diff --git a/docs/reference/commandline/volume_prune.md b/docs/reference/commandline/volume_prune.md
new file mode 100644
index 0000000000..7a1ae76c69
--- /dev/null
+++ b/docs/reference/commandline/volume_prune.md
@@ -0,0 +1,73 @@
+---
+title: "volume prune"
+description: "Remove unused volumes"
+keywords: "volume, prune, delete"
+---
+
+
+
+# volume prune
+
+```markdown
+Usage: docker volume prune [OPTIONS]
+
+Remove all unused volumes
+
+Options:
+      --filter filter   Provide filter values (e.g. 'label=