Mirror of https://github.com/docker/cli.git

Commit 51dcc574d5: Merge branch 'master' into 2968-device
@ -2,133 +2,15 @@ version: 2
jobs:

  lint:
    working_directory: /work
    docker: [{image: 'docker:19.03-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    steps:
      - checkout
      - setup_remote_docker:
          version: 19.03.12
          reusable: true
          exclusive: false
      - run:
          name: "Docker version"
          command: docker version
      - run:
          name: "Docker info"
          command: docker info
      - run:
          name: "Shellcheck - build image"
          command: |
            docker build --progress=plain -f dockerfiles/Dockerfile.shellcheck --tag cli-validator:$CIRCLE_BUILD_NUM .
      - run:
          name: "Shellcheck"
          command: |
            docker run --rm cli-validator:$CIRCLE_BUILD_NUM \
              make shellcheck
      - run:
          name: "Lint - build image"
          command: |
            docker build --progress=plain -f dockerfiles/Dockerfile.lint --tag cli-linter:$CIRCLE_BUILD_NUM .
      - run:
          name: "Lint"
          command: |
            docker run --rm cli-linter:$CIRCLE_BUILD_NUM

  cross:
    working_directory: /work
    docker: [{image: 'docker:19.03-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    parallelism: 3
    steps:
      - checkout
      - setup_remote_docker:
          version: 19.03.12
          reusable: true
          exclusive: false
      - run:
          name: "Docker version"
          command: docker version
      - run:
          name: "Docker info"
          command: docker info
      - run:
          name: "Cross - build image"
          command: |
            docker build --progress=plain -f dockerfiles/Dockerfile.cross --tag cli-builder:$CIRCLE_BUILD_NUM .
      - run:
          name: "Cross"
          command: |
            name=cross-$CIRCLE_BUILD_NUM-$CIRCLE_NODE_INDEX
            docker run \
              -e CROSS_GROUP=$CIRCLE_NODE_INDEX \
              --name $name cli-builder:$CIRCLE_BUILD_NUM \
              make cross
            docker cp \
              $name:/go/src/github.com/docker/cli/build \
              /work/build
      - store_artifacts:
          path: /work/build

  test:
    working_directory: /work
    docker: [{image: 'docker:19.03-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    steps:
      - checkout
      - setup_remote_docker:
          version: 19.03.12
          reusable: true
          exclusive: false
      - run:
          name: "Docker version"
          command: docker version
      - run:
          name: "Docker info"
          command: docker info
      - run:
          name: "Unit Test with Coverage - build image"
          command: |
            mkdir -p test-results/unit-tests
            docker build --progress=plain -f dockerfiles/Dockerfile.dev --tag cli-builder:$CIRCLE_BUILD_NUM .
      - run:
          name: "Unit Test with Coverage"
          command: |
            docker run \
              -e GOTESTSUM_JUNITFILE=/tmp/junit.xml \
              --name \
              test-$CIRCLE_BUILD_NUM cli-builder:$CIRCLE_BUILD_NUM \
              make test-coverage
            docker cp \
              test-$CIRCLE_BUILD_NUM:/tmp/junit.xml \
              ./test-results/unit-tests/junit.xml
      - run:
          name: "Upload to Codecov"
          command: |
            docker cp \
              test-$CIRCLE_BUILD_NUM:/go/src/github.com/docker/cli/coverage.txt \
              coverage.txt
            apk add -U bash curl
            curl -s https://codecov.io/bash | bash || \
              echo 'Codecov failed to upload'
      - store_test_results:
          path: test-results
      - store_artifacts:
          path: test-results

  validate:
    working_directory: /work
    docker: [{image: 'docker:19.03-git'}]
    docker: [{image: 'docker:20.10-git'}]
    environment:
      DOCKER_BUILDKIT: 1
    steps:
      - checkout
      - setup_remote_docker:
          version: 19.03.12
          version: 20.10.6
          reusable: true
          exclusive: false
      - run:
@ -153,7 +35,4 @@ workflows:
  version: 2
  ci:
    jobs:
      - lint
      - cross
      - test
      - validate
@ -1,8 +1,7 @@
.circleci
.dockerignore
.git
.github
.gitignore
appveyor.yml
build
/build/
/cli/winresources/versioninfo.json
/cli/winresources/*.syso
/man/man*/
/docs/yaml/gen/
profile.out
/vndr.log
@ -0,0 +1,67 @@
name: build

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]{2}'
    tags:
      - 'v*'
  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        target:
          - cross
          - dynbinary-cross
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      -
        name: Run ${{ matrix.target }}
        uses: docker/bake-action@v1
        with:
          targets: ${{ matrix.target }}
      -
        name: Flatten artifacts
        working-directory: ./build
        run: |
          for dir in */; do
            base=$(basename "$dir")
            echo "Creating ${base}.tar.gz ..."
            tar -cvzf "${base}.tar.gz" "$dir"
            rm -rf "$dir"
          done
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ matrix.target }}
          path: ./build/*
          if-no-files-found: error

  plugins:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      -
        name: Build plugins
        uses: docker/bake-action@v1
        with:
          targets: plugins-cross
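For reference, the bake targets this workflow invokes through docker/bake-action can also be run locally with Buildx; a minimal sketch, assuming a checkout of the repository and BuildKit enabled (output paths follow the workflow's own "Flatten artifacts" step):

```shell
# build the same targets the workflow matrix covers
docker buildx bake cross
docker buildx bake dynbinary-cross

# binaries land under ./build/, one directory per platform
ls ./build/
```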
@ -0,0 +1,60 @@
|
|||
name: e2e
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- '[0-9]+.[0-9]{2}'
|
||||
tags:
|
||||
- 'v*'
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
e2e:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
target:
|
||||
- non-experimental
|
||||
- experimental
|
||||
- connhelper-ssh
|
||||
base:
|
||||
- alpine
|
||||
- buster
|
||||
engine-version:
|
||||
# - 20.10-dind # FIXME: Fails on 20.10
|
||||
- stable-dind # TODO: Use 20.10-dind, stable-dind is deprecated
|
||||
include:
|
||||
- target: non-experimental
|
||||
engine-version: 19.03-dind
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
-
|
||||
name: Update daemon.json
|
||||
run: |
|
||||
sudo jq '.experimental = true' < /etc/docker/daemon.json > /tmp/docker.json
|
||||
sudo mv /tmp/docker.json /etc/docker/daemon.json
|
||||
sudo cat /etc/docker/daemon.json
|
||||
sudo service docker restart
|
||||
docker version
|
||||
docker info
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
-
|
||||
name: Run ${{ matrix.target }}
|
||||
run: |
|
||||
make -f docker.Makefile test-e2e-${{ matrix.target }}
|
||||
env:
|
||||
BASE_VARIANT: ${{ matrix.base }}
|
||||
E2E_ENGINE_VERSION: ${{ matrix.engine-version }}
|
||||
TESTFLAGS: -coverprofile=/tmp/coverage/coverage.txt
|
||||
-
|
||||
name: Send to Codecov
|
||||
uses: codecov/codecov-action@v2
|
||||
with:
|
||||
file: ./build/coverage/coverage.txt
|
|
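A single matrix cell can be reproduced locally by exporting the same variables the workflow sets and calling the corresponding docker.Makefile target; a sketch with one example combination (the values shown are just one cell of the matrix, not the only valid choice):

```shell
# run the connhelper-ssh e2e suite against the Debian "buster" variant
BASE_VARIANT=buster \
E2E_ENGINE_VERSION=stable-dind \
make -f docker.Makefile test-e2e-connhelper-ssh
```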
@ -0,0 +1,75 @@
name: test

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]{2}'
    tags:
      - 'v*'
  pull_request:

jobs:
  ctn:
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      -
        name: Test
        uses: docker/bake-action@v1
        with:
          targets: test-coverage
      -
        name: Send to Codecov
        uses: codecov/codecov-action@v2
        with:
          file: ./build/coverage/coverage.txt

  host:
    runs-on: ${{ matrix.os }}
    env:
      GOPATH: ${{ github.workspace }}
      GOBIN: ${{ github.workspace }}/bin
      GO111MODULE: auto
    strategy:
      fail-fast: false
      matrix:
        os:
          - macos-latest
          # - windows-latest # FIXME: some tests are failing on the Windows runner, as well as on Appveyor since June 24, 2018: https://ci.appveyor.com/project/docker/cli/history
    steps:
      -
        name: Prepare git
        if: matrix.os == 'windows-latest'
        run: |
          git config --system core.autocrlf false
          git config --system core.eol lf
      -
        name: Checkout
        uses: actions/checkout@v2
        with:
          path: ${{ env.GOPATH }}/src/github.com/docker/cli
      -
        name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.11
      -
        name: Test
        run: |
          go test -coverprofile=/tmp/coverage.txt $(go list ./... | grep -vE '/vendor/|/e2e/')
          go tool cover -func=/tmp/coverage.txt
        working-directory: ${{ env.GOPATH }}/src/github.com/docker/cli
        shell: bash
      -
        name: Send to Codecov
        uses: codecov/codecov-action@v2
        with:
          file: /tmp/coverage.txt
          working-directory: ${{ env.GOPATH }}/src/github.com/docker/cli
@ -0,0 +1,32 @@
name: validate

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]{2}'
    tags:
      - 'v*'
  pull_request:

jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        target:
          - lint
          - shellcheck
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      -
        name: Run
        uses: docker/bake-action@v1
        with:
          targets: ${{ matrix.target }}
@ -8,12 +8,11 @@
Thumbs.db
.editorconfig
/build/
cli/winresources/rsrc_386.syso
cli/winresources/rsrc_amd64.syso
/cli/winresources/versioninfo.json
/cli/winresources/*.syso
/man/man1/
/man/man5/
/man/man8/
/docs/yaml/gen/
coverage.txt
profile.out
/vndr.log
@ -0,0 +1,129 @@
# syntax=docker/dockerfile:1.3

ARG BASE_VARIANT=alpine
ARG GO_VERSION=1.16.11
ARG XX_VERSION=1.0.0-rc.2
ARG GOVERSIONINFO_VERSION=v1.3.0
ARG GOTESTSUM_VERSION=v1.7.0

FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-${BASE_VARIANT} AS gostable
FROM --platform=$BUILDPLATFORM golang:1.17rc1-${BASE_VARIANT} AS golatest

FROM gostable AS go-linux
FROM gostable AS go-darwin
FROM gostable AS go-windows-amd64
FROM gostable AS go-windows-386
FROM gostable AS go-windows-arm
FROM golatest AS go-windows-arm64
FROM go-windows-${TARGETARCH} AS go-windows

FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx

FROM go-${TARGETOS} AS build-base-alpine
COPY --from=xx / /
RUN apk add --no-cache bash clang lld llvm file git
WORKDIR /go/src/github.com/docker/cli

FROM build-base-alpine AS build-alpine
ARG TARGETPLATFORM
# gcc is installed for libgcc only
RUN xx-apk add --no-cache musl-dev gcc

FROM go-${TARGETOS} AS build-base-buster
COPY --from=xx / /
RUN apt-get update && apt-get install --no-install-recommends -y bash clang lld file
WORKDIR /go/src/github.com/docker/cli

FROM build-base-buster AS build-buster
ARG TARGETPLATFORM
RUN xx-apt install --no-install-recommends -y libc6-dev libgcc-8-dev

FROM build-base-${BASE_VARIANT} AS goversioninfo
ARG GOVERSIONINFO_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    GOBIN=/out GO111MODULE=on go install "github.com/josephspurrier/goversioninfo/cmd/goversioninfo@${GOVERSIONINFO_VERSION}"

FROM build-base-${BASE_VARIANT} AS gotestsum
ARG GOTESTSUM_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    GOBIN=/out GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \
    && /out/gotestsum --version

FROM build-${BASE_VARIANT} AS build
# GO_LINKMODE defines if static or dynamic binary should be produced
ARG GO_LINKMODE=static
# GO_BUILDTAGS defines additional build tags
ARG GO_BUILDTAGS
# GO_STRIP strips debugging symbols if set
ARG GO_STRIP
# CGO_ENABLED manually sets if cgo is used
ARG CGO_ENABLED
# VERSION sets the version for the produced binary
ARG VERSION
# COMPANY_NAME sets the company that produced the windows binary
ARG COMPANY_NAME
COPY --from=goversioninfo /out/goversioninfo /usr/bin/goversioninfo
RUN --mount=type=bind,target=.,ro \
    --mount=type=cache,target=/root/.cache \
    --mount=from=dockercore/golang-cross:xx-sdk-extras,target=/xx-sdk,src=/xx-sdk \
    --mount=type=tmpfs,target=cli/winresources \
    # override the default behavior of go with xx-go
    xx-go --wrap && \
    # export GOCACHE=$(go env GOCACHE)/$(xx-info)$([ -f /etc/alpine-release ] && echo "alpine") && \
    TARGET=/out ./scripts/build/binary && \
    xx-verify $([ "$GO_LINKMODE" = "static" ] && echo "--static") /out/docker

FROM build-${BASE_VARIANT} AS test
COPY --from=gotestsum /out/gotestsum /usr/bin/gotestsum
ENV GO111MODULE=auto
RUN --mount=type=bind,target=.,rw \
    --mount=type=cache,target=/root/.cache \
    --mount=type=cache,target=/go/pkg/mod \
    gotestsum -- -coverprofile=/tmp/coverage.txt $(go list ./... | grep -vE '/vendor/|/e2e/')

FROM scratch AS test-coverage
COPY --from=test /tmp/coverage.txt /coverage.txt

FROM build-${BASE_VARIANT} AS build-plugins
ARG GO_LINKMODE=static
ARG GO_BUILDTAGS
ARG GO_STRIP
ARG CGO_ENABLED
ARG VERSION
RUN --mount=ro --mount=type=cache,target=/root/.cache \
    --mount=from=dockercore/golang-cross:xx-sdk-extras,target=/xx-sdk,src=/xx-sdk \
    xx-go --wrap && \
    TARGET=/out ./scripts/build/plugins e2e/cli-plugins/plugins/*

FROM build-base-alpine AS e2e-base-alpine
RUN apk add --no-cache build-base curl docker-compose openssl openssh-client

FROM build-base-buster AS e2e-base-buster
RUN apt-get update && apt-get install -y build-essential curl openssl openssh-client
ARG COMPOSE_VERSION=1.29.2
RUN curl -fsSL https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose && \
    chmod +x /usr/local/bin/docker-compose

FROM e2e-base-${BASE_VARIANT} AS e2e
ARG NOTARY_VERSION=v0.6.1
ADD --chmod=0755 https://github.com/theupdateframework/notary/releases/download/${NOTARY_VERSION}/notary-Linux-amd64 /usr/local/bin/notary
COPY e2e/testdata/notary/root-ca.cert /usr/share/ca-certificates/notary.cert
RUN echo 'notary.cert' >> /etc/ca-certificates.conf && update-ca-certificates
COPY --from=gotestsum /out/gotestsum /usr/bin/gotestsum
COPY --from=build /out ./build/
COPY --from=build-plugins /out ./build/
COPY . .
ENV DOCKER_BUILDKIT=1
ENV PATH=/go/src/github.com/docker/cli/build:$PATH
CMD ./scripts/test/e2e/entry

FROM build-base-${BASE_VARIANT} AS dev
COPY . .

FROM scratch AS binary
COPY --from=build /out .

FROM scratch AS plugins
COPY --from=build-plugins /out .
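The exported stages at the bottom of this Dockerfile (binary, plugins, test-coverage) can also be built directly with Buildx rather than through the bake targets; a minimal sketch, assuming BuildKit and a checkout of the repo (the flag values shown are illustrative, not the project's canonical invocation):

```shell
# build the static binary for the host platform and export it to ./build
docker buildx build --target binary --output ./build .

# same, but dynamically linked against glibc on the Debian "buster" base
docker buildx build --target binary \
  --build-arg BASE_VARIANT=buster --build-arg GO_LINKMODE=dynamic \
  --output ./build .
```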
@ -1,47 +0,0 @@
|
|||
pipeline {
|
||||
agent {
|
||||
label "linux && x86_64"
|
||||
}
|
||||
|
||||
options {
|
||||
timeout(time: 60, unit: 'MINUTES')
|
||||
}
|
||||
|
||||
stages {
|
||||
stage("Docker info") {
|
||||
steps {
|
||||
sh "docker version"
|
||||
sh "docker info"
|
||||
}
|
||||
}
|
||||
stage("e2e (non-experimental) - stable engine") {
|
||||
steps {
|
||||
sh "E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \
|
||||
IMAGE_TAG=clie2e${BUILD_NUMBER} \
|
||||
make -f docker.Makefile test-e2e-non-experimental"
|
||||
}
|
||||
}
|
||||
stage("e2e (non-experimental) - 18.09 engine") {
|
||||
steps {
|
||||
sh "E2E_ENGINE_VERSION=18.09-dind \
|
||||
E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \
|
||||
IMAGE_TAG=clie2e${BUILD_NUMBER} \
|
||||
make -f docker.Makefile test-e2e-non-experimental"
|
||||
}
|
||||
}
|
||||
stage("e2e (experimental)") {
|
||||
steps {
|
||||
sh "E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \
|
||||
IMAGE_TAG=clie2e${BUILD_NUMBER} \
|
||||
make -f docker.Makefile test-e2e-experimental"
|
||||
}
|
||||
}
|
||||
stage("e2e (ssh connhelper)") {
|
||||
steps {
|
||||
sh "E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \
|
||||
IMAGE_TAG=clie2e${BUILD_NUMBER} \
|
||||
make -f docker.Makefile test-e2e-connhelper-ssh"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
Makefile (69 lines changed)

@ -1,8 +1,11 @@
#
# github.com/docker/cli
#
all: binary

# Sets the name of the company that produced the windows binary.
COMPANY_NAME ?=

all: binary

_:=$(shell ./scripts/warn-outside-container $(MAKECMDGOALS))

@ -10,58 +13,42 @@ _:=$(shell ./scripts/warn-outside-container $(MAKECMDGOALS))
clean: ## remove build artifacts
	rm -rf ./build/* cli/winresources/rsrc_* ./man/man[1-9] docs/yaml/gen

.PHONY: test-unit
test-unit: ## run unit tests, to change the output format use: GOTESTSUM_FORMAT=(dots|short|standard-quiet|short-verbose|standard-verbose) make test-unit
	gotestsum $(TESTFLAGS) -- $${TESTDIRS:-$(shell go list ./... | grep -vE '/vendor/|/e2e/')}

.PHONY: test
test: test-unit ## run tests

.PHONY: test-unit
test-unit: ## run unit tests, to change the output format use: GOTESTSUM_FORMAT=(dots|short|standard-quiet|short-verbose|standard-verbose) make test-unit
	gotestsum -- $${TESTDIRS:-$(shell go list ./... | grep -vE '/vendor/|/e2e/')} $(TESTFLAGS)

.PHONY: test-coverage
test-coverage: ## run test coverage
	gotestsum -- -coverprofile=coverage.txt $(shell go list ./... | grep -vE '/vendor/|/e2e/')
	mkdir -p $(CURDIR)/build/coverage
	gotestsum -- $(shell go list ./... | grep -vE '/vendor/|/e2e/') -coverprofile=$(CURDIR)/build/coverage/coverage.txt

.PHONY: lint
lint: ## run all the lint tools
	golangci-lint run

.PHONY: shellcheck
shellcheck: ## run shellcheck validation
	find scripts/ contrib/completion/bash -type f | grep -v scripts/winresources | grep -v '.*.ps1' | xargs shellcheck

.PHONY: fmt
fmt:
	go list -f {{.Dir}} ./... | xargs gofmt -w -s -d

.PHONY: lint
lint: ## run all the lint tools
	gometalinter --config gometalinter.json ./...

.PHONY: binary
binary: ## build executable for Linux
	@echo "WARNING: binary creates a Linux executable. Use cross for macOS or Windows."
	./scripts/build/binary

.PHONY: dynbinary
dynbinary: ## build dynamically linked binary
	GO_LINKMODE=dynamic ./scripts/build/binary

.PHONY: plugins
plugins: ## build example CLI plugins
	./scripts/build/plugins

.PHONY: cross
cross: ## build executable for macOS and Windows
	./scripts/build/cross

.PHONY: binary-windows
binary-windows: ## build executable for Windows
	./scripts/build/windows

.PHONY: plugins-windows
plugins-windows: ## build example CLI plugins for Windows
	./scripts/build/plugins-windows

.PHONY: binary-osx
binary-osx: ## build executable for macOS
	./scripts/build/osx

.PHONY: plugins-osx
plugins-osx: ## build example CLI plugins for macOS
	./scripts/build/plugins-osx

.PHONY: dynbinary
dynbinary: ## build dynamically linked binary
	./scripts/build/dynbinary

vendor: vendor.conf ## check that vendor matches vendor.conf
	rm -rf vendor
	bash -c 'vndr |& grep -v -i clone | tee ./vndr.log'

@ -80,24 +67,12 @@ manpages: ## generate man pages from go source and markdown
yamldocs: ## generate documentation YAML files consumed by docs repo
	scripts/docs/generate-yaml.sh

.PHONY: shellcheck
shellcheck: ## run shellcheck validation
	scripts/validate/shellcheck

.PHONY: help
help: ## print this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)


cli/compose/schema/bindata.go: cli/compose/schema/data/*.json
	go generate github.com/docker/cli/cli/compose/schema

compose-jsonschema: cli/compose/schema/bindata.go ## generate compose-file schemas
	scripts/validate/check-git-diff cli/compose/schema/bindata.go

.PHONY: ci-validate
ci-validate:
	time make -B vendor
	time make -B compose-jsonschema
	time make manpages
	time make yamldocs
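As the test-unit help text above notes, the output format and the packages under test are driven by environment variables (GOTESTSUM_FORMAT and TESTDIRS); a small usage sketch, where the package path is just an arbitrary example:

```shell
# run only the container command tests with more verbose gotestsum output
TESTDIRS=./cli/command/container/... GOTESTSUM_FORMAT=short-verbose make test-unit
```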
README.md (64 lines changed)

@ -1,57 +1,73 @@
[![build status](https://circleci.com/gh/docker/cli.svg?style=shield)](https://circleci.com/gh/docker/cli/tree/master)
[![Build Status](https://ci.docker.com/public/job/cli/job/master/badge/icon)](https://ci.docker.com/public/job/cli/job/master)
# Docker CLI

docker/cli
==========
[![PkgGoDev](https://img.shields.io/badge/go.dev-docs-007d9c?logo=go&logoColor=white)](https://pkg.go.dev/github.com/docker/cli)
[![Build Status](https://img.shields.io/github/workflow/status/docker/cli/build?logo=github)](https://github.com/docker/cli/actions?query=workflow%3Abuild)
[![Test Status](https://img.shields.io/github/workflow/status/docker/cli/test?logo=github)](https://github.com/docker/cli/actions?query=workflow%3Atest)
[![CircleCI Status](https://img.shields.io/circleci/build/github/docker/cli/master?logo=circleci)](https://circleci.com/gh/docker/cli/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/docker/cli)](https://goreportcard.com/report/github.com/docker/cli)
[![Codecov](https://codecov.io/gh/docker/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/docker/cli)

## About

This repository is the home of the cli used in the Docker CE and
Docker EE products.

Development
===========
## Development

`docker/cli` is developed using Docker.

Build a linux binary:
Build CLI from source:

```
$ make -f docker.Makefile binary
```shell
docker buildx bake
```

Build binaries for all supported platforms:

```shell
docker buildx bake cross
```
$ make -f docker.Makefile cross

Build for a specific platform:

```shell
docker buildx bake --set binary.platform=linux/arm64
```

Build dynamic binary for glibc or musl:

```shell
USE_GLIBC=1 docker buildx bake dynbinary
```

Run all linting:

```shell
docker buildx bake lint shellcheck
```
$ make -f docker.Makefile lint

Run test:

```shell
docker buildx bake test
```

List all the available targets:

```
$ make help
```shell
make help
```

### In-container development environment

Start an interactive development environment:

```
$ make -f docker.Makefile shell
```shell
make -f docker.Makefile shell
```

In the development environment you can run many tasks, including build binaries:
## Legal

```
$ make binary
```

Legal
=====
*Brought to you courtesy of our legal counsel. For more context,
please see the [NOTICE](https://github.com/docker/cli/blob/master/NOTICE) document in this repo.*

@ -63,8 +79,8 @@ violate applicable laws.

For more information, please see https://www.bis.doc.gov

Licensing
=========
## Licensing

docker/cli is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
license text.
appveyor.yml (23 lines changed)

@ -1,23 +0,0 @@
version: "{build}"

clone_folder: c:\gopath\src\github.com\docker\cli

environment:
  GOPATH: c:\gopath
  GOVERSION: 1.13.15
  DEPVERSION: v0.4.1

install:
  - rmdir c:\go /s /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
  - msiexec /i go%GOVERSION%.windows-amd64.msi /q
  - go version
  - go env

deploy: false

build_script:
  - ps: .\scripts\make.ps1 -Binary

test_script:
  - ps: .\scripts\make.ps1 -TestUnit
@ -58,11 +58,8 @@ func setupCommonRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *p
|
|||
// SetupRootCommand sets default usage, help, and error handling for the
|
||||
// root command.
|
||||
func SetupRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet, *cobra.Command) {
|
||||
opts, flags, helpCmd := setupCommonRootCommand(rootCmd)
|
||||
|
||||
rootCmd.SetVersionTemplate("Docker version {{.Version}}\n")
|
||||
|
||||
return opts, flags, helpCmd
|
||||
return setupCommonRootCommand(rootCmd)
|
||||
}
|
||||
|
||||
// SetupPluginRootCommand sets default usage, help and error handling for a plugin root command.
|
||||
|
|
|
@ -255,7 +255,7 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize
|
|||
if tlsconfig.IsErrEncryptedKey(err) {
|
||||
passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
|
||||
newClient := func(password string) (client.APIClient, error) {
|
||||
cli.dockerEndpoint.TLSPassword = password
|
||||
cli.dockerEndpoint.TLSPassword = password //nolint: staticcheck // SA1019: cli.dockerEndpoint.TLSPassword is deprecated
|
||||
return newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile)
|
||||
}
|
||||
cli.client, err = getClientWithPassword(passRetriever, newClient)
|
||||
|
|
|
@ -7,8 +7,10 @@ import (
|
|||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
cliconfig "github.com/docker/cli/cli/config"
|
||||
|
@ -19,7 +21,6 @@ import (
|
|||
"github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
"gotest.tools/v3/env"
|
||||
"gotest.tools/v3/fs"
|
||||
)
|
||||
|
@ -30,42 +31,58 @@ func TestNewAPIClientFromFlags(t *testing.T) {
|
|||
host = "npipe://./"
|
||||
}
|
||||
opts := &flags.CommonOptions{Hosts: []string{host}}
|
||||
configFile := &configfile.ConfigFile{
|
||||
HTTPHeaders: map[string]string{
|
||||
"My-Header": "Custom-Value",
|
||||
},
|
||||
}
|
||||
apiclient, err := NewAPIClientFromFlags(opts, configFile)
|
||||
apiClient, err := NewAPIClientFromFlags(opts, &configfile.ConfigFile{})
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(host, apiclient.DaemonHost()))
|
||||
|
||||
expectedHeaders := map[string]string{
|
||||
"My-Header": "Custom-Value",
|
||||
"User-Agent": UserAgent(),
|
||||
}
|
||||
assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders()))
|
||||
assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
|
||||
assert.DeepEqual(t, configFile.HTTPHeaders, map[string]string{"My-Header": "Custom-Value"})
|
||||
assert.Equal(t, apiClient.DaemonHost(), host)
|
||||
assert.Equal(t, apiClient.ClientVersion(), api.DefaultVersion)
|
||||
}
|
||||
|
||||
func TestNewAPIClientFromFlagsForDefaultSchema(t *testing.T) {
|
||||
host := ":2375"
|
||||
slug := "tcp://localhost"
|
||||
if runtime.GOOS == "windows" {
|
||||
slug = "tcp://127.0.0.1"
|
||||
}
|
||||
opts := &flags.CommonOptions{Hosts: []string{host}}
|
||||
apiClient, err := NewAPIClientFromFlags(opts, &configfile.ConfigFile{})
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, apiClient.DaemonHost(), slug+host)
|
||||
assert.Equal(t, apiClient.ClientVersion(), api.DefaultVersion)
|
||||
}
|
||||
|
||||
func TestNewAPIClientFromFlagsWithCustomHeaders(t *testing.T) {
|
||||
var received map[string]string
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
received = map[string]string{
|
||||
"My-Header": r.Header.Get("My-Header"),
|
||||
"User-Agent": r.Header.Get("User-Agent"),
|
||||
}
|
||||
_, _ = w.Write([]byte("OK"))
|
||||
}))
|
||||
defer ts.Close()
|
||||
host := strings.Replace(ts.URL, "http://", "tcp://", 1)
|
||||
opts := &flags.CommonOptions{Hosts: []string{host}}
|
||||
configFile := &configfile.ConfigFile{
|
||||
HTTPHeaders: map[string]string{
|
||||
"My-Header": "Custom-Value",
|
||||
},
|
||||
}
|
||||
apiclient, err := NewAPIClientFromFlags(opts, configFile)
|
||||
|
||||
apiClient, err := NewAPIClientFromFlags(opts, configFile)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal("tcp://localhost"+host, apiclient.DaemonHost()))
|
||||
assert.Equal(t, apiClient.DaemonHost(), host)
|
||||
assert.Equal(t, apiClient.ClientVersion(), api.DefaultVersion)
|
||||
|
||||
// verify User-Agent is not appended to the configfile. see https://github.com/docker/cli/pull/2756
|
||||
assert.DeepEqual(t, configFile.HTTPHeaders, map[string]string{"My-Header": "Custom-Value"})
|
||||
|
||||
expectedHeaders := map[string]string{
|
||||
"My-Header": "Custom-Value",
|
||||
"User-Agent": UserAgent(),
|
||||
}
|
||||
assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders()))
|
||||
assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
|
||||
_, err = apiClient.Ping(context.Background())
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, received, expectedHeaders)
|
||||
}
|
||||
|
||||
func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
|
||||
|
@ -77,25 +94,7 @@ func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
|
|||
configFile := &configfile.ConfigFile{}
|
||||
apiclient, err := NewAPIClientFromFlags(opts, configFile)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(customVersion, apiclient.ClientVersion()))
|
||||
}
|
||||
|
||||
func TestNewAPIClientFromFlagsWithHttpProxyEnv(t *testing.T) {
|
||||
defer env.Patch(t, "HTTP_PROXY", "http://proxy.acme.com:1234")()
|
||||
defer env.Patch(t, "DOCKER_HOST", "tcp://docker.acme.com:2376")()
|
||||
|
||||
opts := &flags.CommonOptions{}
|
||||
configFile := &configfile.ConfigFile{}
|
||||
apiclient, err := NewAPIClientFromFlags(opts, configFile)
|
||||
assert.NilError(t, err)
|
||||
transport, ok := apiclient.HTTPClient().Transport.(*http.Transport)
|
||||
assert.Assert(t, ok)
|
||||
assert.Assert(t, transport.Proxy != nil)
|
||||
request, err := http.NewRequest(http.MethodGet, "tcp://docker.acme.com:2376", nil)
|
||||
assert.NilError(t, err)
|
||||
url, err := transport.Proxy(request)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal("http://proxy.acme.com:1234", url.String()))
|
||||
assert.Equal(t, apiclient.ClientVersion(), customVersion)
|
||||
}
|
||||
|
||||
type fakeClient struct {
|
||||
|
@ -161,8 +160,8 @@ func TestInitializeFromClient(t *testing.T) {
|
|||
|
||||
cli := &DockerCli{client: apiclient}
|
||||
cli.initializeFromClient()
|
||||
assert.Check(t, is.DeepEqual(testcase.expectedServer, cli.serverInfo))
|
||||
assert.Check(t, is.Equal(testcase.negotiated, apiclient.negotiated))
|
||||
assert.DeepEqual(t, cli.serverInfo, testcase.expectedServer)
|
||||
assert.Equal(t, apiclient.negotiated, testcase.negotiated)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -205,7 +204,7 @@ func TestExperimentalCLI(t *testing.T) {
|
|||
err := cli.Initialize(flags.NewClientOptions())
|
||||
assert.NilError(t, err)
|
||||
// For backward-compatibility, HasExperimental will always be "true"
|
||||
assert.Check(t, is.Equal(true, cli.ClientInfo().HasExperimental))
|
||||
assert.Equal(t, cli.ClientInfo().HasExperimental, true)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
|
@ -97,7 +97,7 @@ func runAttach(dockerCli command.Cli, opts *attachOptions) error {
|
|||
}
|
||||
|
||||
if opts.proxy && !c.Config.Tty {
|
||||
sigc := notfiyAllSignals()
|
||||
sigc := notifyAllSignals()
|
||||
go ForwardAllSignals(ctx, dockerCli, opts.container, sigc)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package container
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -12,6 +13,8 @@ import (
|
|||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/morikuni/aec"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -21,6 +24,7 @@ type copyOptions struct {
|
|||
destination string
|
||||
followLink bool
|
||||
copyUIDGID bool
|
||||
quiet bool
|
||||
}
|
||||
|
||||
type copyDirection int
|
||||
|
@ -34,11 +38,38 @@
type cpConfig struct {
	followLink bool
	copyUIDGID bool
	quiet      bool
	sourcePath string
	destPath   string
	container  string
}

// copyProgressPrinter wraps io.ReadCloser to print progress information when
// copying files to/from a container.
type copyProgressPrinter struct {
	io.ReadCloser
	toContainer bool
	total       *float64
	writer      io.Writer
}

func (pt *copyProgressPrinter) Read(p []byte) (int, error) {
	n, err := pt.ReadCloser.Read(p)
	*pt.total += float64(n)

	if err == nil {
		fmt.Fprint(pt.writer, aec.Restore)
		fmt.Fprint(pt.writer, aec.EraseLine(aec.EraseModes.All))
		if pt.toContainer {
			fmt.Fprintln(pt.writer, "Copying to container - "+units.HumanSize(*pt.total))
		} else {
			fmt.Fprintln(pt.writer, "Copying from container - "+units.HumanSize(*pt.total))
		}
	}

	return n, err
}

// NewCopyCommand creates a new `docker cp` command
func NewCopyCommand(dockerCli command.Cli) *cobra.Command {
	var opts copyOptions
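The progress printer relies on a common Go pattern: embed the underlying reader and let every Read update a shared counter. A minimal, self-contained sketch of that same pattern, independent of the CLI types above (the type and data here are illustrative only):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader mirrors the copyProgressPrinter idea: embed the wrapped
// reader and add to a shared total on every Read call.
type countingReader struct {
	io.Reader
	total *float64
}

func (r *countingReader) Read(p []byte) (int, error) {
	n, err := r.Reader.Read(p)
	*r.total += float64(n)
	return n, err
}

func main() {
	var total float64
	src := &countingReader{Reader: strings.NewReader("hello, docker cp"), total: &total}
	_, _ = io.Copy(io.Discard, src) // drain the reader; total grows as bytes pass through
	fmt.Printf("copied %.0f bytes\n", total) // copied 16 bytes
}
```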
@ -64,6 +95,10 @@ func NewCopyCommand(dockerCli command.Cli) *cobra.Command {
|
|||
}
|
||||
opts.source = args[0]
|
||||
opts.destination = args[1]
|
||||
if !cmd.Flag("quiet").Changed {
|
||||
// User did not specify "quiet" flag; suppress output if no terminal is attached
|
||||
opts.quiet = !dockerCli.Out().IsTerminal()
|
||||
}
|
||||
return runCopy(dockerCli, opts)
|
||||
},
|
||||
}
|
||||
|
@ -71,6 +106,7 @@ func NewCopyCommand(dockerCli command.Cli) *cobra.Command {
|
|||
flags := cmd.Flags()
|
||||
flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH")
|
||||
flags.BoolVarP(&opts.copyUIDGID, "archive", "a", false, "Archive mode (copy all uid/gid information)")
|
||||
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress progress output during copy. Progress output is automatically suppressed if no terminal is attached")
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
@ -81,6 +117,7 @@ func runCopy(dockerCli command.Cli, opts copyOptions) error {
|
|||
copyConfig := cpConfig{
|
||||
followLink: opts.followLink,
|
||||
copyUIDGID: opts.copyUIDGID,
|
||||
quiet: opts.quiet,
|
||||
sourcePath: srcPath,
|
||||
destPath: destPath,
|
||||
}
|
||||
|
@ -171,12 +208,34 @@ func copyFromContainer(ctx context.Context, dockerCli command.Cli, copyConfig cp
|
|||
RebaseName: rebaseName,
|
||||
}
|
||||
|
||||
var copiedSize float64
|
||||
if !copyConfig.quiet {
|
||||
content = ©ProgressPrinter{
|
||||
ReadCloser: content,
|
||||
toContainer: false,
|
||||
writer: dockerCli.Err(),
|
||||
total: &copiedSize,
|
||||
}
|
||||
}
|
||||
|
||||
preArchive := content
|
||||
if len(srcInfo.RebaseName) != 0 {
|
||||
_, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
|
||||
preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName)
|
||||
}
|
||||
|
||||
if copyConfig.quiet {
|
||||
return archive.CopyTo(preArchive, srcInfo, dstPath)
|
||||
}
|
||||
|
||||
fmt.Fprint(dockerCli.Err(), aec.Save)
|
||||
fmt.Fprintln(dockerCli.Err(), "Preparing to copy...")
|
||||
res := archive.CopyTo(preArchive, srcInfo, dstPath)
|
||||
fmt.Fprint(dockerCli.Err(), aec.Restore)
|
||||
fmt.Fprint(dockerCli.Err(), aec.EraseLine(aec.EraseModes.All))
|
||||
fmt.Fprintln(dockerCli.Err(), "Successfully copied", units.HumanSize(copiedSize), "to", dstPath)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// In order to get the copy behavior right, we need to know information
|
||||
|
@ -229,8 +288,9 @@ func copyToContainer(ctx context.Context, dockerCli command.Cli, copyConfig cpCo
|
|||
}
|
||||
|
||||
var (
|
||||
content io.Reader
|
||||
content io.ReadCloser
|
||||
resolvedDstPath string
|
||||
copiedSize float64
|
||||
)
|
||||
|
||||
if srcPath == "-" {
|
||||
|
@ -272,13 +332,33 @@ func copyToContainer(ctx context.Context, dockerCli command.Cli, copyConfig cpCo
|
|||
|
||||
resolvedDstPath = dstDir
|
||||
content = preparedArchive
|
||||
if !copyConfig.quiet {
|
||||
content = ©ProgressPrinter{
|
||||
ReadCloser: content,
|
||||
toContainer: true,
|
||||
writer: dockerCli.Err(),
|
||||
total: &copiedSize,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
options := types.CopyToContainerOptions{
|
||||
AllowOverwriteDirWithFile: false,
|
||||
CopyUIDGID: copyConfig.copyUIDGID,
|
||||
}
|
||||
|
||||
if copyConfig.quiet {
|
||||
return client.CopyToContainer(ctx, copyConfig.container, resolvedDstPath, content, options)
|
||||
}
|
||||
|
||||
fmt.Fprint(dockerCli.Err(), aec.Save)
|
||||
fmt.Fprintln(dockerCli.Err(), "Preparing to copy...")
|
||||
res := client.CopyToContainer(ctx, copyConfig.container, resolvedDstPath, content, options)
|
||||
fmt.Fprint(dockerCli.Err(), aec.Restore)
|
||||
fmt.Fprint(dockerCli.Err(), aec.EraseLine(aec.EraseModes.All))
|
||||
fmt.Fprintln(dockerCli.Err(), "Successfully copied", units.HumanSize(copiedSize), "to", copyConfig.container+":"+dstInfo.Path)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be
|
||||
|
|
|
@ -77,7 +77,7 @@ func TestRunCopyFromContainerToFilesystem(t *testing.T) {
|
|||
return readCloser, types.ContainerPathStat{}, err
|
||||
},
|
||||
}
|
||||
options := copyOptions{source: "container:/path", destination: destDir.Path()}
|
||||
options := copyOptions{source: "container:/path", destination: destDir.Path(), quiet: true}
|
||||
cli := test.NewFakeCli(fakeClient)
|
||||
err := runCopy(cli, options)
|
||||
assert.NilError(t, err)
|
||||
|
|
|
@ -133,7 +133,7 @@ func TestCreateContainerImagePullPolicy(t *testing.T) {
|
|||
return ioutil.NopCloser(strings.NewReader("")), nil
|
||||
},
|
||||
infoFunc: func() (types.Info, error) {
|
||||
return types.Info{IndexServerAddress: "http://indexserver"}, nil
|
||||
return types.Info{IndexServerAddress: "https://indexserver.example.com"}, nil
|
||||
},
|
||||
}
|
||||
cli := test.NewFakeCli(client)
|
||||
|
@ -277,6 +277,8 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
|
|||
"no_proxy=noProxy",
|
||||
"FTP_PROXY=ftpProxy",
|
||||
"ftp_proxy=ftpProxy",
|
||||
"ALL_PROXY=allProxy",
|
||||
"all_proxy=allProxy",
|
||||
}
|
||||
sort.Strings(expected)
|
||||
|
||||
|
@ -299,6 +301,7 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
|
|||
HTTPSProxy: "httpsProxy",
|
||||
NoProxy: "noProxy",
|
||||
FTPProxy: "ftpProxy",
|
||||
AllProxy: "allProxy",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
|
|
@ -20,7 +20,6 @@ import (
|
|||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -184,7 +183,7 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {
|
|||
flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels")
|
||||
flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only")
|
||||
flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits")
|
||||
flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, "Signal to stop a container")
|
||||
flags.StringVar(&copts.stopSignal, "stop-signal", "", "Signal to stop the container")
|
||||
flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container")
|
||||
flags.SetAnnotation("stop-timeout", "version", []string{"1.25"})
|
||||
flags.Var(copts.sysctls, "sysctl", "Sysctl options")
|
||||
|
@ -600,11 +599,9 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
|||
Entrypoint: entrypoint,
|
||||
WorkingDir: copts.workingDir,
|
||||
Labels: opts.ConvertKVStringsToMap(labels),
|
||||
StopSignal: copts.stopSignal,
|
||||
Healthcheck: healthConfig,
|
||||
}
|
||||
if flags.Changed("stop-signal") {
|
||||
config.StopSignal = copts.stopSignal
|
||||
}
|
||||
if flags.Changed("stop-timeout") {
|
||||
config.StopTimeout = &copts.stopTimeout
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package container
import (
	"context"
	"fmt"
	"net"
	"strings"

	"github.com/docker/cli/cli"
@ -61,8 +62,7 @@ func runPort(dockerCli command.Cli, opts *portOptions) error {
	}
	if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil {
		for _, frontend := range frontends {
			fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort)
			fmt.Fprintln(dockerCli.Out(), net.JoinHostPort(frontend.HostIP, frontend.HostPort))
		}
		return nil
	}
@ -70,7 +71,7 @@ func runPort(dockerCli command.Cli, opts *portOptions) error {

	for from, frontends := range c.NetworkSettings.Ports {
		for _, frontend := range frontends {
			fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort)
			fmt.Fprintf(dockerCli.Out(), "%s -> %s\n", from, net.JoinHostPort(frontend.HostIP, frontend.HostPort))
		}
	}
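The switch to net.JoinHostPort matters for IPv6 bindings: the host must be bracketed before the port is appended, which plain "%s:%s" formatting does not do. A small self-contained illustration (the values match the golden files added in this commit):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// IPv4: both forms agree.
	fmt.Println(net.JoinHostPort("0.0.0.0", "3456")) // 0.0.0.0:3456

	// IPv6: JoinHostPort adds the required brackets ([::]:3456),
	// while naive formatting produces the ambiguous ":::3456".
	fmt.Println(net.JoinHostPort("::", "3456"))      // [::]:3456
	fmt.Println(fmt.Sprintf("%s:%s", "::", "3456")) // :::3456
}
```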
@ -0,0 +1,57 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/test"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"gotest.tools/v3/assert"
|
||||
"gotest.tools/v3/golden"
|
||||
)
|
||||
|
||||
func TestNewPortCommandOutput(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
ips []string
|
||||
}{
|
||||
{
|
||||
name: "container-port-ipv4",
|
||||
ips: []string{"0.0.0.0"},
|
||||
},
|
||||
{
|
||||
name: "container-port-ipv6",
|
||||
ips: []string{"::"},
|
||||
},
|
||||
{
|
||||
name: "container-port-ipv6-and-ipv4",
|
||||
ips: []string{"::", "0.0.0.0"},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cli := test.NewFakeCli(&fakeClient{
|
||||
inspectFunc: func(string) (types.ContainerJSON, error) {
|
||||
ci := types.ContainerJSON{NetworkSettings: &types.NetworkSettings{}}
|
||||
ci.NetworkSettings.Ports = nat.PortMap{
|
||||
"80/tcp": make([]nat.PortBinding, len(tc.ips)),
|
||||
}
|
||||
for i, ip := range tc.ips {
|
||||
ci.NetworkSettings.Ports["80/tcp"][i] = nat.PortBinding{
|
||||
HostIP: ip, HostPort: "3456",
|
||||
}
|
||||
}
|
||||
return ci, nil
|
||||
},
|
||||
}, test.EnableContentTrust)
|
||||
cmd := NewPortCommand(cli)
|
||||
cmd.SetErr(ioutil.Discard)
|
||||
cmd.SetArgs([]string{"some_container", "80"})
|
||||
err := cmd.Execute()
|
||||
assert.NilError(t, err)
|
||||
golden.Assert(t, cli.OutBuffer().String(), tc.name+".golden")
|
||||
})
|
||||
}
|
||||
}
|
|
@ -13,7 +13,7 @@ import (
|
|||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/moby/term"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -131,7 +131,7 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
|
|||
return runStartContainerErr(err)
|
||||
}
|
||||
if opts.sigProxy {
|
||||
sigc := notfiyAllSignals()
|
||||
sigc := notifyAllSignals()
|
||||
go ForwardAllSignals(ctx, dockerCli, createResponse.ID, sigc)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
@ -214,32 +214,7 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
|
|||
return nil
|
||||
}
|
||||
|
||||
func attachContainer(
|
||||
ctx context.Context,
|
||||
dockerCli command.Cli,
|
||||
errCh *chan error,
|
||||
config *container.Config,
|
||||
containerID string,
|
||||
) (func(), error) {
|
||||
stdout, stderr := dockerCli.Out(), dockerCli.Err()
|
||||
var (
|
||||
out, cerr io.Writer
|
||||
in io.ReadCloser
|
||||
)
|
||||
if config.AttachStdin {
|
||||
in = dockerCli.In()
|
||||
}
|
||||
if config.AttachStdout {
|
||||
out = stdout
|
||||
}
|
||||
if config.AttachStderr {
|
||||
if config.Tty {
|
||||
cerr = stdout
|
||||
} else {
|
||||
cerr = stderr
|
||||
}
|
||||
}
|
||||
|
||||
func attachContainer(ctx context.Context, dockerCli command.Cli, errCh *chan error, config *container.Config, containerID string) (func(), error) {
|
||||
options := types.ContainerAttachOptions{
|
||||
Stream: true,
|
||||
Stdin: config.AttachStdin,
|
||||
|
@ -253,6 +228,24 @@ func attachContainer(
|
|||
return nil, errAttach
|
||||
}
|
||||
|
||||
var (
|
||||
out, cerr io.Writer
|
||||
in io.ReadCloser
|
||||
)
|
||||
if config.AttachStdin {
|
||||
in = dockerCli.In()
|
||||
}
|
||||
if config.AttachStdout {
|
||||
out = dockerCli.Out()
|
||||
}
|
||||
if config.AttachStderr {
|
||||
if config.Tty {
|
||||
cerr = dockerCli.Out()
|
||||
} else {
|
||||
cerr = dockerCli.Err()
|
||||
}
|
||||
}
|
||||
|
||||
ch := make(chan error, 1)
|
||||
*errCh = ch
|
||||
|
||||
|
@ -284,7 +277,7 @@ func reportError(stderr io.Writer, name string, str string, withHelp bool) {
|
|||
if withHelp {
|
||||
str += "\nSee 'docker " + name + " --help'."
|
||||
}
|
||||
fmt.Fprintln(stderr, "docker:", str)
|
||||
_, _ = fmt.Fprintln(stderr, "docker:", str)
|
||||
}
|
||||
|
||||
// if container start fails with 'not found'/'no such' error, return 127
|
||||
|
|
|
@ -2,12 +2,11 @@ package container
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
gosignal "os/signal"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -15,10 +14,16 @@ import (
|
|||
//
|
||||
// The channel you pass in must already be setup to receive any signals you want to forward.
|
||||
func ForwardAllSignals(ctx context.Context, cli command.Cli, cid string, sigc <-chan os.Signal) {
|
||||
var s os.Signal
|
||||
var (
|
||||
s os.Signal
|
||||
ok bool
|
||||
)
|
||||
for {
|
||||
select {
|
||||
case s = <-sigc:
|
||||
case s, ok = <-sigc:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
@ -40,7 +45,6 @@ func ForwardAllSignals(ctx context.Context, cli command.Cli, cid string, sigc <-
|
|||
}
|
||||
}
|
||||
if sig == "" {
|
||||
fmt.Fprintf(cli.Err(), "Unsupported signal: %v. Discarding.\n", s)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -50,7 +54,7 @@ func ForwardAllSignals(ctx context.Context, cli command.Cli, cid string, sigc <-
|
|||
}
|
||||
}
|
||||
|
||||
func notfiyAllSignals() chan os.Signal {
|
||||
func notifyAllSignals() chan os.Signal {
|
||||
sigc := make(chan os.Signal, 128)
|
||||
gosignal.Notify(sigc)
|
||||
return sigc
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/docker/cli/internal/test"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/moby/sys/signal"
|
||||
)
|
||||
|
||||
func TestForwardSignals(t *testing.T) {
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
// +build !windows
|
||||
|
||||
package container
|
||||
|
||||
import (
|
|
@ -1,3 +1,5 @@
|
|||
// +build !windows
|
||||
|
||||
package container
|
||||
|
||||
import (
|
|
@ -1,5 +1,3 @@
|
|||
// +build !linux
|
||||
|
||||
package container
|
||||
|
||||
import "os"
|
|
@ -9,7 +9,7 @@ import (
|
|||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/moby/term"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
|
@ -74,8 +74,8 @@ func runStart(dockerCli command.Cli, opts *startOptions) error {
|
|||
|
||||
// We always use c.ID instead of container to maintain consistency during `docker start`
|
||||
if !c.Config.Tty {
|
||||
sigc := notfiyAllSignals()
|
||||
ForwardAllSignals(ctx, dockerCli, c.ID, sigc)
|
||||
sigc := notifyAllSignals()
|
||||
go ForwardAllSignals(ctx, dockerCli, c.ID, sigc)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
0.0.0.0:3456
|
|
@ -0,0 +1,2 @@
|
|||
[::]:3456
|
||||
0.0.0.0:3456
|
|
@ -0,0 +1 @@
|
|||
[::]:3456
|
|
@ -11,7 +11,7 @@ import (
|
|||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
|
@ -62,8 +62,11 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
|
|||
&opts.DefaultStackOrchestrator,
|
||||
"default-stack-orchestrator", "",
|
||||
"Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)")
|
||||
flags.SetAnnotation("default-stack-orchestrator", "deprecated", nil)
|
||||
flags.StringToStringVar(&opts.Docker, "docker", nil, "set the docker endpoint")
|
||||
flags.StringToStringVar(&opts.Kubernetes, "kubernetes", nil, "set the kubernetes endpoint")
|
||||
flags.SetAnnotation("kubernetes", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubernetes", "deprecated", nil)
|
||||
flags.StringVar(&opts.From, "from", "", "create context from a named context")
|
||||
return cmd
|
||||
}
|
||||
|
|
|
@ -169,7 +169,7 @@ func validateTestKubeEndpoint(t *testing.T, s store.Reader, name string) {
|
|||
kubeMeta := ctxMetadata.Endpoints[kubernetes.KubernetesEndpoint].(kubernetes.EndpointMeta)
|
||||
kubeEP, err := kubeMeta.WithTLSData(s, name)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, "https://someserver", kubeEP.Host)
|
||||
assert.Equal(t, "https://someserver.example.com", kubeEP.Host)
|
||||
assert.Equal(t, "the-ca", string(kubeEP.TLSData.CA))
|
||||
assert.Equal(t, "the-cert", string(kubeEP.TLSData.Cert))
|
||||
assert.Equal(t, "the-key", string(kubeEP.TLSData.Key))
|
||||
|
@ -287,7 +287,7 @@ func TestCreateFromContext(t *testing.T) {
|
|||
assert.Equal(t, newContextTyped.Description, c.expectedDescription)
|
||||
assert.Equal(t, newContextTyped.StackOrchestrator, c.expectedOrchestrator)
|
||||
assert.Equal(t, dockerEndpoint.Host, "tcp://42.42.42.42:2375")
|
||||
assert.Equal(t, kubeEndpoint.Host, "https://someserver")
|
||||
assert.Equal(t, kubeEndpoint.Host, "https://someserver.example.com")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -361,7 +361,7 @@ func TestCreateFromCurrent(t *testing.T) {
|
|||
assert.Equal(t, newContextTyped.Description, c.expectedDescription)
|
||||
assert.Equal(t, newContextTyped.StackOrchestrator, c.expectedOrchestrator)
|
||||
assert.Equal(t, dockerEndpoint.Host, "tcp://42.42.42.42:2375")
|
||||
assert.Equal(t, kubeEndpoint.Host, "https://someserver")
|
||||
assert.Equal(t, kubeEndpoint.Host, "https://someserver.example.com")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -46,6 +46,8 @@ func newExportCommand(dockerCli command.Cli) *cobra.Command {
|
|||
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.Kubeconfig, "kubeconfig", false, "Export as a kubeconfig file")
|
||||
flags.SetAnnotation("kubeconfig", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubeconfig", "deprecated", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ func createTestContextWithKubeAndSwarm(t *testing.T, cli command.Cli, name strin
|
|||
DefaultStackOrchestrator: orchestrator,
|
||||
Description: "description of " + name,
|
||||
Kubernetes: map[string]string{keyFrom: "default"},
|
||||
Docker: map[string]string{keyHost: "https://someswarmserver"},
|
||||
Docker: map[string]string{keyHost: "https://someswarmserver.example.com"},
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
|
|
@ -7,11 +7,11 @@
|
|||
},
|
||||
"Endpoints": {
|
||||
"docker": {
|
||||
"Host": "https://someswarmserver",
|
||||
"Host": "https://someswarmserver.example.com",
|
||||
"SkipTLSVerify": false
|
||||
},
|
||||
"kubernetes": {
|
||||
"Host": "https://someserver",
|
||||
"Host": "https://someserver.example.com",
|
||||
"SkipTLSVerify": false,
|
||||
"DefaultNamespace": "default"
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
NAME DESCRIPTION DOCKER ENDPOINT KUBERNETES ENDPOINT ORCHESTRATOR
|
||||
current * description of current https://someswarmserver https://someserver (default) all
|
||||
current * description of current https://someswarmserver.example.com https://someserver.example.com (default) all
|
||||
default Current DOCKER_HOST based configuration unix:///var/run/docker.sock swarm
|
||||
other description of other https://someswarmserver https://someserver (default) all
|
||||
unset description of unset https://someswarmserver https://someserver (default)
|
||||
other description of other https://someswarmserver.example.com https://someserver.example.com (default) all
|
||||
unset description of unset https://someswarmserver.example.com https://someserver.example.com (default)
|
||||
|
|
|
@ -2,7 +2,7 @@ apiVersion: v1
|
|||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: dGhlLWNh
|
||||
server: https://someserver
|
||||
server: https://someserver.example.com
|
||||
name: test-cluster
|
||||
contexts:
|
||||
- context:
|
||||
|
|
|
@ -61,8 +61,11 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
|
|||
&opts.DefaultStackOrchestrator,
|
||||
"default-stack-orchestrator", "",
|
||||
"Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)")
|
||||
flags.SetAnnotation("default-stack-orchestrator", "deprecated", nil)
|
||||
flags.StringToStringVar(&opts.Docker, "docker", nil, "set the docker endpoint")
|
||||
flags.StringToStringVar(&opts.Kubernetes, "kubernetes", nil, "set the kubernetes endpoint")
|
||||
flags.SetAnnotation("kubernetes", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubernetes", "deprecated", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
@@ -378,26 +378,32 @@ func AddDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCl
		return nil, "", err
	}
	now := time.Now()
	hdrTmpl := &tar.Header{
	randomName := ".dockerfile." + stringid.GenerateRandomID()[:20]

	buildCtx = archive.ReplaceFileTarWrapper(buildCtx, map[string]archive.TarModifierFunc{
		// Add the dockerfile with a random filename
		randomName: func(_ string, _ *tar.Header, _ io.Reader) (*tar.Header, []byte, error) {
			header := &tar.Header{
				Name:       randomName,
				Mode:       0600,
				Uid:        0,
				Gid:        0,
				ModTime:    now,
				Typeflag:   tar.TypeReg,
				AccessTime: now,
				ChangeTime: now,
			}
	randomName := ".dockerfile." + stringid.GenerateRandomID()[:20]

	buildCtx = archive.ReplaceFileTarWrapper(buildCtx, map[string]archive.TarModifierFunc{
		// Add the dockerfile with a random filename
		randomName: func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
			return hdrTmpl, file, nil
			return header, file, nil
		},
		// Update .dockerignore to include the random filename
		".dockerignore": func(_ string, h *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
			if h == nil {
				h = hdrTmpl
				h = &tar.Header{
					Name:       ".dockerignore",
					Mode:       0600,
					ModTime:    now,
					Typeflag:   tar.TypeReg,
					AccessTime: now,
					ChangeTime: now,
				}
			}

			b := &bytes.Buffer{}
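The hunk above reworks AddDockerfileToBuildContext so that each tar modifier builds its own *tar.Header rather than reusing a shared template header. Below is a minimal, self-contained sketch of the same idea using only the standard library's archive/tar; it does not use docker's archive.ReplaceFileTarWrapper, and the file names and contents are invented for illustration.

// Sketch: rewrite a tar stream so one extra file is appended under a fresh
// name, giving the injected entry its own header instead of sharing one.
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
	"time"
)

// appendFile copies src into a new tar stream and adds one more entry,
// built from a dedicated header so no header state is shared between entries.
func appendFile(src io.Reader, name string, data []byte) ([]byte, error) {
	var out bytes.Buffer
	tw := tar.NewWriter(&out)
	tr := tar.NewReader(src)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := io.Copy(tw, tr); err != nil {
			return nil, err
		}
	}
	now := time.Now()
	hdr := &tar.Header{ // a header owned by the injected file only
		Name:       name,
		Mode:       0600,
		Size:       int64(len(data)),
		ModTime:    now,
		AccessTime: now,
		ChangeTime: now,
		Typeflag:   tar.TypeReg,
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return nil, err
	}
	if _, err := tw.Write(data); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}

func main() {
	// Build a tiny source archive with a single file in it.
	var src bytes.Buffer
	tw := tar.NewWriter(&src)
	content := []byte("hello")
	tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0600, Size: int64(len(content)), Typeflag: tar.TypeReg})
	tw.Write(content)
	tw.Close()

	out, err := appendFile(&src, ".dockerfile.0123456789abcdef0123", []byte("FROM scratch\n"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rewritten archive is %d bytes\n", len(out))
}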
@@ -245,6 +245,8 @@ func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) {
func createTestTempDir(t *testing.T, prefix string) (string, func()) {
	path, err := ioutil.TempDir("", prefix)
	assert.NilError(t, err)
	path, err = filepath.EvalSymlinks(path)
	assert.NilError(t, err)
	return path, func() { assert.NilError(t, os.RemoveAll(path)) }
}
@@ -31,6 +31,7 @@ import (
	"github.com/moby/buildkit/session/secrets/secretsprovider"
	"github.com/moby/buildkit/session/sshforward/sshprovider"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/moby/buildkit/util/gitutil"
	"github.com/moby/buildkit/util/progress/progressui"
	"github.com/moby/buildkit/util/progress/progresswriter"
	"github.com/pkg/errors"
@@ -186,10 +187,15 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
		}
		s.Allow(sp)
	}
	if len(options.ssh) > 0 {
		sshp, err := parseSSHSpecs(options.ssh)

	sshSpecs := options.ssh
	if len(sshSpecs) == 0 && isGitSSH(remote) {
		sshSpecs = []string{"default"}
	}
	if len(sshSpecs) > 0 {
		sshp, err := parseSSHSpecs(sshSpecs)
		if err != nil {
			return errors.Wrapf(err, "could not parse ssh: %v", options.ssh)
			return errors.Wrapf(err, "could not parse ssh: %v", sshSpecs)
		}
		s.Allow(sshp)
	}
@@ -512,3 +518,8 @@ func parseSSH(value string) *sshprovider.AgentConfig {
	}
	return &cfg
}

func isGitSSH(url string) bool {
	_, gitProtocol := gitutil.ParseProtocol(url)
	return gitProtocol == gitutil.SSHProtocol
}
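The hunk above makes the BuildKit build path forward the default SSH agent when the build context is a Git remote reached over SSH and no --ssh flag was given. The sketch below approximates that decision with the standard library only; the real code relies on buildkit's gitutil.ParseProtocol, so the remote-detection regex and helper names here are simplified stand-ins, not the exact logic.

// Sketch: imply "--ssh default" for SSH-style Git remotes when no explicit
// --ssh specs were provided.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// scpLike matches "user@host:path" remotes such as git@github.com:docker/cli.git.
var scpLike = regexp.MustCompile(`^[a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+:`)

// looksLikeGitSSH is a rough stand-in for gitutil-based protocol detection.
func looksLikeGitSSH(remote string) bool {
	return strings.HasPrefix(remote, "ssh://") || scpLike.MatchString(remote)
}

// sshSpecsFor mirrors the shape of the change: fall back to "default" only
// when the user passed nothing and the context is a Git-over-SSH remote.
func sshSpecsFor(remote string, flags []string) []string {
	if len(flags) == 0 && looksLikeGitSSH(remote) {
		return []string{"default"} // forward the local SSH agent by default
	}
	return flags
}

func main() {
	fmt.Println(sshSpecsFor("git@github.com:docker/cli.git", nil))                         // [default]
	fmt.Println(sshSpecsFor("https://github.com/docker/cli.git", nil))                     // []
	fmt.Println(sshSpecsFor("ssh://git@example.com/repo.git", []string{"mykey=/tmp/key"})) // [mykey=/tmp/key]
}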
@@ -15,17 +15,17 @@ import (
 )

 func TestENVTrustServer(t *testing.T) {
-	defer env.PatchAll(t, map[string]string{"DOCKER_CONTENT_TRUST_SERVER": "https://notary-test.com:5000"})()
+	defer env.PatchAll(t, map[string]string{"DOCKER_CONTENT_TRUST_SERVER": "https://notary-test.example.com:5000"})()
 	indexInfo := &registrytypes.IndexInfo{Name: "testserver"}
 	output, err := trust.Server(indexInfo)
-	expectedStr := "https://notary-test.com:5000"
+	expectedStr := "https://notary-test.example.com:5000"
 	if err != nil || output != expectedStr {
 		t.Fatalf("Expected server to be %s, got %s", expectedStr, output)
 	}
 }

 func TestHTTPENVTrustServer(t *testing.T) {
-	defer env.PatchAll(t, map[string]string{"DOCKER_CONTENT_TRUST_SERVER": "http://notary-test.com:5000"})()
+	defer env.PatchAll(t, map[string]string{"DOCKER_CONTENT_TRUST_SERVER": "http://notary-test.example.com:5000"})()
 	indexInfo := &registrytypes.IndexInfo{Name: "testserver"}
 	_, err := trust.Server(indexInfo)
 	if err == nil {
@@ -63,17 +63,14 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf
		indexServer := registry.GetAuthConfigKey(index)
		isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli)
		authConfig, err := GetDefaultAuthConfig(cli, true, indexServer, isDefaultRegistry)
		if authConfig == nil {
			authConfig = &types.AuthConfig{}
		}
		if err != nil {
			fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
		}
		err = ConfigureAuth(cli, "", "", authConfig, isDefaultRegistry)
		err = ConfigureAuth(cli, "", "", &authConfig, isDefaultRegistry)
		if err != nil {
			return "", err
		}
		return EncodeAuthToBase64(*authConfig)
		return EncodeAuthToBase64(authConfig)
	}
}

@@ -92,7 +89,7 @@ func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexI

// GetDefaultAuthConfig gets the default auth config given a serverAddress
// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it
func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (*types.AuthConfig, error) {
func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) {
	if !isDefaultRegistry {
		serverAddress = registry.ConvertToHostname(serverAddress)
	}
@@ -101,13 +98,15 @@ func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, is
	if checkCredStore {
		authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress)
		if err != nil {
			return nil, err
			return types.AuthConfig{
				ServerAddress: serverAddress,
			}, err
		}
	}
	authconfig.ServerAddress = serverAddress
	authconfig.IdentityToken = ""
	res := types.AuthConfig(authconfig)
	return &res, nil
	return res, nil
}

// ConfigureAuth handles prompting of user's username and password if needed
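The hunks above change GetDefaultAuthConfig to return types.AuthConfig by value, pre-filled with the server address even when the credential-store lookup fails, so callers no longer need the nil-pointer checks they carried before. Below is a small self-contained sketch of that "return a usable value together with the error" pattern; AuthConfig and the lookup callback are local stand-ins, not the docker/cli types.

// Sketch: on lookup failure the returned value still carries ServerAddress,
// so the caller can warn and fall back to interactive login without nil checks.
package main

import (
	"errors"
	"fmt"
)

type AuthConfig struct {
	Username      string
	Password      string
	ServerAddress string
}

func getDefaultAuthConfig(serverAddress string, lookup func(string) (AuthConfig, error)) (AuthConfig, error) {
	cfg, err := lookup(serverAddress)
	if err != nil {
		// Still return something the caller can use.
		return AuthConfig{ServerAddress: serverAddress}, err
	}
	cfg.ServerAddress = serverAddress
	return cfg, nil
}

func main() {
	brokenStore := func(string) (AuthConfig, error) {
		return AuthConfig{}, errors.New("credential helper not found")
	}
	cfg, err := getDefaultAuthConfig("https://index.docker.io/v1/", brokenStore)
	if err != nil {
		fmt.Println("warning:", err) // warn, then continue to interactive login
	}
	fmt.Println("server:", cfg.ServerAddress) // usable without a nil check
}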
@@ -114,22 +114,19 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
	var response registrytypes.AuthenticateOKBody
	isDefaultRegistry := serverAddress == authServer
	authConfig, err := command.GetDefaultAuthConfig(dockerCli, opts.user == "" && opts.password == "", serverAddress, isDefaultRegistry)
	if authConfig == nil {
		authConfig = &types.AuthConfig{}
	}
	if err == nil && authConfig.Username != "" && authConfig.Password != "" {
		response, err = loginWithCredStoreCreds(ctx, dockerCli, authConfig)
		response, err = loginWithCredStoreCreds(ctx, dockerCli, &authConfig)
	}
	if err != nil || authConfig.Username == "" || authConfig.Password == "" {
		err = command.ConfigureAuth(dockerCli, opts.user, opts.password, authConfig, isDefaultRegistry)
		err = command.ConfigureAuth(dockerCli, opts.user, opts.password, &authConfig, isDefaultRegistry)
		if err != nil {
			return err
		}

		response, err = clnt.RegistryLogin(ctx, *authConfig)
		response, err = clnt.RegistryLogin(ctx, authConfig)
		if err != nil && client.IsErrConnectionFailed(err) {
			// If the server isn't responding (yet) attempt to login purely client side
			response, err = loginClientSide(ctx, *authConfig)
			response, err = loginClientSide(ctx, authConfig)
		}
		// If we (still) have an error, give up
		if err != nil {
@@ -152,7 +149,7 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
		}
	}

	if err := creds.Store(configtypes.AuthConfig(*authConfig)); err != nil {
	if err := creds.Store(configtypes.AuthConfig(authConfig)); err != nil {
		return errors.Errorf("Error saving credentials: %v", err)
	}
@@ -66,10 +66,10 @@ func TestElectAuthServer(t *testing.T) {
		},
	},
	{
		expectedAuthServer: "https://foo.bar",
		expectedAuthServer: "https://foo.example.com",
		expectedWarning:    "",
		infoFunc: func() (types.Info, error) {
			return types.Info{IndexServerAddress: "https://foo.bar"}, nil
			return types.Info{IndexServerAddress: "https://foo.example.com"}, nil
		},
	},
	{
@@ -145,7 +145,21 @@ func TestGetDefaultAuthConfig(t *testing.T) {
			assert.Check(t, is.Equal(tc.expectedErr, err.Error()))
		} else {
			assert.NilError(t, err)
			assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, *authconfig))
			assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, authconfig))
		}
	}
}

func TestGetDefaultAuthConfig_HelperError(t *testing.T) {
	cli := test.NewFakeCli(&fakeClient{})
	errBuf := new(bytes.Buffer)
	cli.SetErr(errBuf)
	cli.ConfigFile().CredentialsStore = "fake-does-not-exist"
	serverAddress := "test-server-address"
	expectedAuthConfig := types.AuthConfig{
		ServerAddress: serverAddress,
	}
	authconfig, err := GetDefaultAuthConfig(cli, true, serverAddress, serverAddress == "https://index.docker.io/v1/")
	assert.Check(t, is.DeepEqual(expectedAuthConfig, authconfig))
	assert.Check(t, is.ErrorContains(err, "docker-credential-fake-does-not-exist"))
}
@ -67,13 +67,6 @@ func terminalState(state swarm.TaskState) bool {
|
|||
return numberedStates[state] > numberedStates[swarm.TaskStateRunning]
|
||||
}
|
||||
|
||||
func stateToProgress(state swarm.TaskState, rollback bool) int64 {
|
||||
if !rollback {
|
||||
return numberedStates[state]
|
||||
}
|
||||
return numberedStates[swarm.TaskStateRunning] - numberedStates[state]
|
||||
}
|
||||
|
||||
// ServiceProgress outputs progress information for convergence of a service.
|
||||
// nolint: gocyclo
|
||||
func ServiceProgress(ctx context.Context, client client.APIClient, serviceID string, progressWriter io.WriteCloser) error {
|
||||
|
@ -343,7 +336,7 @@ func (u *replicatedProgressUpdater) update(service swarm.Service, tasks []swarm.
|
|||
running++
|
||||
}
|
||||
|
||||
u.writeTaskProgress(task, mappedSlot, replicas, rollback)
|
||||
u.writeTaskProgress(task, mappedSlot, replicas)
|
||||
}
|
||||
|
||||
if !u.done {
|
||||
|
@ -389,7 +382,7 @@ func (u *replicatedProgressUpdater) tasksBySlot(tasks []swarm.Task, activeNodes
|
|||
return tasksBySlot
|
||||
}
|
||||
|
||||
func (u *replicatedProgressUpdater) writeTaskProgress(task swarm.Task, mappedSlot int, replicas uint64, rollback bool) {
|
||||
func (u *replicatedProgressUpdater) writeTaskProgress(task swarm.Task, mappedSlot int, replicas uint64) {
|
||||
if u.done || replicas > maxProgressBars || uint64(mappedSlot) > replicas {
|
||||
return
|
||||
}
|
||||
|
@ -406,7 +399,7 @@ func (u *replicatedProgressUpdater) writeTaskProgress(task swarm.Task, mappedSlo
|
|||
u.progressOut.WriteProgress(progress.Progress{
|
||||
ID: fmt.Sprintf("%d/%d", mappedSlot, replicas),
|
||||
Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State),
|
||||
Current: stateToProgress(task.Status.State, rollback),
|
||||
Current: numberedStates[task.Status.State],
|
||||
Total: maxProgress,
|
||||
HideCounts: true,
|
||||
})
|
||||
|
@ -463,7 +456,7 @@ func (u *globalProgressUpdater) update(service swarm.Service, tasks []swarm.Task
|
|||
running++
|
||||
}
|
||||
|
||||
u.writeTaskProgress(task, nodeCount, rollback)
|
||||
u.writeTaskProgress(task, nodeCount)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -507,7 +500,7 @@ func (u *globalProgressUpdater) tasksByNode(tasks []swarm.Task) map[string]swarm
|
|||
return tasksByNode
|
||||
}
|
||||
|
||||
func (u *globalProgressUpdater) writeTaskProgress(task swarm.Task, nodeCount int, rollback bool) {
|
||||
func (u *globalProgressUpdater) writeTaskProgress(task swarm.Task, nodeCount int) {
|
||||
if u.done || nodeCount > maxProgressBars {
|
||||
return
|
||||
}
|
||||
|
@ -524,7 +517,7 @@ func (u *globalProgressUpdater) writeTaskProgress(task swarm.Task, nodeCount int
|
|||
u.progressOut.WriteProgress(progress.Progress{
|
||||
ID: stringid.TruncateID(task.NodeID),
|
||||
Action: fmt.Sprintf("%-[1]*s", longestState, task.Status.State),
|
||||
Current: stateToProgress(task.Status.State, rollback),
|
||||
Current: numberedStates[task.Status.State],
|
||||
Total: maxProgress,
|
||||
HideCounts: true,
|
||||
})
|
||||
|
|
|
@ -69,7 +69,9 @@ func NewStackCommand(dockerCli command.Cli) *cobra.Command {
|
|||
flags := cmd.PersistentFlags()
|
||||
flags.String("kubeconfig", "", "Kubernetes config file")
|
||||
flags.SetAnnotation("kubeconfig", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubeconfig", "deprecated", nil)
|
||||
flags.String("orchestrator", "", "Orchestrator to use (swarm|kubernetes|all)")
|
||||
flags.SetAnnotation("orchestrator", "deprecated", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
@ -50,6 +50,7 @@ func NewOptions(flags *flag.FlagSet, orchestrator command.Orchestrator) Options
|
|||
func AddNamespaceFlag(flags *flag.FlagSet) {
|
||||
flags.String("namespace", "", "Kubernetes namespace to use")
|
||||
flags.SetAnnotation("namespace", "kubernetes", nil)
|
||||
flags.SetAnnotation("namespace", "deprecated", nil)
|
||||
}
|
||||
|
||||
// WrapCli wraps command.Cli with kubernetes specifics
|
||||
|
|
|
@ -30,8 +30,10 @@ func newListCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command
|
|||
flags.StringVar(&opts.Format, "format", "", "Pretty-print stacks using a Go template")
|
||||
flags.StringSliceVar(&opts.Namespaces, "namespace", []string{}, "Kubernetes namespaces to use")
|
||||
flags.SetAnnotation("namespace", "kubernetes", nil)
|
||||
flags.SetAnnotation("namespace", "deprecated", nil)
|
||||
flags.BoolVarP(&opts.AllNamespaces, "all-namespaces", "", false, "List stacks from all Kubernetes namespaces")
|
||||
flags.SetAnnotation("all-namespaces", "kubernetes", nil)
|
||||
flags.SetAnnotation("all-namespaces", "deprecated", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
|
|
@ -104,20 +104,20 @@ func TestDisplayTrustRootInvalidFlags(t *testing.T) {
|
|||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{
|
||||
args: []string{"--external-ca=protocol=cfssl,url=https://some.com/https/url"},
|
||||
args: []string{"--external-ca=protocol=cfssl,url=https://some.example.com/https/url"},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{ // to make sure we're not erroring because we didn't provide a CA cert and external CA
|
||||
args: []string{
|
||||
"--ca-cert=" + tmpfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.com/https/url",
|
||||
"--external-ca=protocol=cfssl,url=https://some.example.com/https/url",
|
||||
},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{
|
||||
args: []string{
|
||||
"--rotate",
|
||||
"--external-ca=protocol=cfssl,url=https://some.com/https/url",
|
||||
"--external-ca=protocol=cfssl,url=https://some.example.com/https/url",
|
||||
},
|
||||
errorMsg: "rotating to an external CA requires the `--ca-cert` flag to specify the external CA's cert - " +
|
||||
"to add an external CA with the current root CA certificate, use the `update` command instead",
|
||||
|
@ -243,7 +243,7 @@ func TestUpdateSwarmSpecCertAndExternalCA(t *testing.T) {
|
|||
"--rotate",
|
||||
"--detach",
|
||||
"--ca-cert=" + certfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca"})
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca.example.com"})
|
||||
cmd.SetOut(cli.OutBuffer())
|
||||
assert.NilError(t, cmd.Execute())
|
||||
|
||||
|
@ -253,7 +253,7 @@ func TestUpdateSwarmSpecCertAndExternalCA(t *testing.T) {
|
|||
expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{
|
||||
{
|
||||
Protocol: swarm.ExternalCAProtocolCFSSL,
|
||||
URL: "https://some.external.ca",
|
||||
URL: "https://some.external.ca.example.com",
|
||||
CACert: cert,
|
||||
Options: make(map[string]string),
|
||||
},
|
||||
|
@ -281,7 +281,7 @@ func TestUpdateSwarmSpecCertAndKeyAndExternalCA(t *testing.T) {
|
|||
"--detach",
|
||||
"--ca-cert=" + certfile,
|
||||
"--ca-key=" + keyfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca"})
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca.example.com"})
|
||||
cmd.SetOut(cli.OutBuffer())
|
||||
assert.NilError(t, cmd.Execute())
|
||||
|
||||
|
@ -291,7 +291,7 @@ func TestUpdateSwarmSpecCertAndKeyAndExternalCA(t *testing.T) {
|
|||
expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{
|
||||
{
|
||||
Protocol: swarm.ExternalCAProtocolCFSSL,
|
||||
URL: "https://some.external.ca",
|
||||
URL: "https://some.external.ca.example.com",
|
||||
CACert: cert,
|
||||
Options: make(map[string]string),
|
||||
},
|
||||
|
|
|
@@ -6,6 +6,7 @@ import (
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types"
	"github.com/spf13/cobra"
)

@@ -37,7 +38,8 @@ func newDiskUsageCommand(dockerCli command.Cli) *cobra.Command {
}

func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error {
	du, err := dockerCli.Client().DiskUsage(context.Background())
	// TODO expose types.DiskUsageOptions.Types as flag on the command-line and/or as separate commands (docker container df / docker container usage)
	du, err := dockerCli.Client().DiskUsage(context.Background(), types.DiskUsageOptions{})
	if err != nil {
		return err
	}
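The hunk above switches `docker system df` to the DiskUsage client method that takes a types.DiskUsageOptions argument (left empty here, as in the CLI change). A hedged sketch of calling that API directly is below; it assumes a reachable Docker daemon, and the result field names reflect the API types around the time of this change and may differ between client versions.

// Sketch: query daemon disk usage with empty options, mirroring the CLI.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Empty options ask the daemon for all object types; the TODO in the hunk
	// is about exposing DiskUsageOptions.Types as command-line flags.
	du, err := cli.DiskUsage(context.Background(), types.DiskUsageOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("layers: %d bytes, images: %d, containers: %d\n",
		du.LayersSize, len(du.Images), len(du.Containers))
}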
@ -4,6 +4,8 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
|
@ -14,6 +16,7 @@ import (
|
|||
"github.com/docker/cli/templates"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@@ -64,13 +67,6 @@ func NewInfoCommand(dockerCli command.Cli) *cobra.Command {
func runInfo(cmd *cobra.Command, dockerCli command.Cli, opts *infoOptions) error {
	var info info

	ctx := context.Background()
	if dinfo, err := dockerCli.Client().Info(ctx); err == nil {
		info.Info = &dinfo
	} else {
		info.ServerErrors = append(info.ServerErrors, err.Error())
	}

	info.ClientInfo = &clientInfo{
		Context: dockerCli.CurrentContext(),
		Debug:   debug.IsEnabled(),
@@ -81,19 +77,67 @@ func runInfo(cmd *cobra.Command, dockerCli command.Cli, opts *infoOptions) error
		info.ClientErrors = append(info.ClientErrors, err.Error())
	}

	if needsServerInfo(opts.format, info) {
		ctx := context.Background()
		if dinfo, err := dockerCli.Client().Info(ctx); err == nil {
			info.Info = &dinfo
		} else {
			info.ServerErrors = append(info.ServerErrors, err.Error())
		}
	}

	if opts.format == "" {
		return prettyPrintInfo(dockerCli, info)
	}
	return formatInfo(dockerCli, info, opts.format)
}

// placeHolders does a rudimentary match for possible placeholders in a
// template, matching a '.', followed by an letter (a-z/A-Z).
var placeHolders = regexp.MustCompile(`\.[a-zA-Z]`)

// needsServerInfo detects if the given template uses any server information.
// If only client-side information is used in the template, we can skip
// connecting to the daemon. This allows (e.g.) to only get cli-plugin
// information, without also making a (potentially expensive) API call.
func needsServerInfo(template string, info info) bool {
	if len(template) == 0 || placeHolders.FindString(template) == "" {
		// The template is empty, or does not contain formatting fields
		// (e.g. `table` or `raw` or `{{ json .}}`). Assume we need server-side
		// information to render it.
		return true
	}

	// A template is provided and has at least one field set.
	tmpl, err := templates.NewParse("", template)
	if err != nil {
		// ignore parsing errors here, and let regular code handle them
		return true
	}

	type sparseInfo struct {
		ClientInfo   *clientInfo `json:",omitempty"`
		ClientErrors []string    `json:",omitempty"`
	}

	// This constructs an "info" object that only has the client-side fields.
	err = tmpl.Execute(ioutil.Discard, sparseInfo{
		ClientInfo:   info.ClientInfo,
		ClientErrors: info.ClientErrors,
	})
	// If executing the template failed, it means the template needs
	// server-side information as well. If it succeeded without server-side
	// information, we don't need to make API calls to collect that information.
	return err != nil
}
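needsServerInfo above decides whether the daemon must be contacted by test-rendering the user's --format template against a struct that only carries client-side fields. The stand-alone sketch below shows the same probing trick with text/template; the struct fields and helper name are invented for the example, and the placeholder regexp pre-check from the real code is omitted.

// Sketch: if executing the template against a client-only struct fails, the
// template must reference server-side data.
package main

import (
	"fmt"
	"io/ioutil"
	"text/template"
)

type clientOnly struct {
	Context string
	Debug   bool
}

func needsServer(format string) bool {
	tmpl, err := template.New("probe").Parse(format)
	if err != nil {
		return true // let the real rendering path report the parse error
	}
	// Executing against a struct errors out on unknown fields such as
	// .ServerVersion, which is exactly the signal we want.
	return tmpl.Execute(ioutil.Discard, clientOnly{Context: "default"}) != nil
}

func main() {
	fmt.Println(needsServer("{{.Context}}"))       // false: client-side only
	fmt.Println(needsServer("{{.ServerVersion}}")) // true: needs the daemon
}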
func prettyPrintInfo(dockerCli command.Cli, info info) error {
	fmt.Fprintln(dockerCli.Out(), "Client:")
	if info.ClientInfo != nil {
		prettyPrintClientInfo(dockerCli, *info.ClientInfo)
	}
	for _, err := range info.ClientErrors {
		fmt.Fprintln(dockerCli.Out(), "ERROR:", err)
		fmt.Fprintln(dockerCli.Err(), "ERROR:", err)
	}

	fmt.Fprintln(dockerCli.Out())
@ -104,7 +148,7 @@ func prettyPrintInfo(dockerCli command.Cli, info info) error {
|
|||
}
|
||||
}
|
||||
for _, err := range info.ServerErrors {
|
||||
fmt.Fprintln(dockerCli.Out(), "ERROR:", err)
|
||||
fmt.Fprintln(dockerCli.Err(), "ERROR:", err)
|
||||
}
|
||||
|
||||
if len(info.ServerErrors) > 0 || len(info.ClientErrors) > 0 {
|
||||
|
@ -211,9 +255,6 @@ func prettyPrintServerInfo(dockerCli command.Cli, info types.Info) []error {
|
|||
for _, o := range so.Options {
|
||||
switch o.Key {
|
||||
case "profile":
|
||||
if o.Value != "default" {
|
||||
fmt.Fprintln(dockerCli.Err(), " WARNING: You're not using the default seccomp profile")
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), " Profile:", o.Value)
|
||||
}
|
||||
}
|
||||
|
@ -378,6 +419,9 @@ func printSwarmInfo(dockerCli command.Cli, info types.Info) {
|
|||
}
|
||||
|
||||
func printServerWarnings(dockerCli command.Cli, info types.Info) {
|
||||
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.42") {
|
||||
printSecurityOptionsWarnings(dockerCli, info)
|
||||
}
|
||||
if len(info.Warnings) > 0 {
|
||||
fmt.Fprintln(dockerCli.Err(), strings.Join(info.Warnings, "\n"))
|
||||
return
|
||||
|
@ -387,6 +431,29 @@ func printServerWarnings(dockerCli command.Cli, info types.Info) {
|
|||
printServerWarningsLegacy(dockerCli, info)
|
||||
}
|
||||
|
||||
// printSecurityOptionsWarnings prints warnings based on the security options
|
||||
// returned by the daemon.
|
||||
// DEPRECATED: warnings are now generated by the daemon, and returned in
|
||||
// info.Warnings. This function is used to provide backward compatibility with
|
||||
// daemons that do not provide these warnings. No new warnings should be added
|
||||
// here.
|
||||
func printSecurityOptionsWarnings(dockerCli command.Cli, info types.Info) {
|
||||
if info.OSType == "windows" {
|
||||
return
|
||||
}
|
||||
kvs, _ := types.DecodeSecurityOptions(info.SecurityOptions)
|
||||
for _, so := range kvs {
|
||||
if so.Name != "seccomp" {
|
||||
continue
|
||||
}
|
||||
for _, o := range so.Options {
|
||||
if o.Key == "profile" && o.Value != "default" && o.Value != "builtin" {
|
||||
_, _ = fmt.Fprintln(dockerCli.Err(), "WARNING: You're not using the default seccomp profile")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// printServerWarningsLegacy generates warnings based on information returned by the daemon.
|
||||
// DEPRECATED: warnings are now generated by the daemon, and returned in
|
||||
// info.Warnings. This function is used to provide backward compatibility with
|
||||
|
@ -402,10 +469,7 @@ func printServerWarningsLegacy(dockerCli command.Cli, info types.Info) {
|
|||
if !info.SwapLimit {
|
||||
fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support")
|
||||
}
|
||||
if !info.KernelMemory {
|
||||
fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support")
|
||||
}
|
||||
if !info.OomKillDisable {
|
||||
if !info.OomKillDisable && info.CgroupVersion != "2" {
|
||||
fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support")
|
||||
}
|
||||
if !info.CPUCfsQuota {
|
||||
|
|
|
@ -248,7 +248,6 @@ func TestPrettyPrintInfo(t *testing.T) {
|
|||
sampleInfoDaemonWarnings.Warnings = []string{
|
||||
"WARNING: No memory limit support",
|
||||
"WARNING: No swap limit support",
|
||||
"WARNING: No kernel memory limit support",
|
||||
"WARNING: No oom kill disable support",
|
||||
"WARNING: No cpu cfs quota support",
|
||||
"WARNING: No cpu cfs period support",
|
||||
|
@ -343,17 +342,19 @@ func TestPrettyPrintInfo(t *testing.T) {
|
|||
},
|
||||
prettyGolden: "docker-info-errors",
|
||||
jsonGolden: "docker-info-errors",
|
||||
warningsGolden: "docker-info-errors-stderr",
|
||||
expectedError: "errors pretty printing info",
|
||||
},
|
||||
{
|
||||
doc: "bad security info",
|
||||
dockerInfo: info{
|
||||
Info: &sampleInfoBadSecurity,
|
||||
ServerErrors: []string{"an error happened"},
|
||||
ServerErrors: []string{"a server error occurred"},
|
||||
ClientInfo: &clientInfo{Debug: false},
|
||||
},
|
||||
prettyGolden: "docker-info-badsec",
|
||||
jsonGolden: "docker-info-badsec",
|
||||
warningsGolden: "docker-info-badsec-stderr",
|
||||
expectedError: "errors pretty printing info",
|
||||
},
|
||||
} {
|
||||
|
@ -421,3 +422,55 @@ func TestFormatInfo(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNeedsServerInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
doc string
|
||||
template string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
doc: "no template",
|
||||
template: "",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
doc: "JSON",
|
||||
template: "json",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
doc: "JSON (all fields)",
|
||||
template: "{{json .}}",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
doc: "JSON (Server ID)",
|
||||
template: "{{json .ID}}",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
doc: "ClientInfo",
|
||||
template: "{{json .ClientInfo}}",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
doc: "JSON ClientInfo",
|
||||
template: "{{json .ClientInfo}}",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
doc: "JSON (Active context)",
|
||||
template: "{{json .ClientInfo.Context}}",
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
inf := info{ClientInfo: &clientInfo{}}
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
t.Run(tc.doc, func(t *testing.T) {
|
||||
assert.Equal(t, needsServerInfo(tc.template, inf), tc.expected)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
ERROR: a server error occurred
|
||||
ERROR: invalid empty security option
|
|
@ -51,5 +51,3 @@ Server:
|
|||
Default Address Pools:
|
||||
Base: 10.123.0.0/16, Size: 24
|
||||
|
||||
ERROR: an error happened
|
||||
ERROR: invalid empty security option
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 LTS","OSVersion":"","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["foo="],"DefaultAddressPools":[{"Base":"10.123.0.0/16","Size":24}],"Warnings":null,"ServerErrors":["an error happened"],"ClientInfo":{"Debug":false,"Context":"","Plugins":[],"Warnings":null}}
|
||||
{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 LTS","OSVersion":"","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["foo="],"DefaultAddressPools":[{"Base":"10.123.0.0/16","Size":24}],"Warnings":null,"ServerErrors":["a server error occurred"],"ClientInfo":{"Debug":false,"Context":"","Plugins":[],"Warnings":null}}
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 LTS","OSVersion":"","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"DefaultAddressPools":[{"Base":"10.123.0.0/16","Size":24}],"Warnings":["WARNING: No memory limit support","WARNING: No swap limit support","WARNING: No kernel memory limit support","WARNING: No oom kill disable support","WARNING: No cpu cfs quota support","WARNING: No cpu cfs period support","WARNING: No cpu shares support","WARNING: No cpuset support","WARNING: IPv4 forwarding is disabled","WARNING: bridge-nf-call-iptables is disabled","WARNING: bridge-nf-call-ip6tables is disabled"],"ClientInfo":{"Debug":true,"Context":"default","Plugins":[],"Warnings":null}}
|
||||
{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 LTS","OSVersion":"","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"DefaultAddressPools":[{"Base":"10.123.0.0/16","Size":24}],"Warnings":["WARNING: No memory limit support","WARNING: No swap limit support","WARNING: No oom kill disable support","WARNING: No cpu cfs quota support","WARNING: No cpu cfs period support","WARNING: No cpu shares support","WARNING: No cpuset support","WARNING: IPv4 forwarding is disabled","WARNING: bridge-nf-call-iptables is disabled","WARNING: bridge-nf-call-ip6tables is disabled"],"ClientInfo":{"Debug":true,"Context":"default","Plugins":[],"Warnings":null}}
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
ERROR: a client error occurred
|
||||
ERROR: a server error occurred
|
|
@ -1,5 +1,3 @@
|
|||
Client:
|
||||
ERROR: a client error occurred
|
||||
|
||||
Server:
|
||||
ERROR: a server error occurred
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
WARNING: No memory limit support
|
||||
WARNING: No swap limit support
|
||||
WARNING: No kernel memory limit support
|
||||
WARNING: No oom kill disable support
|
||||
WARNING: No cpu cfs quota support
|
||||
WARNING: No cpu cfs period support
|
||||
|
|
|
@@ -114,6 +114,7 @@ func NewVersionCommand(dockerCli command.Cli) *cobra.Command {
	flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
	flags.StringVar(&opts.kubeConfig, "kubeconfig", "", "Kubernetes config file")
	flags.SetAnnotation("kubeconfig", "kubernetes", nil)
	flags.SetAnnotation("kubeconfig", "deprecated", nil)

	return cmd
}
@ -1,649 +0,0 @@
|
|||
// Code generated by "esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data"; DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type _escLocalFS struct{}
|
||||
|
||||
var _escLocal _escLocalFS
|
||||
|
||||
type _escStaticFS struct{}
|
||||
|
||||
var _escStatic _escStaticFS
|
||||
|
||||
type _escDirectory struct {
|
||||
fs http.FileSystem
|
||||
name string
|
||||
}
|
||||
|
||||
type _escFile struct {
|
||||
compressed string
|
||||
size int64
|
||||
modtime int64
|
||||
local string
|
||||
isDir bool
|
||||
|
||||
once sync.Once
|
||||
data []byte
|
||||
name string
|
||||
}
|
||||
|
||||
func (_escLocalFS) Open(name string) (http.File, error) {
|
||||
f, present := _escData[path.Clean(name)]
|
||||
if !present {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return os.Open(f.local)
|
||||
}
|
||||
|
||||
func (_escStaticFS) prepare(name string) (*_escFile, error) {
|
||||
f, present := _escData[path.Clean(name)]
|
||||
if !present {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
var err error
|
||||
f.once.Do(func() {
|
||||
f.name = path.Base(name)
|
||||
if f.size == 0 {
|
||||
return
|
||||
}
|
||||
var gr *gzip.Reader
|
||||
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
|
||||
gr, err = gzip.NewReader(b64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
f.data, err = ioutil.ReadAll(gr)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (fs _escStaticFS) Open(name string) (http.File, error) {
|
||||
f, err := fs.prepare(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.File()
|
||||
}
|
||||
|
||||
func (dir _escDirectory) Open(name string) (http.File, error) {
|
||||
return dir.fs.Open(dir.name + name)
|
||||
}
|
||||
|
||||
func (f *_escFile) File() (http.File, error) {
|
||||
type httpFile struct {
|
||||
*bytes.Reader
|
||||
*_escFile
|
||||
}
|
||||
return &httpFile{
|
||||
Reader: bytes.NewReader(f.data),
|
||||
_escFile: f,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if !f.isDir {
|
||||
return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name)
|
||||
}
|
||||
|
||||
fis, ok := _escDirs[f.local]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local)
|
||||
}
|
||||
limit := count
|
||||
if count <= 0 || limit > len(fis) {
|
||||
limit = len(fis)
|
||||
}
|
||||
|
||||
if len(fis) == 0 && count > 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return fis[0:limit], nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Stat() (os.FileInfo, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *_escFile) Size() int64 {
|
||||
return f.size
|
||||
}
|
||||
|
||||
func (f *_escFile) Mode() os.FileMode {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *_escFile) ModTime() time.Time {
|
||||
return time.Unix(f.modtime, 0)
|
||||
}
|
||||
|
||||
func (f *_escFile) IsDir() bool {
|
||||
return f.isDir
|
||||
}
|
||||
|
||||
func (f *_escFile) Sys() interface{} {
|
||||
return f
|
||||
}
|
||||
|
||||
// _escFS returns a http.Filesystem for the embedded assets. If useLocal is true,
|
||||
// the filesystem's contents are instead used.
|
||||
func _escFS(useLocal bool) http.FileSystem {
|
||||
if useLocal {
|
||||
return _escLocal
|
||||
}
|
||||
return _escStatic
|
||||
}
|
||||
|
||||
// _escDir returns a http.Filesystem for the embedded assets on a given prefix dir.
|
||||
// If useLocal is true, the filesystem's contents are instead used.
|
||||
func _escDir(useLocal bool, name string) http.FileSystem {
|
||||
if useLocal {
|
||||
return _escDirectory{fs: _escLocal, name: name}
|
||||
}
|
||||
return _escDirectory{fs: _escStatic, name: name}
|
||||
}
|
||||
|
||||
// _escFSByte returns the named file from the embedded assets. If useLocal is
|
||||
// true, the filesystem's contents are instead used.
|
||||
func _escFSByte(useLocal bool, name string) ([]byte, error) {
|
||||
if useLocal {
|
||||
f, err := _escLocal.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := ioutil.ReadAll(f)
|
||||
_ = f.Close()
|
||||
return b, err
|
||||
}
|
||||
f, err := _escStatic.prepare(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.data, nil
|
||||
}
|
||||
|
||||
// _escFSMustByte is the same as _escFSByte, but panics if name is not present.
|
||||
func _escFSMustByte(useLocal bool, name string) []byte {
|
||||
b, err := _escFSByte(useLocal, name)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// _escFSString is the string version of _escFSByte.
|
||||
func _escFSString(useLocal bool, name string) (string, error) {
|
||||
b, err := _escFSByte(useLocal, name)
|
||||
return string(b), err
|
||||
}
|
||||
|
||||
// _escFSMustString is the string version of _escFSMustByte.
|
||||
func _escFSMustString(useLocal bool, name string) string {
|
||||
return string(_escFSMustByte(useLocal, name))
|
||||
}
|
||||
|
||||
var _escData = map[string]*_escFile{
|
||||
|
||||
"/data/config_schema_v3.0.json": {
|
||||
name: "config_schema_v3.0.json",
|
||||
local: "data/config_schema_v3.0.json",
|
||||
size: 11063,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xaT4/buA6/+1MYam/NzBR4xQNeb++4p93zDlxDsZlEHVlSKTmdtMh3X8iOHduRJSVx
|
||||
t8ViByiQyiTFf/qRov09SVPyVhc7qCj5mJKdMerj09NnLcVDu/oocftUIt2Yh/cfntq1N2Rl+VhpWQop
|
||||
Nmybt0/y/X8e3z9a9pbEHBRYIrn+DIVp1xC+1AzBMj+TPaBmUpBsldhnCqUCNAw0+Zha5dK0J+kWBmK1
|
||||
QSa2pFk+NhLSlGjAPSsGEnpV3zyd5T/1ZKup1IGyzbqixgCKPy51ax5/eqYP3/7/8Of7h/895g/Zu7ej
|
||||
x9a/CJt2+xI2TDDDpOj3Jz3l8fTr2G9My7Ihpny094ZyDWObBZivEl9CNvdkP8nm0/4Om8fm7CWvq2AE
|
||||
O6qfZEy7/X3xSzqjvbQtxWDvRsFRtrtc5cq2eV/1zprxUgmKy4Ndm/FHS1CBMKR3QZqSdc14OfWoFPC7
|
||||
FfE8WEzT79ODPZDTPB/9bz7g/fMZW/rnhRQGXk1jlH/r1gWyeAHcMA6xHBS32uMyzrTJJeYlKww5Ttgv
|
||||
5IXzaZqK9i9LHAJJQVVOy3JkB0WkB7JKCTNQabeJKakF+1LDbycSgzVM5ZYo1fKCtyhrlSuKNsH87ieF
|
||||
rCoqlsq6a+yI8LwUhjIBmAtahRLJnjoQpc7b+udNo03e8uuJgL4YLhqPUvgSuxVjU9vqRiaMuQaKxe5G
|
||||
fllRJmJ8B8LgQUnW5ssvlwgg9nmPJVe7AcSeoRRVdxpiAKYHecv/qqSGqWMmBg4f9aYmLgh+7gxfpUTU
|
||||
1RrQtnQjyo3Eilplu72TGaxzZN7QgUMbbFmnPOdMvCyf4vBqkOY7qY2+wsU9+w4oN7tiB8WLh31INeKW
|
||||
2sQkOavoNkykihAJp2vgN9m5qPMHYuV2a0nnMu6ic4ms+SWyPWBsAZfq3HClF3+hBiSi+xyRfnpsm0/P
|
||||
qWp+cU6yo0PE5dp4ZWJhXEMxikpFC9s3IGgdyqhTs59XspxL0AtiHYvUVxfC2/rHqNAFLxABa+bUuybL
|
||||
YlL/HHbOqAZ9W0dxIY2p/YfInHDx/tfLO8M6KzO+Rw6IOqvSHDeXIlkSOn8/tIVXrJzHigYhhgdMSTT6
|
||||
55T7duu7q71CtmcctjC+tayl5EDFCHoQaJlLwQ8RlNpQDF4oNBQ1MnPIpTKL9xl6V+WafYNxNM94fxKU
|
||||
jXgOujC31WttSiZyqUAEvaONVPkWaQG5AmSydBm4Gsa6rJHa/S/FaLYVlIccbSq1ufFiYUw43DVnFZs/
|
||||
Bw6AjagBLf67Yd8D+WdNmTCwBXQhpafr8DcdEd3GjuI4oB492jjKjXEzJJG4Oh7+NvJWJ0UyJ/1VcD5V
|
||||
I5tF1KMTUWsdbAwbGqF9TU1POphiLooXtlGyh6Bk6KuZt8yRJ3cW30RxSBqcwPqnm6HJI9N0PZm5uQ63
|
||||
zUbchzEGwSCbxOWEtiM8Af1rDg4Mq0DWxhv7ZMBEBpPZQFAHlNOYPvdB7fqLYOBiDgmC4qygOgREd1xQ
|
||||
a1VSA3n7ouoq6PdgvqJIOQfOdBWDoaQETg83lc+2m6KM1wg5LczpXVgg50glBTMSb9+yoq95t21D4jww
|
||||
s21d7N1y2IrJGgvQS4XoXOtnMqbb8cJ0BG2RpL/6B/kX9YJtSHMlOSsOS7mikKLVIyZz7kxVmze2Z6qU
|
||||
0VFH4ysTpfx6xYbLeVtxWsAEGO91tDZImTBX1/17zbqj7PeJHCgPPV34letMSShUHRwcVVBJPCzd2nTv
|
||||
ngMmdmQLlL+oSeOJyl4sF7+WhKeJWbgpZopWS52O6NkrcRbrwMzCM7eIG6OFb01E12sBJm5U5XwhHH+f
|
||||
Oc7fXu4Dve61yUxUn/vmetX7KosO8ew7i+X0b/r86SzBdSG4smW8A1xO34IEsOVE9S+0/EMS8e/Lr8nY
|
||||
a5BnlzdSX0pEz/uT4QW0V2NK5vgkbwzLvjFH4p//TjY9OdFv+YIZ/vjOU3x87+V+EGovMEJyx3TSsSb9
|
||||
xHv6XdkMqA34L74ys3aKw8XE5Pt4DNh+IZaN/DMhad9yDyAlGzbxc2F0fns2HUJ234BlbrgaD1QS+++Y
|
||||
/BUAAP//72YpJjcrAAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.1.json": {
|
||||
name: "config_schema_v3.1.json",
|
||||
local: "data/config_schema_v3.1.json",
|
||||
size: 12209,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+waS4/bNvOuXyEwucW7mw9fUKC59dhTe+7CEbjSWGaWIpkh5awT+L8X1Mt6krStdIOi
|
||||
CwRwqJnhvGc45Pcojslbne6hoORjTPbGqI8PD5+1FHf16r3E/CFDujN37z881GtvyMbiscyipFLsWJ7U
|
||||
X5LD/+//d2/RaxBzVGCB5NNnSE29hvClZAgW+ZEcADWTgmw3kf2mUCpAw0CTj7FlLo47kHahR1YbZCIn
|
||||
1fKpohDHRAMeWNqj0LH65uFM/6ED24yp9pit1hU1BlD8OeWt+vzpkd59++3ur/d3v94nd9t3bwefrX4R
|
||||
dvX2GeyYYIZJ0e1POshT8+vUbUyzrAKmfLD3jnINQ5kFmK8Sn30yd2CvJHOz/4zMQ3EOkpeF14It1CsJ
|
||||
U2+/jv00pAjG77I11Kt5rN3+NoGjVmgnbA3R27ticBDec6qaC69lXXXKWtBSBorLo11b0EcNUIAwpFNB
|
||||
HJOnkvFsrFEp4A9L4rG3GMffx5msR6f6PvjfssG77wuydN9TKQy8mEoo99a1CmT6DLhjHEIxKObaoTLO
|
||||
tEkkJhlLDTmN0Cf0/P40dkX7t41mCJKUqoRm2UAOikiPZBMTZqDQ8yLGpBTsSwm/NyAGSxjTzVCq9Qnn
|
||||
KEuVKIrWwdzqJ6ksCirW8rpL5AjQvBSGMgGYCFr4HMlGHYhMJ3XBd7rRLqnx9YhAV/1XtUcmXI5dk7Gu
|
||||
bXkjI8REA8V0fyW+LCgTIboDYfCoJKv95adzBBCHpMslF6sBxIGhFEUbDSEJpkvyFv9FSQ1jxYwE7H/q
|
||||
RI3mUvBjK/gmJqIsngBtDzuA3EksqGW23TtayHUzntdXYF8GW9YpTzgTz+u7OLwYpMleaqMvUHGHvgfK
|
||||
zT7dQ/rsQO9DDbClNiFOzgqa+4FU6gPh9An4VXKuqvweWZnnFnTJ4yadS2DNz5AdAEMLuFTnhiue/Pka
|
||||
kIDucwD66b5uPh1RVf3inGxPMySma8OVkYRhDcXAKgVNbd+AoLXPo5rTTVLIbMlBJ8A6NFNfXAiv6x+D
|
||||
TOc9QHikWWLvEi8Lcf2z2TmjGvR1HcWEGlOHD4E+MYf7ixN3AXWRZniP7CF1ZqUKtzlGtpEv/n5oC69Y
|
||||
tpwrqgzRDzAl0ejXKff11jdXe4XswDjkMDy1PEnJgYpB6kGgWSIFPwZAakPRe6DQkJbIzDGRyqzeZ+h9
|
||||
kWj2DYbWPOf7htB2xNBoQnKlQZdSkj+MZxKhN1H5UxTRssQUghMJMRRzMOHw5TBs3MD5JcCTQteY8OTP
|
||||
E9FSXjnNhr4+6tRc161pkzGRSAXCGxvaSJXkSFNIFCCTs6rY9CM9K5Ha/adkNMsF5b4wM4XaXXmsNMYf
|
||||
7CVnBVsOmhmvDegA6uo/X/QdBf/MKRMGcusmU6dy9JzuljOg19xTHBrUwUcTmDszjxAFVtXhXUdFb9Mw
|
||||
sp2Fv6iYj9nYLtbT+aAqtfdYUMEI7WppO9De0H7VamHbZBsEGUNXx3TN2H10YnXNk/ug3vm7e7btmzsz
|
||||
TZ9GE9e54LbeiAd/jkEwyEZ2aRN1P5+A/jnHRoYVIEvjtH3UQyK9ubzHqD3IsU0fO6O23aXXcCFBgqA4
|
||||
S6n2JaIbxhOlyqiBpL6XvSj1O3K+okg5B850EZJDSQacHq8qn3UvTRkvERKamubq1+NzpJCCGYnXb1nQ
|
||||
l6TdtgLxdTbDpj50stBvxKvGT69lonOtX/CYdseJ6AjaZpJu8OPFX1UL9jiSKMlZelxLFakUNR8hnnOj
|
||||
q1q/sT1ToYwOCo2vTGTy6wUbrqdtxWkKo8R4q6K1QcqEubju3yrWDWW/c2RPeejg/BfuCyUhVaV3bFhA
|
||||
IfG4dmvTPrXwiNiCrVD+gubMDVQi1frHEv8seetvipmixVrRETx5J7PF2jPgcAw51ptNlE8CTNigcvY5
|
||||
QPh55rR8erkt6bWXZgtWfeya602nq22wiRdvrNbjv+rzx7OEuQPBhS3jDcmlefrkyS0N1H+p5V/iiP+c
|
||||
fzUvzbxPvCqoq4tzwLumn8Bmr22K4QSyZ5LpcMClyeCLt6g/C+jYGIPNPAYeVkjXxClyX8SMNm2U6JZ8
|
||||
xWRz/87RB7guyH9QAV1hmjdv09HhIepuesYPPBfiv4c/ee5p5RTHyfDq+3AiWz/V3A70MwKpn5v0svu2
|
||||
f55aMuPsI9DxPLh9jLlw/TGcbUX23yn6OwAA//8cyfJJsS8AAA==
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.2.json": {
|
||||
name: "config_schema_v3.2.json",
|
||||
local: "data/config_schema_v3.2.json",
|
||||
size: 13755,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xbzW7cOBK+91MISm7xT7AbLLC57XFPM+cxFIFNVasZUyRTpNruBH73gaSWWpREkeqW
|
||||
48xgDARwqGKR9cuvWPSPTRTF7zXdQ0Hiz1G8N0Z9vr//qqW4bUbvJOb3GZKduf346b4ZexffVPNYVk2h
|
||||
UuxYnjZf0sO/7/51V01vSMxRQUUkt1+BmmYM4VvJEKrJD/EBUDMp4uRmU31TKBWgYaDjz1G1uSjqSNqB
|
||||
HlttkIk8rodfag5RFGvAA6M9Dt1W392f+d93ZDdDrr3N1uOKGAMofh/vrf785YHcfv/f7R8fb/97l94m
|
||||
H95bnyv9Iuya5TPYMcEMk6JbP+4oX06/vXQLkyyriQm31t4RrsGWWYB5kvjok7kjeyOZT+tPyGyLc5C8
|
||||
LLwWbKneSJhm+XXsp4EiGL/LNlRv5rHV8tcJvGmFnqVtKHpr1xu0wntKVVPh5dZVpyyHljJQXB6rMYc+
|
||||
GoIChIk7FURRvC0Zz4YalQJ+q1g89Aaj6Mcwk/X41N+t/7kN3n13yNJ9p1IYeDa1UPNLNyqQ9BFwxziE
|
||||
ziCY6xmVcaZNKjHNGDWT8ymhe0h3KAsvl13a7EPHLwM+I8Z+xxz6dPWTbCYYxpSolGSZpRCCSI7xTRQz
|
||||
A4We1lUUl4J9K+H/JxKDJQz5ZijV+oxzlKVKFcHKU+ftGFNZFESs5b5L5AjQvBSGMAGYClL4PLIKXxCZ
|
||||
ThvkEOpJFoMORqxqj0zMRUjDpoqRam/xYGKqgSDdXzhfFoSJEN2BMHhUkjX+8ss5AohD2iWlxWoAcWAo
|
||||
RdFGQ1im6s1/VlLDUDEDAfufOlE3U7n8oRX8JopFWWwBKzBsUe4kFqTabLv2xpHrJjyvr8C+DBU+IDzl
|
||||
TDyu7+LwbJCke6nNJYdBvAfCzZ7ugT7OTO9TWbOlNiFOzgqS+4kU9ZFwsgV+kZyrKr/HVuZ5ReryuBEE
|
||||
CgQPGbIDYCgSkOqM3KLRjw/JBMBYi/TLXYNiZ6Kq/o3zOHmZYDEes0cGEoYBCssqBaEVbkDQ2udRpzIp
|
||||
LWTmctARsQ7N1IsPwsuAaJDpvJWIRxrX9pZ4WYjrn83OGdGgL0MUI25MHT4F+sTU3P/MznVMdfIMx8ge
|
||||
Vuet1OE2tZFk44u/V4XwimXuXFFniH6AKYlGX3/cuzy4r642T50P/GbxkTZG5g6atFkeH/7IiGey1FRE
|
||||
EszBLkOYMJADOiaocsuZ3kO2ZA5KI6nkYYExWceGB4PNMLkamylkB8YhH0i8lZIDEdZBgUCyVAp+DKDU
|
||||
hqC3/NNAS2TmmEplVkeFel+kmn0HO/bOXn9ilAw2NLgYe7Xwc7ntK4WNliXS6wJnlr60k9w8cb6EeBTw
|
||||
JxO++LO6O1QmE7U+amouw9baZEykUoHwxoY2UqU5EgqpAmRyUhVWgs1KJNX6Yzaa5YJwX5iZQu0uvAQw
|
||||
xh/sJWcFcwfNhNcG4LUGq01DtBl4FpSyZyqE+QIhoDLYE1xwdNSBuXOcT5tADGS3uGp+N6eNJJP0i6DX
|
||||
cBuJE/1MB1WpvUVcTSN0GnC0T/Rq/hoZ2rJRTZ5clMdPKwXmztfO+sGIwG4KaKYNCHoMX2jLRrfES+uu
|
||||
sKqrpiJ5k2+DC53wWD218X6KKEJSqRymCRfjlQHs4KZjBra6MsyTxMfq/MoYzlnskkbp4GpwrgPYJ/V2
|
||||
TOe7kb5OIdNkO+iRTZ3L1UGCBz88QDDIBp2HFmP1oQDoX/N+3rACZGlmbb/pTYp7nVSPUXuUQ5s+dEZt
|
||||
y3iv4ULONxBZ3QkJOgwRFGeUaB/guOLSuFQZMZA2z24WQbwZbKcIEs6BM12EYKU4A06OF8HkpqFBGC8R
|
||||
UkKdWX0wo5CCGYmXL1mQ57RdtibxVTB28R5639svuOujXq9lojOmd3hMu+JIdARdpZ3uOt47f1UtGIIm
|
||||
VZKzBl2soQoqRbOPEM+50lUrv6lqo0IZHRQaT0xk8mnBgutpW3FCYZBFr1W0NkiYMIvbVNeKdQVG6BzZ
|
||||
c5Z0dP73VI7zg6rS28wpoJB4XBsHtS/pPCK2ZCuclUHdvxNVKtX61w/+Dl/iL36ZIsVa0RHcD40nD2tP
|
||||
mTxTKq93B1luBZg3uCVfMem1TxkcVn3okPhNp6sk2MTOdwTr7b8uCoZ3hlPVAzGG0H1QobEQXV6Rh0bV
|
||||
82QaOlH9k4X+Jj778/zr9ObY+9i3prr4HA944foL2OytTWE3JXomGV86zGly6avexN7GkGziz0Lsw3Su
|
||||
ZbmZv+QaLHpS4rzkKyabuw8zkGHuhdMrnbUrtIOnbTqoMzZd83f41N8R/735o4f/lZziOLoU+2E3AJpH
|
||||
+4mlnwFJ816wl92TfunlMuPknwMM2w/ts3xHR9S+M9tU/142fwYAAP//CLvrnLs1AAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.3.json": {
|
||||
name: "config_schema_v3.3.json",
|
||||
local: "data/config_schema_v3.3.json",
|
||||
size: 15491,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+wbzW7bPPLupzDU3uokBVossL3tcU+75w1UgabGNhuKZIeUE7fIuy8oybJEUSJtK036
|
||||
fS1QIJaGQ87/H/VzsVwm7zXdQUGSL8tkZ4z6cnf3TUtxUz+9lbi9y5FszM3Hz3f1s3fJyq5juV1Cpdiw
|
||||
bVa/yfafbj/d2uU1iDkosEBy/Q2oqZ8hfC8Zgl18n+wBNZMiSVcL+06hVICGgU6+LO3hlssW5Pigg1Yb
|
||||
ZGKbVI+fKwzLZaIB94x2MLRHfXd3wn/Xgq1crJ3DVs8VMQZQ/Hd4tur113ty8+NfN//7ePPP2+wm/fC+
|
||||
99ryF2FTb5/DhglmmBTt/kkL+dz89dxuTPK8Aia8t/eGcA19mgWYR4kPIZpbsFeiudnfQ3OfnL3kZRGU
|
||||
4BHqlYipt59HfhooggmrbA31ahprt5+H4NprhAg+Qr0SwfX21xG8OBI9CVtDdPauDtjzZz5W+fzJOK9a
|
||||
Zo1wKQfF5cE+G+FHDVCAMEnLguUyWZeM5y5HpYD/WBT3nYfL5U/XdXfwVO97v8YF3r4foaV9T6Uw8GQq
|
||||
oqa3rlkg6QPghnGIXUGw1uIRlnGmTSYxyxk13vWcrIFfhYESuoNsg7IIYtlkNSU6eXbwDBCHVdu1Cvsv
|
||||
XXgQJpSojOR5j6UEkRyS1TJhBgrt5/YyKQX7XsK/GxCDJbh4c5RqfsRblKXKFEGr69OakFBZFETMZQDn
|
||||
0BHB+YGb7VlVs0f3Vbtb71gj1CwjbMRjlAGjDpu19YqyRBprpXZPglsw8fAly+OBt+cAFzLvn1uUxRpw
|
||||
YJJ9yxr+The+N470DWECMBOkgKAeI+QgDCM80wromM54hDYlriTSmSYIW6YNHrywixFPFeelulTmoEDk
|
||||
OqsLilhv2UPQVhez+pxcTEWBGo2NA/ZsibMw00CQ7i5cLwvCRIyGgDB4UJLVPvHNOTsQ+6zVtrPZAGLP
|
||||
UIri6PHjonFn/ZOSGq73tM2K+yPhq9ZBpI7FbCQWxB72uPeolQw1r8vALg02iyY840w8zK/i8GSQZDup
|
||||
zSUJT7IDws2O7oA+TCzvQvVWS21ilJwVZBsGUjQEcnFil8zK/A5aud1a0DGNGxQKkSl2jmwPGJsvS3Wq
|
||||
b3xhOpQaBIu9HujX27rWm7Cq6i/Ok/TZgyIUk90gFhuOTlIpCLW5MYLWIY1quifZIIE4wQ6AdaynPjsQ
|
||||
XlauRYkuWK8H09Kx1DNey+LS0KPYOSMa9GUZxQAbU/vPkTrhW/uPybUjS0dxxteBAVTdfJdz70HScAb8
|
||||
kmWq6mfxfV9ReYiugSmJ5pcUVic/dQr49ebDWssVd9SilynQJrxUXHnGhIGtrYv8QaBcc6Z3kJ+zBqWR
|
||||
VPI4w/B2e+KNYaJYuyg3U8j2jMPWoXgtJQcieoECgeSZFPwQAakNwWCLQwMtkZlDJpWZPSvUuyLT7Af0
|
||||
be+k9Q2i1DmQ0y//09f4+/Q19EFTc1lurU3ORCYViKBtaCNVtkVCIVOATHpZ0XOweYnE7j9Eo9lWEB4y
|
||||
M1OozYVNAGPCxl5yVrBxo/E2doL5Wp2r+VO0ifQsymVPVAjTBUJEZbAjeEboqAxzMxKfFpE5UH/yXeFb
|
||||
NQdJvfBnpV7uMdLR7MdvVKUOFnEVjNBZRGj3jHB/Dw/dk1EFnl7kx5udIn3nS3v96IygPzrTTBsQ9BC/
|
||||
0ZoNJiHn1l1xVVcFRba1v40udOJttZnu/xJShKRSjYgmnowXTmCdTsdE2jrmYR4lPtj4lTOcktgl1wmc
|
||||
1uDUnLwLGrxXMD2zD83TmSZrZ/jhi8s2kOA+nB4gGGTO5OGYY3VTAdBvsz9vWAGyNJOyX3QWJZ37BgGh
|
||||
diBdmd63Qj2W8UHBxcQ3EHk1CYkKhgiKM0p0KOG4omlcqpwYyJorKzPN7hRBwjlwpouYXCnJgZPDRWly
|
||||
PdAgjJcIGaGjXt1ZUUjBjMTLtyzIU3bctgIJVTBXjh8R6lCv5xLRKacf0Zjjjp6Bq7Zup23HB9fPygVD
|
||||
0GRKclZnF3OwgkpRnyNGc65UVas3tjYqlNFRpvHIRC4fz9hwPm4rTig4XvRaRmuDhAlz9pjKZYtC2ACC
|
||||
oN4MaaJcmCgZ5uvFKJs3v0K38FrhX5FJteYeiLgtXPhu5kiUpaoMjrwKKOT0lZArbkuHSDyCzZBRRM1I
|
||||
G6hMqvmbNOE5aBpuETBFirl8SPTUOPGmNG/BO5RrAeY39A6r4YWPEanet/XKquVVGi3i0dsW852/Kp3c
|
||||
zqqvxiLGELqLKsfOzMGv8EODHoPXDTVQf7zQX0Rnf51+NR9sBD8cqKAujuMRFzzfgMxeWRSDIOYVRQP1
|
||||
RxQvahX9KVpHJMMu2RQnz/3UIu0fwwXzfN7Yz2umZuyL6a6ss2nDxGnKZ/T7tx8msrepK3kvlPbMcH/B
|
||||
L1OnMF60txXcL7jG7f+4fvA9l6VTHAZd3J/9iVX9LVba448DUl9w7QTatNsrGBOj9ysvd152/NpqZITf
|
||||
b/Iu7P/nxf8DAAD//7pHo+CDPAAA
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.4.json": {
|
||||
name: "config_schema_v3.4.json",
|
||||
local: "data/config_schema_v3.4.json",
|
||||
size: 15874,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xbT2/bOhK/+1MYeu9WOymwxQLb2x73tHvewBVoamyzoUh2SDlxi3z3hURJlihKpG2l
|
||||
Sff1AQ+NpeGQM5w/vxlSPxbLZfKnpgfISfJ5mRyMUZ/v779qKdb26Z3E/X2GZGfWHz/d22d/JKtyHMvK
|
||||
IVSKHdun9k16/Nvdp7tyuCUxJwUlkdx+BWrsM4RvBUMoBz8kR0DNpEg2q0X5TqFUgIaBTj4vy8Utly1J
|
||||
86DDVhtkYp9Uj18qDstlogGPjHY4tEv94/7M/74lW7lcO4utnitiDKD4z3Bt1esvD2T9/Z/r/35c/+Mu
|
||||
XW8+/Nl7XeoXYWenz2DHBDNMinb+pKV8qf96aScmWVYRE96be0e4hr7MAsyTxMeQzC3ZG8lcz++RuS/O
|
||||
UfIiD+5gQ/VGwtjp59k/DRTBhE3WUr2ZxZbTzyOwjRohgRuqNxLYTn+bwItGaP8aky/P6/Lfl4rnJD/L
|
||||
pbO+SohezPOp0xdzxvXZKnREkxkoLk/Vyv06swQ5CJO0alouk23BeOZqXQr4d8niofNwufzhhvcOn+p9
|
||||
79e4UbTvR2Rp31MpDDybSqjpqa0KJH0E3DEOsSMIWksfURln2qQS04xR4x3PyRb4TRwooQdIdyjzIJdd
|
||||
aiXRXkZNBI+U3BDcg1+zDvFgdNi3XLcs/9ssPAwTSlRKsqy3DoJITslqmTADufYLtEwKwb4V8K+axGAB
|
||||
Lt8MpZqf8R5loVJFsHSkaWUnVOY5EXN51yVyRGh+EOd7LlvP0X3VztZb1og0ywgz9Hh8IGKEY0YZcmWB
|
||||
NDYETLuCl75gWTzx/hLiXGb9dYsi3wIOXLLvWcPfm4XvjbP7hjABmAqSQ9COETIQhhGeagV0zGY8mza1
|
||||
XUlkpE4Q9kwbPIWiVW9cXJTqSpmBApHp1FY0l4fiJIO2vJk15mRiKsVYNmWSKdeWOANTDQTp4crxMidM
|
||||
xFgICIMnJZmNie8u2IE4pq21XawGEEeGUuRNxI9L9Z3xz0pquD3S1iMeGsFXbYDYOB6zk5iTcrHN3KNe
|
||||
MrS8rgK7MpQQmfCUM/E4v4nDs0GSHqQ216Cp5ACEmwM9AH2cGN6l6o2W2sQYOcvJPkykaIjkatSYzKr8
|
||||
Dlu535ekYxY3qEIi8XuG7AgYC0mlOhdPvjQdggbBarNH+uXOFpsTXlX9xXmyefGwCOVkN4nFpqPzruSE
|
||||
ltgYQeuQRdXgPx0AiDPtgFjHRuqrapLLa8GorQs2DIKwdAx6xltZHAxttp0zokHfVtx1gsvxU6RN+Mb+
|
||||
fXLsyNBRnvF1YIBVF+9y7l3IJoyAX7NMVX0U348VVYToOpiSaH5KYXWOU+eEbycf1lrudkcNep0CbSJK
|
||||
xZVnTBjYl3WRPwkUW870AbJLxqA0kkoe5xjeVlK8M0wUa1dhM4XsyDjsHYm3UnIgopcoEEiWSsFPEZTa
|
||||
EAy2ODTQApk5pVKZ2VGhPuSpZt+h73tnq68ZbZwFOQ37332Nv05fQ580Nddha20yJlKpQAR9Qxup0j0S
|
||||
CqkCZNKril6AzQok5fxDNprtBeEhNzO52l3ZBDAm7OwFZzkbdxpvYyeI1yxW80O0CXgWFbInKoTpAiGi
|
||||
MjgQvCB1VI65G8lPi0gM1D96r/it6oVsvPQXQS93GZtR9ON3qkIHi7iKRug0IrV7zpB/jQjd26OKfHNV
|
||||
HK9nioydrx31oxFB/1xOM21A0FP8RFs2OAm5tO6Kq7oqKrK38Ta60In31fp6wU8RRUgq1cjWxIvxygDW
|
||||
6XRMwNaxCPMk8bHMXxnDqR275j6D0xqcOoTvkgYvNkxfCAgd1jNNts7hhy8vl4kEj354EMYXCAaZcx7R
|
||||
IK8uQAD9Prv2huUgC3MtuCJoLodn7rWnzt2Kpv8/ZUIdSteCHloTapoGQTOJyaYgsurcJSr1IijOKNEh
|
||||
eHNDi7pQGTGQ1jd0ZjopVAQJ58CZzmOQWZIBJ6er7MYenxDGC4SU0NEc4ozIpWBG4vVT5uQ5baatSAJe
|
||||
a70UMxibE0SRe7CR9Yv1jqE2toSWqv7VD+ozHqwiWBCj5zKHc7UyYp3NjJ6jZF0G1PagITh+Vi3YkCQ5
|
||||
s7hpDlVQKew6Yqz0RrcobbSs+nJldJQbPjGRyafLo+8M2lacUHAi9q2K1gYJE+biAzhXLQphBwiCerHf
|
||||
RCE0UQzN12VSZUXwBn3QWzf/BozYunsgu7d04WuvIxmdqiJ4mJdDLqcvu9xwET0kYkM2A3qJOv2tqVKp
|
||||
5m8/hU94N+HmB1MknyuGRJ+HJ1749B6iQ7EVcddG31l0WA2vsozs6kNbia1aXW2it3j0Hsl866+KQrdn
|
||||
7KseiTGEHqIKzQvx/g1xaNA98YahmmqGKBRzsef/I1L96nb982yw/l4m+E1GRXV1ro+43voO9uyNt2KQ
|
||||
6LxbUVP93opX9Yr+GWJnS4bdwClNRl90WnSbf+0yXDLP16V97DN1w2Ax3ZN2Jq2VOC35jHH/7sMEwpu6
|
||||
kPhK0GiG2xv+PXWK50V7V8P9OG7c/5vxg0/lSjnFadCt/tE/r7OfuW16+nFI7PXeTqLddPsJY9vo/YDO
|
||||
PS1sPmQbucDQbzovyv9fFv8LAAD//+uCPa4CPgAA
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.5.json": {
|
||||
name: "config_schema_v3.5.json",
|
||||
local: "data/config_schema_v3.5.json",
|
||||
size: 16802,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xbSY/jNha++1cISm6pJcBkBpi+zXFOM+cpuAWaepaZokjmkXKX0/B/H0iUVFpIkbZV
|
||||
XdVIBwi6LD0ub/veQurrJknSnzU9QEnST0l6MEZ9enz8XUtxb58+SCwecyR7c//rb4/22U/pXT2O5fUQ
|
||||
KsWeFZl9kx3/9vD3h3q4JTEnBTWR3P0O1NhnCH9UDKEe/JQeATWTIt3ebep3CqUCNAx0+impN5ckPUn3
|
||||
YDCtNshEkTaPz80MSZJqwCOjgxn6rf70+Dr/Y092N511sNnmuSLGAIr/zvfWvP78RO7//Nf9/369/+dD
|
||||
dr/95efR61q+CHu7fA57JphhUvTrpz3luf3r3C9M8rwhJny09p5wDWOeBZgvEp9DPPdk78Rzu76D5zE7
|
||||
R8mrMqjBjuqdmLHLr6M/DRTBhE3WUr2bxdbLr8OwRY0Qwx3VOzFsl7+N4U3HtHuP6eeX+/rfczPn4nx2
|
||||
lsH+GiZGmOcSpwtz/PLsBeqRZA6Ky1Ozc7fMLEEJwqS9mJIk3VWM51OpSwH/qad4GjxMkq9TeB/M07wf
|
||||
/fIbRf/ew0v/nkph4MU0TC0vbUUg6TPgnnGIHUHQWrpHZJxpk0nMckaNczwnO+A3zUAJPUC2R1kGZ9ln
|
||||
lhPtnKhD8EjODcECoiWrD2Wm2Z8juT6lTBgoANO7fuz2PBk7myzsmFOfrv/bbhwTppSojOT5iAmCSE71
|
||||
jpiBUrv5S9JKsD8q+HdLYrCC6bw5SrX+xAXKSmWKYO2Fy7JPqSxLItZyzUv4iJD8LEiM/L1dY/iqX220
|
||||
LQ83SYRVOuAiADdhwKktXVZIY/HjUj9KkrRieTxxcQlxKfPxvkVV7gDT84x45qSj39uN681E+4YwAZgJ
|
||||
UkLQjhFyEIYRnmkF1GczDqUtqSuNhPkUoWDa4MlJu/EgVRxKDbnMQYHIdWbLoctxPM2hr41WxZxcLMUn
|
||||
O00doeq9pZOBmQaC9HDleFkSJmIsBITBk5LMYuKHAzsQx6y3tovFAOLIUIqyQ/y4PGEw/kVJDbcjbR+1
|
||||
W8bveoDYTjxmL7Ek9Wa7tb1eMre8oQCHPNT5NeEZZ+J5fROHF4MkO0htrknF0gMQbg70APR5YfiQajRa
|
||||
ahNj5KwkRZhI0SCJlpyYtu2yRHh1bpquqqXBtLIoalKfac5qncgqIUd2BIxNZaV6LdFc8TyUQwRr2hHp
|
||||
5wdb0i64X/MX5/Pc2RWqp0+m0S42br1qpSS0TqIRtA5ZVFtiZLNM45V2RqxjIf2qyufyijNKdcG2RDB/
|
||||
9eWo8VYWl692aueMaNC3lZADFDr+FmkTrrH/WBzrGeqdM75gDEw1TIw5d25kG06V37KeVeN0f4wVDUIM
|
||||
HUxJNN+kAnvFqdfMwC4+L8qm6o4a9DaV3AJKxdVxXXvDPUBVO870AfJLxqA0kkoe5xjOhlW8MyxUdVcl
|
||||
cQrZkXEoJhzvpORAxChQIJA8k4KfIii1IRjshWigFTJzyqQyq6eP7ubWq9X3va3xhibHAj8aIH+dBog+
|
||||
aWquy621yZnIpAIR9A1tpMoKJBQyBcikUxQjgM0rtKXBbBrNCkF4yM1MqfZXdguMCTt7xVnJ/E7j7AAF
|
||||
8zWbq7lTtIX0LAqyFyqE5QIhojI4ELwgdDSOuffEp01kDjQ+4G/mu2s3snXSX5R6Tbex9WY/bqeqdLCI
|
||||
a2iEziJCu+Ok+vtA6JGOGvLtVTjerhSJnW+N+tEZwfj0TzNtQNBT/EI7NjsyubTuiqu6GipS+Fsx7tok
|
||||
2lfbSwzfhBUhqVQe1cSz8cYJ7KTTsZC2+hDmi8TnOn7lDJc0ds2tiUkPcemof0gavD6xfO0gdCWAabKb
|
||||
nJK44nIdSPDoTg/C+QWCQTY5uOgyr2GCAPpjtvcNK0FW5trkiqC5PD2bXq4a3ODoDgqWTGhAObWgp96E
|
||||
uqZB0ExioimIvDmgiQq9CIozSnQovbmhRV2pnBjI2ntAKx0pKoKEc+BMlzGZWZoDJ6er7MaesxDGK4SM
|
||||
0Ih2fqspwYzE65csyUvWLduQBLzWeinm4FsTRFU6ciPrF/d7htrYElqq9tcY1Fc8gUWwSYxeyxyc1co6
|
||||
95pUFdtYTUsoZfj0+tbe5OzQXNcRwXdS8lEE4KAuQAAymo2swYMuc9o3avfebtk2zEjObC68hnlTKew+
|
||||
YpDnRqircaeu5EtldBS0fmEil18uj6grSFtxQmEShW8VtDZImDAXH6pOxaIQ9oAgKCy65by4XShw1+sc
|
||||
qrrKe4fe9q3KvyHvd8LNUuo2HzCrAcbac2jNry2/lupigCIY6Fd23cYKWcKyFaTPbfEdBOr0SHgV0ay9
|
||||
6njbV/5FDD47P94I6bQjWyEXj7lJEnXfoaXKpFq/4Rq+07ANt/uYIuVaCBt9AyR1FgwfATurnfD00z42
|
||||
dt7Nb3l5tPrU9x7uellto1XsdYz19t+0QaanJK5+CTGG0ENUa+XCCveGSDTrFzqhqqX6gVQXINX3btff
|
||||
zgbb79CC3zo1VOFPx26wvIjb4R9Ar++srlkwdKqrpfqhrvdW1+T0faC2eR99SZLRVwQ3w7Z5v40pmePr
|
||||
b18F492U7zRnsmgrxGXOV4wfD78sZIpLV3nfKMVa4d6TW6eTFsWmv+U0/XjVjxHd+NmnrDWf4jQ75/k6
|
||||
Pum2n6FuR/KZkNgb9IOAvY0qfF0fuE7P2bsPTT1Xf8bV4ab+/7z5fwAAAP//yoGbgKJBAAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.6.json": {
|
||||
name: "config_schema_v3.6.json",
|
||||
local: "data/config_schema_v3.6.json",
|
||||
size: 17084,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xbS4/jNhK++1cISm7pxwAbBNi57XFPu+dteASaKstMUyRTpDztDPzfF3q2RJEibaun
|
||||
O8gECKYtFR/15FfF0rdNkqQ/a3qAkqSfk/RgjPr8+Pi7luK+ffogsXjMkezN/adfH9tnP6V39TiW10Oo
|
||||
FHtWZO2b7PiPh98e6uEtiTkpqInk7negpn2G8EfFEOrBT+kRUDMp0u3dpn6nUCpAw0Cnn5N6c0kykPQP
|
||||
RtNqg0wUafP43MyQJKkGPDI6mmHY6k+Pr/M/DmR39qyjzTbPFTEGUPx3vrfm9Zcncv/nv+7/9+n+nw/Z
|
||||
/faXnyeva/ki7Nvlc9gzwQyTYlg/HSjP3V/nYWGS5w0x4ZO194RrmPIswHyV+BzieSB7J5679R08T9k5
|
||||
Sl6VQQ32VO/ETLv8OvrTQBFM2GRbqnez2Hr5dRhuo0aI4Z7qnRhul7+N4U3PtHuP6ZeX+/rfczPn4nzt
|
||||
LKP9NUxMYp5LnK6Y45fnIFCPJHNQXJ6anbtl1hKUIEw6iClJ0l3FeG5LXQr4Tz3F0+hhknyzw/tonub9
|
||||
5JffKIb3Hl6G91QKAy+mYWp56VYEkj4D7hmH2BEEW0v3iIwzbTKJWc6ocY7nZAf8phkooQfI9ijL4Cz7
|
||||
rOVEOyfqI3gk54ZgAdGS1Ycy0+zPiVyfUiYMFIDp3TB2e7bGziYLO6bt0/V/241jwpQSlZE8nzBBEMmp
|
||||
3hEzUGo3f0laCfZHBf/uSAxWYM+bo1TrT1ygrFSmCNZeuCz7lMqyJGIt17yEjwjJzw6Jib93a4xfDatN
|
||||
tuXhJomwSke4CISbcMCpLV1WSGPjx6V+lCRpxfJ44uIS4lLm032LqtwBpucZ8cxJJ7+3G9cbS/uGMAGY
|
||||
CVJC0I4RchCGEZ5pBdRnMw6lLakrjQzzKULBtMGTk3bjiVRxUWrMZQ4KRK6zNh26PI6nOQy50aoxJxdL
|
||||
51M7TX1C1XtLrYGZBoL0cOV4WRImYiwEhMGTkqyNiR8u2IE4ZoO1XSwGEEeGUpR9xI/DCaPxL0pquD3S
|
||||
Dqd2x/jdECC2lsfsJZak3my/ttdL5pY3FuCYhxpfE55xJp7XN3F4MUiyg9TmGiiWHoBwc6AHoM8Lw8dU
|
||||
k9FSmxgjZyUpwkSKBkm05MR0ZZclwquxabqqlkbTyqKoSX2mOct1IrOEHNkRMBbKSvWaornO8xCGCOa0
|
||||
E9IvD21Ku+B+zV+cz7Gz66i2n9inXey59aqVktAaRCNoHbKoLsXIZkjjlXZGrGND+lWZz+UZZ5TqgmWJ
|
||||
IH71YdR4K4vDq73aOSMa9G0p5CgKHX+NtAnX2N8Wx3qGeueMTxgDU42BMefOjWzDUPkt81k1hfvTWNFE
|
||||
iLGDKYnmu2Rgr3HqFRm0i8+TMlvdUYPeJpNbiFJxeVxf3nAPUNWOM32A/JIxKI2kksc5hrNgFe8MC1nd
|
||||
VSBOITsyDoXF8U5KDkRMDgoEkmdS8FMEpTYEg7UQDbRCZk6ZVGZ1+Ogubr1a/VDbmm7Iuhb4UQD5+xRA
|
||||
9ElTcx221iZnIpMKRNA3tJEqK5BQyBQgk05RTAJsXmGbGsym0awQhIfczJRqf2W1wJiws1eclczvNM4K
|
||||
UBCvtVjNDdEW4FlUyF7IEJYThIjM4EDwgqOjccy953zaRGKg6QV/M99dt5Gtk/4i6GVvY+tFP26nqnQw
|
||||
iWtohM4ijnbHTfVfI0JPdNSQb6+K491KkbHzraN+NCKY3v5ppg0IeopfaMdmVyaX5l1xWVdDRQp/Kcad
|
||||
m0T7atfE8F1YEZJK5VHNjWwMR8rbc9FjOH9yakfOhTy2ZIKVVZl+Tj75MtZ4ybwxtLdqQAuA3hd7v0p8
|
||||
rk/2nOGSLV/TT2JVV5eaIMakwcaS5YaMULME02Rn3R+5EEttKHh0A6cw8kIwyKwrnR6TjqET6I958WFY
|
||||
CbIy18JOguZy4Gq3nY16W/orlCUTGlHaFvQ0mFBfTgmaSQzOAJE3V1dRoARBcUaJDgG/G4r3lcqJgazr
|
||||
kFrpslURJJwDZ7qMwaxpDpycrrKb9gaKMF4hZIRGXHR0mhLMSLx+yZK8ZP2yDUnAa1svxRx8a4JoTg8b
|
||||
NbZ+cb9nqE1bXJCq+zUN6iveTSO08E6vZQ7OPG6dji9VxZac0xJKGb7Xv7VqO2sn0PWJ4LtD+igCcFAX
|
||||
IAAZzSbW4Ikuc9o3KoTfbtntMSM5a7OENcybStHuIyby3Bjq6rhDjIFSGR0VWr8ykcuvl5+oK0hbcULB
|
||||
OoVvFbQ2SJgwF18322JRCHtAEBQW3XKe9i+k/uvVVFWd/75D1f9W5d+A+53hZgm6zQfMcoCp9hxa82vL
|
||||
r6U6GaAIBoaVXX1qIUtYtoL0uStLBAN1eiS8iihjX3Xx70v/IgafnZ+1hHTak62AxWN6bKI6QTqqTKr1
|
||||
S9Hhbo9tuBDKFCnXirDRvTGpM2H4CLGz2glPpfFjx867ef+bR6tPQ+3hbpDVNlrFXsdYb/9NGcS+P3LV
|
||||
S4gxhB6iSisXZrg3nESzSqozVHVUPyLVBZHqr27X388Guy/0gl+BNVThj+pusLyIvvkPoNd3VtfsMHSq
|
||||
q6P6oa73VpfVlzBS27yOviTJ6ObJzbhsPmzDJnN8F+/LYLyb8t3mWIt2QlzmfMXz4+GXBaS41OT8RhBr
|
||||
hY4wt06tEsVm6P+yP+v1x4h+/Owj35pPcZrd83yb9gC0H+huJ/KxSNpvC0YH9jYq8XV9+mt3IPSf4Hqa
|
||||
oqbZ4ab+/7z5fwAAAP//nm8U9rxCAAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.7.json": {
|
||||
name: "config_schema_v3.7.json",
|
||||
local: "data/config_schema_v3.7.json",
|
||||
size: 17854,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xc3W/bOBJ/918haPdt46TALe5wfbvHe7p7vsAVaGpsc0OR3CHlxi38vx/0GYkiRdpW
|
||||
mhSbAkUTafgxn/zNcNTvqyRJf9X0AAVJPyfpwRj1+eHhDy3Funl6L3H/kCPZmfWn3x+aZ7+kd9U4lldD
|
||||
qBQ7ts+aN9nxb/f/uK+GNyTmpKAikts/gJrmGcKfJUOoBj+mR0DNpEg3d6vqnUKpAA0DnX5Oqs0lSU/S
|
||||
PRhMqw0ysU/rx+d6hiRJNeCR0cEM/VZ/eXiZ/6Enu7NnHWy2fq6IMYDiv9O91a+/PJL1t3+t//dp/c/7
|
||||
bL357dfR60q+CLtm+Rx2TDDDpOjXT3vKc/vTuV+Y5HlNTPho7R3hGsY8CzBfJT6FeO7J3ojndn0Hz2N2
|
||||
jpKXRVCDHdUbMdMsv4z+NFAEEzbZhurNLLZafhmGm6gRYrijeiOGm+VvY3jVMe3eY/rleV39e67nnJ2v
|
||||
mWWwv5qJUcxzidMVc/zy7AXqkWQOistTvXO3zBqCAoRJezElSbotGc9tqUsB/6mmeBw8TJLvdngfzFO/
|
||||
H/3mN4r+vYeX/j2VwsCzqZmaX7oRgaRPgDvGIXYEwcbSPSLjTJtMYpYzapzjOdkCv2kGSugBsh3KIjjL
|
||||
Lms40c6JuggeybkhuIdoyepDkWn2bSTXx5QJA3vA9K4fuzlbYyeThR3T9unqz2blmDClRGUkz0dMEERy
|
||||
qnbEDBTazV+SloL9WcK/WxKDJdjz5ijV8hPvUZYqUwQrL5yXfUplURCxlGtewkeE5CeHxMjf2zWGr/rV
|
||||
RtvycJNEWKUjXATCTTjgVJYuS6Sx8eNSP0qStGR5PPH+EuJC5uN9i7LYAqbnCfHESUe/b1auN5b2DWEC
|
||||
MBOkgKAdI+QgDCM80wqoz2YcSptTVxoZ5lOEPdMGT07alSdSxUWpIZc5KBC5zpp06PI4nubQ50aLxpxc
|
||||
zJ1PzTTVCVXtLbUGZhoI0sOV42VBmIixEBAGT0qyJia+u2AH4pj11naxGEAcGUpRdBE/DicMxj8rqeH2
|
||||
SNuf2i3jd32A2Fges5NYkGqz3dpeL5la3lCAQx4qfE14xpl4Wt7E4dkgyQ5Sm2ugWHoAws2BHoA+zQwf
|
||||
Uo1GS21ijJwVZB8mEmx8lmyl5EDEmEjR4DxacmLa2swc4dUANl1UlYNp5X5fkfrsd5IQRaYSObIjYCze
|
||||
leolj3Md+iGgEUx8R6Rf7pu8d8ZH6584nwJs13luP7GPxNjD7UUrBaEV0kbQOmRRbR6STeDIC+2EWMfG
|
||||
/avSo8vT0ijVBWsXQZDrA7LxVhYHaju1c0Y06NvyzEEUOv4eaROusX+fHesZ6p0zPqsMTDVEz5w7N7IJ
|
||||
4+nXTHrVOCcYx4o6QgwdTEk0PyRNe4lTL/ChWXyaudnqjhr0OuneTJSKS/a6Goh7gCq3nOkD5JeMQWkk
|
||||
lTzOMZxVrXhnmEn9rkJ6CtmRcdhbHLtgDALJMyn4KYJSG4LBgokGWiIzp0wqszjGdFfAXqy+L4CNN2Td
|
||||
HXxUSf46VRJ90tRch621yZnIpAIR9A1tpMr2SChkCpBJpyhGATYvsUkNJtNotheEh9zMFGp3ZUnBmLCz
|
||||
l5wVzO80zjJREK81WM0N0WbgWVTInskQ5hOEiMzgQPCCo6N2zJ3nfFpFYqBxF0A93127kY2T/iLoZW9j
|
||||
40U/bqcqdTCJq2mEziKOdsd19s8RoUc6qsk3V8XxdqXI2PnaUT8aEYyvCDXTBgQ9xS+0ZZN7lUvzrris
|
||||
q6Yie38pxp2bRPtq2+nwQ1gRkkrlUc2NbPRHyutz0WE4f3JqR86ZPLZgghVlkX5OPvky1njJvDK0t2pA
|
||||
M4DeF3u/SnyqTvac4Zwtn+d7P8Z9FRc2p1il2rmOiiFpsEtlvrsj1HnBNNlal1HOuq0wgEc3wAojNASD
|
||||
zLof6rDrEGKBfp+3KIYVIEtzLTwlaC4HuHYP26BRpruPmTOhAaVtQY+9CXVll6CZxOAREHl9DxYFXhAU
|
||||
Z5ToEEC8ociPkvMtoU9Z23C10N2tIkg4B850EYNu0xw4OV1lOc2FFmG8RMgIjbgSaXUlmJF4/ZIFec66
|
||||
ZWuSgN82foo5+NYEUZ8zNr5sPGO9Y6hNU4aQqv1tHP4XvOouVU4MfJjEh0kMK3R1bqCXMgdnEWCZnkJV
|
||||
xt5XpAUUMtw5cmvJf9KwoiuY4LuAfC8CcFDvQQAymo2swXPkTGlf6RbldstusIfkrEkxlzBvKkWzj5jI
|
||||
c2Ooq+JOBcQLZXRUaP3KRC6/Xg6zFpC24oSCBc1uFbQ2SJgwF/cq2GJRCDtAEBRm3XJaM5qpGy1XkFcI
|
||||
JH+DK6NblX/DlwrOcDOH56cDJonhWHsOrfm15ddSlSFSBAP9yq5OyJAlzFtB+tTWtIKBOj0SXkbcgVzV
|
||||
NeKrHUQMPjs/nArptCNbIEGL6eKKaiNqqTKplr/HCLcKbcJVdKZIsVSEjW6sSp0Jw3uIneVWeMrU7zt2
|
||||
3k07LD1afewLUne9rDbRKvY6xnL7r2tj9uWjq4hGjCH0EFVvu7Ds8QPKl5NyvTOktVQfEe2CiPaz2//7
|
||||
s9X2m9Lgd4s1Vfgz0BssNOJLj3eg/59ErZND2KnWlupDrT+LWq2mm4F6p5c/cxKP7gxeDe96+m3YZI7/
|
||||
GcKXYXk35buqtBZthT3P+YLn1v1vM0h2roP/lSDgAu2Obp1aJZRV39xof9jujyXd+Mln7hWf4jS5nPw+
|
||||
bnBpPlHfjORjkTRf1wyAwiYqMXd9/G6313QfoXs6/sbZ66r6e179PwAA//8ZL3SpvkUAAA==
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.8.json": {
|
||||
name: "config_schema_v3.8.json",
|
||||
local: "data/config_schema_v3.8.json",
|
||||
size: 18246,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xcS4/juBG++1cI2r1tPwbIIkjmlmNOyTkNj0BTZZvbFMktUp72DvzfAz1bokiRtuXu
|
||||
3qQDBDstFR/15FfFkn+skiT9WdM9FCT9mqR7Y9TXx8fftBT3zdMHibvHHMnW3H/59bF59lN6V41jeTWE
|
||||
SrFlu6x5kx3+8vC3h2p4Q2KOCioiufkNqGmeIfxeMoRq8FN6ANRMinR9t6reKZQK0DDQ6dek2lyS9CTd
|
||||
g8G02iATu7R+fKpnSJJUAx4YHczQb/Wnx9f5H3uyO3vWwWbr54oYAyj+Pd1b/frbE7n/4x/3//ly//eH
|
||||
7H79y8+j15V8EbbN8jlsmWCGSdGvn/aUp/Zfp35hkuc1MeGjtbeEaxjzLMB8l/gc4rkneyee2/UdPI/Z
|
||||
OUheFkENdlTvxEyz/DL600ARTNhkG6p3s9hq+WUYbqJGiOGO6p0Ybpa/juFVx7R7j+m3l/vqv6d6ztn5
|
||||
mlkG+6uZGMU8lzhdMccvz16gHknmoLg81jt3y6whKECYtBdTkqSbkvHclroU8K9qiqfBwyT5YYf3wTz1
|
||||
+9FffqPo33t46d9TKQy8mJqp+aUbEUj6DLhlHGJHEGws3SMyzrTJJGY5o8Y5npMN8KtmoITuIduiLIKz
|
||||
bLOGE+2cqIvgkZwbgjuIlqzeF5lmf4zk+pQyYWAHmN71Y9cna+xksrBj2j5d/W+9ckyYUqIykucjJggi
|
||||
OVY7YgYK7eYvSUvBfi/hny2JwRLseXOUavmJdyhLlSmClRfOyz6lsiiIWMo1z+EjQvKTQ2Lk7+0aw1f9
|
||||
aqNtebhJIqzSES4C4SYccCpLlyXS2Phxrh8lSVqyPJ54dw5xIfPxvkVZbADT04R44qSjv9cr1xtL+4Yw
|
||||
AZgJUkDQjhFyEIYRnmkF1GczDqXNqas1wQjxpJEHQoqwY9rg0Um78sS0uHg2lEcOCkSusyZxOj/ipzn0
|
||||
WdSi0SkXcydZM011llV7S62BmQaCdH/heFkQJmJsCYTBo5KsiZ4fLiyCOGS9tZ0tBhAHhlIU3dkQhygG
|
||||
41+U1HB9TO7P95bxuz6UrG3PkliQarPd2l4vmVreUIBDHiokTnjGmXhe3sThxSDJ9lKbS0BbugfCzZ7u
|
||||
gT7PDB9SjUZLbWKMnBVkFyYSbHzqbKTkQMSYSNHgPFpyYtoqzhzhxVA3XVSVg2nlbleR+ux3kjpFJh05
|
||||
sgNgLDKW6jXjc8GDECQJpsgj0m8PTYY846P1vzifQnHXyW8/sY/E2MPtVSsFoRUmR9A6ZFFtxpJNgMsr
|
||||
7YRYx8b9ixKp8xPYKNUFqxxBOOyDvPFWFgd/O7VzRjTo6zLSQRQ6/BppE66xf50d6xnqnTM+/wxMNcTZ
|
||||
nDs3sg4j71umx2qcPYxjRR0hhg6mJJo3Sehe49QrfGgWn+Z4trqjBt0mMZyJUnFpYVctcQ9Q5YYzvYf8
|
||||
nDEojaSSxzmGs/4V7wwzSeJFSE8hOzAOO4tjF4xBIHkmBT9GUGpDMFha0UBLZOaYSWUWx5juWtmr1fel
|
||||
svGGrFuGz3rK/089RR81NZdha21yJjKpQAR9Qxupsh0SCpkCZNIpilGAzUtsUoPJNJrtBOEhNzOF2l5Y
|
||||
UjAm7OwlZwXzO42zoBTEaw1Wc0O0GXgWFbJnMoT5BCEiM9gTPOPoqB1z6zmfVpEYaNwvUM93125k7aQ/
|
||||
C3rZ21h70Y/bqUodTOJqGqGziKPdcfH954jQIx3V5OuL4ni7UmTsvHXUj0YE44KxZtqAoMf4hTZscgNz
|
||||
bt4Vl3XVVGTnL8W4c5NoX217It6EFSGpVB7VXMlGf6TcnosOw/mTUztyzuSxBROsKIv0a/LFl7HGS+bG
|
||||
0N6qAc0Ael/s/S7xuTrZc4Zztnya7xIZd2Cc2cZilWrnei+GpMF+lvk+kFCPBtNkY11GOeu2wgAe3AAr
|
||||
jNAQDDLrfqjDrkOIBfpj3qIYVoAszaXwlKA5H+Da3W6DlpruPmbOhAaUtgU99SbUlV2CZhKDR0Dk9T1Y
|
||||
FHhBUJxRokMA8YoiP0rON4Q+Z6/3skvc8iqChHPgTBcx6DbNgZPjRZbTXGgRxkuEjNCIK5FWV4IZiZcv
|
||||
WZCXrFu2Jgn4beOnmINvTRD1OWPjy8Yz7rcMtWnKEFK1f43D/4JX3aXKiYFPk/g0iWGFrs4N9FLm4CwC
|
||||
LNN9qMrY+4q0gEKGO0euLflPGlZ0BRN8F5AfRQAO6h0IQEazkTV4jpwp7Y1uUa637AZ7SM6aFHOhNqdm
|
||||
HzGR58pQV8WdCogXyuio0PqdiVx+Px9mLSBtxQkFC5pdK2htkDBhzu5VsMWiELaAICjMuuW0ZjRTN1qu
|
||||
IK8QSP4OV0Yua+uAaQXYM2EjWVdF8hKzueJrCGegmssEpgMmKeVY7w59+/Xs12+VW1IEA/3Krm7LkA3N
|
||||
20/63FbDgiE+PRBeRtyeXNRv4qs6RAw+OT/OCum0I1sgtYvp/4pqQGqpMqmWvwEJNxmtw/V3pkixVGyO
|
||||
bslKnanGR4i65UZ4Ctw3jrrLHbldb6ZHq099Keuul9U6WsVex1hu/3VVzb62dJXfiDGE7qMqdWcWTN6g
|
||||
8Dkp9DtDWkv1GdHOiGh/dvv/eLbafrca/Daypgp/anqFhUZ8I/IB9L+EWv/n3LLKVzkxkM2w8wa2PEEe
|
||||
TltuqT5teWlb/iBWYLU0DaxherU2p6DovuvV8Cat34ZN5viFDl8W6t2U7yLYWrTVzTznCwaRh19m0P7c
|
||||
9xE3gskLNJO6dWoVqFZ966j9AwP+0NONn/zcQMWnOE6ufn+M24eanwpYj+RjkTTfLg2i9jqqeOH6EQK7
|
||||
ean7MQBPP+U4w19V/z+t/hsAAP//Fd/bF0ZHAAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/data/config_schema_v3.9.json": {
|
||||
name: "config_schema_v3.9.json",
|
||||
local: "data/config_schema_v3.9.json",
|
||||
size: 18407,
|
||||
modtime: 1518458244,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xcSY/jNha++1cISm6ppYEJBkjf5jinmfMU3AJNPdtMUSTzSLnLadR/H2gtiSJFylYt
|
||||
makAQZetx+XxLfzeIv/YJEn6s6ZHKEj6NUmPxqiv9/e/aylum2/vJB7ucyR7c/vl1/vmu5/Sm2ocy6sh
|
||||
VIo9O2TNk+z0t7vf7qrhDYk5K6iI5O53oKb5DuGPkiFUgx/SE6BmUqTbm031TKFUgIaBTr8m1eaSpCfp
|
||||
vhhMqw0ycUjrr5/rGZIk1YAnRgcz9Fv96f5l/vue7MaedbDZ+ntFjAEU/57urX787YHc/vmP2/98uf3t
|
||||
Lrvd/vLz6HF1vgj7Zvkc9kwww6To1097yuf2r+d+YZLnNTHho7X3hGsY8yzAfJf4GOK5J3snntv1HTyP
|
||||
2TlJXhZBCXZU78RMs/w68tNAEUxYZRuqd9PYavl1GG68RojhjuqdGG6Wv47hTce0e4/pt6fb6t/nes7Z
|
||||
+ZpZBvurmRj5PNdxunyO/zz7A/WcZA6Ky3O9c/eZNQQFCJP2x5Qk6a5kPLdPXQr4VzXFw+DLJPlhu/fB
|
||||
PPXz0Se/UvTPPbz0z6kUBp5MzdT80s0RSPoIuGccYkcQbDTdc2ScaZNJzHJGjXM8JzvgV81ACT1CtkdZ
|
||||
BGfZZw0n2jlR58EjOTcEDxB9svpYZJr9OTrXh5QJAwfA9KYfu3UNhieDJDtKbeJPypplMm3Yvm3XUP23
|
||||
3TgmTClRGcnz0VkQRHKuGGMGCu0+piQtBfujhH+2JAZLsOfNUar1Jz6gLJXQWSHzkKq3xJkiWFl+iFgW
|
||||
BRFruYMlTEeIaXIxjXxMu8bwUb/aaFsebpIIS3C4qICLCzu5yrpkiTTWZy213SRJS5bHEx+WEE8UUJTF
|
||||
DnBiv2MznH7eblxPLOkbwgRgJkgRVnqEHIRhhGdaAfXpjENoc+JqVTDieNLISyhFODBt8Oyk3XgcYJzz
|
||||
G55HDgpErrMmWFt+y6Q59JHbqq4sF3N3QjNNdStUe0utgZkGgvR44XhZECZidAmEwbOSrPGeH84tgjhl
|
||||
vbYtPgYQJ4ZSFN3dEIdiBuOflNRwvU/uMUXL+E3vSra2ZUksSLXZbm2vlUw1b3iAQx4q9E94xpl4XF/F
|
||||
L4E/g+FHINwc6RHo48zwIdVotNQmRslZQQ5hIsHGt85OSg5EjIkUDc6jJSemzRzNEV4Mr9NVRTmYVh4O
|
||||
FalPfyfhWmSgkyM7AcaicaleokwXPAhBkmBYPiL9dtdE5TM2Wv/Febp9dkwRwgL2lRh7ub1IpSC0AvAI
|
||||
Woc0qo2S5pDzhFjH+v2LgrflQXOU6IKZlSAc9kHeeC2Lg7+d2DkjGvR1UfDAC51+jdQJ19i/z471DPXO
|
||||
GR+sBqYa4mzOnRvZhpH3a8bSahw9jH1F7SGGBqYkmjcJ6F781At8aBafxni2uKMGvU5gGBHfz4eFXYbG
|
||||
PUCVO870EfIlY1AaSSWPMwxnzi3eGGaCxIuQnkJ2YhwOFscuGINA8kwKfo6g1IZgMLWigZbIzDmTyqyO
|
||||
Md35uRetd6TnHJWNz3zK/08+RZ81NZdha21yJjKpQARtQxupsgMSCpkCZNJ5FCMHm5fYhAaTaTQ7CMJD
|
||||
ZmYKtb8wpWBM2NhLzgrmNxpnQimI1xqs5oZoM/AsymXPRAjzAUJEZHAkuODqqA1z77mfNpEYaNyjUM93
|
||||
025k66RfBL3sbWy96MdtVKUOBnE1TWTqflps/2t46JGMavLtRX68XSnSd762149GBOOEsWbagKDn+IV2
|
||||
bFKBWRp3xUVdNRU5+FMx7tgk2lbbPow3YUVIKpVHNFey0V8pr89Fh+H8wantOWfi2IIJVpRF+jX54otY
|
||||
40/mlaG9lQOaAfQ+3/td4mN1s+cM53T5eb4zZdz1sbB1xkrVzvV7DEmDPTTzvSehvhCmyc4qRjnztsIA
|
||||
ntwAK4zQEAwyqz7UYdchxAL9MasohhUgS3MpPCVolgNcu8Nu0MbT1WPmVGhAaWvQQ69CXdolqCYxeARE
|
||||
XtfBosALguKMEh0CiFck+VFyviP0MXupy65R5VUECefAmS5i0G2aAyfnizSnKWgRxkuEjNCIkkgrK8GM
|
||||
xMuXLMhT1i1bkwTstrFTzMG3Joj6nrHxZWMZt3uG2jRpCKnaT2P3v2Kpu1Q5MfCpEp8qMczQ1bGBXksd
|
||||
nEmAdToeVRlbr0gLKCTGxhWpYrmOir2vqQ9Mult0hSl81coPfFoHEICMZiPV8dxPU9pXKrlcbwYNUJGc
|
||||
NfHoSj1RzT5i3NSVfrFyUhVqL5TRUX74OxO5/L4ck61w2ooTChaOu/agtUHChFnc2GAfi0LYA4KgMGuW
|
||||
0wTTTJJpvey9QiD5O9SXXNrWodgK3WfChr0uF3qJ2lzxuobTUc2FDdMBk/hzLHeHvP1y9su3CkQpgoF+
|
||||
ZVdrZkiH5vUnfWxTZ0EXn54ILyNKLRc1p/hSFBGDn51vj4Vk2pGtEAfGNItFdSu1VJlU65dLwh1J23Cy
|
||||
nilSrOWbo/u3Umdc8hG8brkTnmz4K3vd9a7crpHTI9WHPu9105/VNlrEXsNYb/91Cs6ucbpydcQYQo9R
|
||||
ab2F2ZU3yJJOqgJOl9ZSfXq0BR7tr67/H09X2xdrgy9v1lThd2Gv0NCIF0o+gPzXEOv/nFlW8SonBrIZ
|
||||
dt5AlyfIw6nLLdWnLq+tyx9EC6z+p4E2TOtwcwKKbtLeDMtu/TZsMsdPiPiiUO+mfFVja9FWNvOcr+hE
|
||||
7n6ZQftzL1O8EkxeofPULVMrQbXp+0ztX0Dwu55u/OT3ECo+xXlSJ/4x7jVqfstg/BK6RdK86DTw2tuo
|
||||
5IXrVxLsTqfu1wo8zZfjCH9T/f+8+W8AAAD//6unbt7nRwAA
|
||||
`,
|
||||
},
|
||||
|
||||
"/data": {
|
||||
name: "data",
|
||||
local: `data`,
|
||||
isDir: true,
|
||||
},
|
||||
}
|
||||
|
||||
var _escDirs = map[string][]os.FileInfo{

	"data": {
		_escData["/data/config_schema_v3.0.json"],
		_escData["/data/config_schema_v3.1.json"],
		_escData["/data/config_schema_v3.2.json"],
		_escData["/data/config_schema_v3.3.json"],
		_escData["/data/config_schema_v3.4.json"],
		_escData["/data/config_schema_v3.5.json"],
		_escData["/data/config_schema_v3.6.json"],
		_escData["/data/config_schema_v3.7.json"],
		_escData["/data/config_schema_v3.8.json"],
		_escData["/data/config_schema_v3.9.json"],
	},
}
@ -1,8 +1,7 @@
package schema

//go:generate esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data

import (
	"embed"
	"fmt"
	"strings"
	"time"

@ -58,9 +57,12 @@ func normalizeVersion(version string) string {
	}
}

//go:embed data/config_schema_v*.json
var schemas embed.FS

// Validate uses the jsonschema to validate the configuration
func Validate(config map[string]interface{}, version string) error {
	schemaData, err := _escFSByte(false, fmt.Sprintf("/data/config_schema_v%s.json", version))
	schemaData, err := schemas.ReadFile("data/config_schema_v" + version + ".json")
	if err != nil {
		return errors.Errorf("unsupported Compose file version: %s", version)
	}
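The two hunks above swap the esc-generated bindata for Go 1.16 file embedding. A minimal self-contained sketch of the same pattern (the loadSchema helper is hypothetical and not part of docker/cli):

package schema

import (
	"embed"
	"fmt"
)

// The //go:embed directive must sit in the comment block directly above the
// variable it fills; every file matching the pattern is compiled into the binary.
//go:embed data/config_schema_v*.json
var schemas embed.FS

// loadSchema returns the embedded JSON schema for one Compose file version.
func loadSchema(version string) ([]byte, error) {
	data, err := schemas.ReadFile("data/config_schema_v" + version + ".json")
	if err != nil {
		return nil, fmt.Errorf("unsupported Compose file version: %s", version)
	}
	return data, nil
}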
@ -24,12 +24,12 @@ const (
)

var (
	initConfigDir sync.Once
	initConfigDir = new(sync.Once)
	configDir string
	homeDir string
)

// resetHomeDir is used in testing to resets the "homeDir" package variable to
// resetHomeDir is used in testing to reset the "homeDir" package variable to
// force re-lookup of the home directory between tests.
func resetHomeDir() {
	homeDir = ""

@ -42,6 +42,13 @@ func getHomeDir() string {
	return homeDir
}

// resetConfigDir is used in testing to reset the "configDir" package variable
// and its sync.Once to force re-lookup between tests.
func resetConfigDir() {
	configDir = ""
	initConfigDir = new(sync.Once)
}

func setConfigDir() {
	if configDir != "" {
		return
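Changing initConfigDir from a sync.Once value to a *sync.Once allocated with new() is what lets resetConfigDir re-arm the lazy lookup: a sync.Once can never fire twice, but the pointer can be swapped for a fresh instance between tests. A small illustrative sketch of that pattern (the names here are made up):

package main

import (
	"fmt"
	"sync"
)

var (
	initOnce = new(sync.Once)
	cached   string
)

// lookup runs the expensive initialization at most once per Once instance.
func lookup() string {
	initOnce.Do(func() { cached = "resolved value" })
	return cached
}

// resetForTest swaps in a fresh sync.Once so the next lookup runs again,
// which a plain sync.Once value would not allow.
func resetForTest() {
	cached = ""
	initOnce = new(sync.Once)
}

func main() {
	fmt.Println(lookup())
	resetForTest()
	fmt.Println(lookup()) // initialization runs a second time
}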
@ -97,10 +104,15 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
	return &configFile, err
}

// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file
var printLegacyFileWarning bool

// Load reads the configuration files in the given directory, and sets up
// the auth config information and returns values.
// FIXME: use the internal golang config parser
func Load(configDir string) (*configfile.ConfigFile, error) {
	printLegacyFileWarning = false

	if configDir == "" {
		configDir = Dir()
	}

@ -125,6 +137,7 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
	// Can't find latest config file so check for the old one
	filename = filepath.Join(getHomeDir(), oldConfigfile)
	if file, err := os.Open(filename); err == nil {
		printLegacyFileWarning = true
		defer file.Close()
		if err := configFile.LegacyLoadFromReader(file); err != nil {
			return configFile, errors.Wrap(err, filename)

@ -140,6 +153,9 @@ func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
	if err != nil {
		fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
	}
	if printLegacyFileWarning {
		_, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release")
	}
	if !configFile.ContainsAuth() {
		configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
	}
@ -12,9 +12,11 @@ import (

	"github.com/docker/cli/cli/config/configfile"
	"github.com/docker/cli/cli/config/credentials"
	"github.com/docker/cli/cli/config/types"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/env"
	"gotest.tools/v3/fs"
)

var homeKey = "HOME"

@ -223,6 +225,31 @@ func TestOldJSON(t *testing.T) {
	}
}

func TestOldJSONFallbackDeprecationWarning(t *testing.T) {
	js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}`
	tmpHome := fs.NewDir(t, t.Name(), fs.WithFile(oldConfigfile, js))
	defer tmpHome.Remove()
	defer env.PatchAll(t, map[string]string{homeKey: tmpHome.Path(), "DOCKER_CONFIG": ""})()

	// reset the homeDir, configDir, and its sync.Once, to force them being resolved again
	resetHomeDir()
	resetConfigDir()

	buffer := new(bytes.Buffer)
	configFile := LoadDefaultConfigFile(buffer)
	expected := configfile.New(tmpHome.Join(configFileDir, ConfigFileName))
	expected.AuthConfigs = map[string]types.AuthConfig{
		"https://index.docker.io/v1/": {
			Username: "joejoe",
			Password: "hello",
			Email: "user@example.com",
			ServerAddress: "https://index.docker.io/v1/",
		},
	}
	assert.Assert(t, strings.Contains(buffer.String(), "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release"))
	assert.Check(t, is.DeepEqual(expected, configFile))
}

func TestNewJSON(t *testing.T) {
	tmpHome, err := ioutil.TempDir("", "config-test")
	assert.NilError(t, err)
|
|
|
@ -60,6 +60,7 @@ type ProxyConfig struct {
	HTTPSProxy string `json:"httpsProxy,omitempty"`
	NoProxy string `json:"noProxy,omitempty"`
	FTPProxy string `json:"ftpProxy,omitempty"`
	AllProxy string `json:"allProxy,omitempty"`
}

// KubernetesConfig contains Kubernetes orchestrator settings

@ -244,6 +245,7 @@ func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*
	"HTTPS_PROXY": &config.HTTPSProxy,
	"NO_PROXY": &config.NoProxy,
	"FTP_PROXY": &config.FTPProxy,
	"ALL_PROXY": &config.AllProxy,
	}
	m := runOpts
	if m == nil {
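With AllProxy added above, a per-host proxy block in config.json is expanded by ParseProxyConfig into both upper- and lower-case environment variables, with any run-time overrides taking precedence. A hedged usage sketch, built only from the fields and call signature visible in this diff (the proxy values are placeholders):

package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/configfile"
)

func main() {
	cfg := configfile.ConfigFile{
		Proxies: map[string]configfile.ProxyConfig{
			// "default" applies to every daemon this CLI talks to.
			"default": {
				HTTPProxy:  "http://proxy.mycorp.example.com:3128",
				HTTPSProxy: "https://proxy.mycorp.example.com:3129",
				NoProxy:    "*.intra.mycorp.example.com",
				AllProxy:   "socks://example.com:1234",
			},
		},
	}

	// Returns HTTP_PROXY/http_proxy, HTTPS_PROXY/https_proxy, and so on;
	// passing nil run options means only the config file values are used.
	env := cfg.ParseProxyConfig("/var/run/docker.sock", nil)
	for k, v := range env {
		if v != nil {
			fmt.Printf("%s=%s\n", k, *v)
		}
	}
}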
|
@ -27,16 +27,21 @@ func TestEncodeAuth(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestProxyConfig(t *testing.T) {
|
||||
httpProxy := "http://proxy.mycorp.com:3128"
|
||||
httpsProxy := "https://user:password@proxy.mycorp.com:3129"
|
||||
ftpProxy := "http://ftpproxy.mycorp.com:21"
|
||||
noProxy := "*.intra.mycorp.com"
|
||||
defaultProxyConfig := ProxyConfig{
|
||||
var (
|
||||
httpProxy = "http://proxy.mycorp.example.com:3128"
|
||||
httpsProxy = "https://user:password@proxy.mycorp.example.com:3129"
|
||||
ftpProxy = "http://ftpproxy.mycorp.example.com:21"
|
||||
noProxy = "*.intra.mycorp.example.com"
|
||||
allProxy = "socks://example.com:1234"
|
||||
|
||||
defaultProxyConfig = ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
AllProxy: allProxy,
|
||||
}
|
||||
)
|
||||
|
||||
cfg := ConfigFile{
|
||||
Proxies: map[string]ProxyConfig{
|
||||
|
@ -54,23 +59,28 @@ func TestProxyConfig(t *testing.T) {
|
|||
"ftp_proxy": &ftpProxy,
|
||||
"NO_PROXY": &noProxy,
|
||||
"no_proxy": &noProxy,
|
||||
"ALL_PROXY": &allProxy,
|
||||
"all_proxy": &allProxy,
|
||||
}
|
||||
assert.Check(t, is.DeepEqual(expected, proxyConfig))
|
||||
}
|
||||
|
||||
func TestProxyConfigOverride(t *testing.T) {
|
||||
httpProxy := "http://proxy.mycorp.com:3128"
|
||||
overrideHTTPProxy := "http://proxy.example.com:3128"
|
||||
overrideNoProxy := ""
|
||||
httpsProxy := "https://user:password@proxy.mycorp.com:3129"
|
||||
ftpProxy := "http://ftpproxy.mycorp.com:21"
|
||||
noProxy := "*.intra.mycorp.com"
|
||||
defaultProxyConfig := ProxyConfig{
|
||||
var (
|
||||
httpProxy = "http://proxy.mycorp.example.com:3128"
|
||||
httpProxyOverride = "http://proxy.example.com:3128"
|
||||
httpsProxy = "https://user:password@proxy.mycorp.example.com:3129"
|
||||
ftpProxy = "http://ftpproxy.mycorp.example.com:21"
|
||||
noProxy = "*.intra.mycorp.example.com"
|
||||
noProxyOverride = ""
|
||||
|
||||
defaultProxyConfig = ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
}
|
||||
)
|
||||
|
||||
cfg := ConfigFile{
|
||||
Proxies: map[string]ProxyConfig{
|
||||
|
@ -84,46 +94,49 @@ func TestProxyConfigOverride(t *testing.T) {
|
|||
}
|
||||
|
||||
ropts := map[string]*string{
|
||||
"HTTP_PROXY": clone(overrideHTTPProxy),
|
||||
"NO_PROXY": clone(overrideNoProxy),
|
||||
"HTTP_PROXY": clone(httpProxyOverride),
|
||||
"NO_PROXY": clone(noProxyOverride),
|
||||
}
|
||||
proxyConfig := cfg.ParseProxyConfig("/var/run/docker.sock", ropts)
|
||||
expected := map[string]*string{
|
||||
"HTTP_PROXY": &overrideHTTPProxy,
|
||||
"HTTP_PROXY": &httpProxyOverride,
|
||||
"http_proxy": &httpProxy,
|
||||
"HTTPS_PROXY": &httpsProxy,
|
||||
"https_proxy": &httpsProxy,
|
||||
"FTP_PROXY": &ftpProxy,
|
||||
"ftp_proxy": &ftpProxy,
|
||||
"NO_PROXY": &overrideNoProxy,
|
||||
"NO_PROXY": &noProxyOverride,
|
||||
"no_proxy": &noProxy,
|
||||
}
|
||||
assert.Check(t, is.DeepEqual(expected, proxyConfig))
|
||||
}
|
||||
|
||||
func TestProxyConfigPerHost(t *testing.T) {
|
||||
httpProxy := "http://proxy.mycorp.com:3128"
|
||||
httpsProxy := "https://user:password@proxy.mycorp.com:3129"
|
||||
ftpProxy := "http://ftpproxy.mycorp.com:21"
|
||||
noProxy := "*.intra.mycorp.com"
|
||||
var (
|
||||
httpProxy = "http://proxy.mycorp.example.com:3128"
|
||||
httpsProxy = "https://user:password@proxy.mycorp.example.com:3129"
|
||||
ftpProxy = "http://ftpproxy.mycorp.example.com:21"
|
||||
noProxy = "*.intra.mycorp.example.com"
|
||||
|
||||
extHTTPProxy := "http://proxy.example.com:3128"
|
||||
extHTTPSProxy := "https://user:password@proxy.example.com:3129"
|
||||
extFTPProxy := "http://ftpproxy.example.com:21"
|
||||
extNoProxy := "*.intra.example.com"
|
||||
extHTTPProxy = "http://proxy.example.com:3128"
|
||||
extHTTPSProxy = "https://user:password@proxy.example.com:3129"
|
||||
extFTPProxy = "http://ftpproxy.example.com:21"
|
||||
extNoProxy = "*.intra.example.com"
|
||||
|
||||
defaultProxyConfig := ProxyConfig{
|
||||
defaultProxyConfig = ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
}
|
||||
externalProxyConfig := ProxyConfig{
|
||||
|
||||
externalProxyConfig = ProxyConfig{
|
||||
HTTPProxy: extHTTPProxy,
|
||||
HTTPSProxy: extHTTPSProxy,
|
||||
FTPProxy: extFTPProxy,
|
||||
NoProxy: extNoProxy,
|
||||
}
|
||||
)
|
||||
|
||||
cfg := ConfigFile{
|
||||
Proxies: map[string]ProxyConfig{
|
||||
|
@ -226,9 +239,11 @@ func TestGetAllCredentialsCredsStore(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetAllCredentialsCredHelper(t *testing.T) {
|
||||
testCredHelperSuffix := "test_cred_helper"
|
||||
testCredHelperRegistryHostname := "credhelper.com"
|
||||
testExtraCredHelperRegistryHostname := "somethingweird.com"
|
||||
const (
|
||||
testCredHelperSuffix = "test_cred_helper"
|
||||
testCredHelperRegistryHostname = "credhelper.com"
|
||||
testExtraCredHelperRegistryHostname = "somethingweird.com"
|
||||
)
|
||||
|
||||
unexpectedCredHelperAuth := types.AuthConfig{
|
||||
Username: "file_store_user",
|
||||
|
@ -265,9 +280,11 @@ func TestGetAllCredentialsCredHelper(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetAllCredentialsFileStoreAndCredHelper(t *testing.T) {
|
||||
testFileStoreRegistryHostname := "example.com"
|
||||
testCredHelperSuffix := "test_cred_helper"
|
||||
testCredHelperRegistryHostname := "credhelper.com"
|
||||
const (
|
||||
testFileStoreRegistryHostname = "example.com"
|
||||
testCredHelperSuffix = "test_cred_helper"
|
||||
testCredHelperRegistryHostname = "credhelper.com"
|
||||
)
|
||||
|
||||
expectedFileStoreAuth := types.AuthConfig{
|
||||
Username: "file_store_user",
|
||||
|
@ -301,10 +318,12 @@ func TestGetAllCredentialsFileStoreAndCredHelper(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetAllCredentialsCredStoreAndCredHelper(t *testing.T) {
|
||||
testCredStoreSuffix := "test_creds_store"
|
||||
testCredStoreRegistryHostname := "credstore.com"
|
||||
testCredHelperSuffix := "test_cred_helper"
|
||||
testCredHelperRegistryHostname := "credhelper.com"
|
||||
const (
|
||||
testCredStoreSuffix = "test_creds_store"
|
||||
testCredStoreRegistryHostname = "credstore.com"
|
||||
testCredHelperSuffix = "test_cred_helper"
|
||||
testCredHelperRegistryHostname = "credhelper.com"
|
||||
)
|
||||
|
||||
configFile := New("filename")
|
||||
configFile.CredentialsStore = testCredStoreSuffix
|
||||
|
@ -343,9 +362,11 @@ func TestGetAllCredentialsCredStoreAndCredHelper(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetAllCredentialsCredHelperOverridesDefaultStore(t *testing.T) {
|
||||
testCredStoreSuffix := "test_creds_store"
|
||||
testCredHelperSuffix := "test_cred_helper"
|
||||
testRegistryHostname := "example.com"
|
||||
const (
|
||||
testCredStoreSuffix = "test_creds_store"
|
||||
testCredHelperSuffix = "test_cred_helper"
|
||||
testRegistryHostname = "example.com"
|
||||
)
|
||||
|
||||
configFile := New("filename")
|
||||
configFile.CredentialsStore = testCredStoreSuffix
|
||||
|
@ -424,38 +445,36 @@ func TestCheckKubernetesConfigurationRaiseAnErrorOnInvalidValue(t *testing.T) {
|
|||
expectError bool
|
||||
}{
|
||||
{
|
||||
"no kubernetes config is valid",
|
||||
nil,
|
||||
false,
|
||||
name: "no kubernetes config is valid",
|
||||
},
|
||||
{
|
||||
"enabled is valid",
|
||||
&KubernetesConfig{AllNamespaces: "enabled"},
|
||||
false,
|
||||
name: "enabled is valid",
|
||||
config: &KubernetesConfig{AllNamespaces: "enabled"},
|
||||
},
|
||||
{
|
||||
"disabled is valid",
|
||||
&KubernetesConfig{AllNamespaces: "disabled"},
|
||||
false,
|
||||
name: "disabled is valid",
|
||||
config: &KubernetesConfig{AllNamespaces: "disabled"},
|
||||
},
|
||||
{
|
||||
"empty string is valid",
|
||||
&KubernetesConfig{AllNamespaces: ""},
|
||||
false,
|
||||
name: "empty string is valid",
|
||||
config: &KubernetesConfig{AllNamespaces: ""},
|
||||
},
|
||||
{
|
||||
"other value is invalid",
|
||||
&KubernetesConfig{AllNamespaces: "unknown"},
|
||||
true,
|
||||
name: "other value is invalid",
|
||||
config: &KubernetesConfig{AllNamespaces: "unknown"},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
for _, tc := range testCases {
	test := tc
	t.Run(test.name, func(t *testing.T) {
		err := checkKubernetesConfiguration(test.config)
		if test.expectError {
			assert.Assert(t, err != nil, test.name)
		} else {
			assert.NilError(t, err, test.name)
		}
	})
}
}

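The refactor above switches the table to named struct fields and copies the loop variable (test := tc) before calling t.Run, so each subtest closure captures its own row rather than the shared iterator. A compact sketch of the same idiom, written against a hypothetical checker in a _test.go file:

package example

import "testing"

func isValid(s string) bool { return s == "" || s == "enabled" || s == "disabled" }

func TestIsValid(t *testing.T) {
	testCases := []struct {
		name        string
		input       string
		expectError bool
	}{
		{name: "empty string is valid"},
		{name: "enabled is valid", input: "enabled"},
		{name: "other value is invalid", input: "unknown", expectError: true},
	}
	for _, tc := range testCases {
		tc := tc // capture the loop variable for the closure below
		t.Run(tc.name, func(t *testing.T) {
			if gotError := !isValid(tc.input); gotError != tc.expectError {
				t.Fatalf("isValid(%q): expectError=%v, got error=%v", tc.input, tc.expectError, gotError)
			}
		})
	}
}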
|
@ -70,7 +70,7 @@ func TestFileStoreGet(t *testing.T) {
|
|||
|
||||
func TestFileStoreGetAll(t *testing.T) {
|
||||
s1 := "https://example.com"
|
||||
s2 := "https://example2.com"
|
||||
s2 := "https://example2.example.com"
|
||||
f := newStore(map[string]types.AuthConfig{
|
||||
s1: {
|
||||
Auth: "super_secret_token",
|
||||
|
@ -80,7 +80,7 @@ func TestFileStoreGetAll(t *testing.T) {
|
|||
s2: {
|
||||
Auth: "super_secret_token2",
|
||||
Email: "foo@example2.com",
|
||||
ServerAddress: "https://example2.com",
|
||||
ServerAddress: "https://example2.example.com",
|
||||
},
|
||||
})
|
||||
|
||||
|
|
|
@ -49,7 +49,7 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper
	Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
		return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args("docker", "system", "dial-stdio")...)...)
	},
	Host: "http://docker",
	Host: "http://docker.example.com",
	}, nil
}
// Future version may support plugins via ~/.docker/config.json. e.g. "dind"

@ -63,6 +63,6 @@ func GetCommandConnectionHelper(cmd string, flags ...string) (*ConnectionHelper,
	Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
		return commandconn.New(ctx, cmd, flags...)
	},
	Host: "http://docker",
	Host: "http://docker.example.com",
	}, nil
}
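Both helpers above pair a custom Dialer with a placeholder Host, so the HTTP client never resolves the host name and every request is tunnelled through the dialer instead. A generic net/http sketch of that idea, assuming a local daemon socket at /var/run/docker.sock (the commandconn and SSH plumbing is docker/cli-specific and omitted):

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	// Every request goes through this dialer; the URL host is effectively a placeholder.
	dialer := func(ctx context.Context, network, addr string) (net.Conn, error) {
		return (&net.Dialer{Timeout: 5 * time.Second}).DialContext(ctx, "unix", "/var/run/docker.sock")
	}

	client := &http.Client{
		Transport: &http.Transport{DialContext: dialer},
	}

	// "docker.example.com" is never resolved; the custom dialer picks the real endpoint.
	resp, err := client.Get("http://docker.example.com/_ping")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}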
|
@ -27,6 +27,11 @@ type EndpointMeta = context.EndpointMetaBase
type Endpoint struct {
	EndpointMeta
	TLSData *context.TLSData

	// Deprecated: Use of encrypted TLS private keys has been deprecated, and
	// will be removed in a future release. Golang has deprecated support for
	// legacy PEM encryption (as specified in RFC 1423), as it is insecure by
	// design (see https://go-review.googlesource.com/c/go/+/264159).
	TLSPassword string
}

@ -66,8 +71,9 @@ func (c *Endpoint) tlsConfig() (*tls.Config, error) {
	}

	var err error
	if x509.IsEncryptedPEMBlock(pemBlock) {
		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(c.TLSPassword))
	// TODO should we follow Golang, and deprecate RFC 1423 encryption, and produce a warning (or just error)? see https://github.com/docker/cli/issues/3212
	if x509.IsEncryptedPEMBlock(pemBlock) { //nolint: staticcheck // SA1019: x509.IsEncryptedPEMBlock is deprecated, and insecure by design
		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(c.TLSPassword)) //nolint: staticcheck // SA1019: x509.IsEncryptedPEMBlock is deprecated, and insecure by design
		if err != nil {
			return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
		}
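The nolint'd calls above keep supporting keys encrypted with the legacy RFC 1423 PEM scheme that Go has deprecated. A minimal sketch of that detection and decryption path using the same deprecated x509 helpers, shown for illustration only (the package and function names are hypothetical):

package tlsutil

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// decryptLegacyKey returns the DER bytes of a PEM-encoded private key,
// decrypting it with password when the block uses legacy RFC 1423 encryption.
func decryptLegacyKey(pemKey, password []byte) ([]byte, error) {
	block, _ := pem.Decode(pemKey)
	if block == nil {
		return nil, fmt.Errorf("no PEM data found")
	}
	if x509.IsEncryptedPEMBlock(block) { //nolint: staticcheck // SA1019: legacy encryption, insecure by design
		der, err := x509.DecryptPEMBlock(block, password) //nolint: staticcheck // SA1019
		if err != nil {
			return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err)
		}
		return der, nil
	}
	return block.Bytes, nil
}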
|
@ -90,13 +90,10 @@ func getHTTPTransport(authConfig authtypes.AuthConfig, endpoint registry.APIEndp

	modifiers := registry.Headers(userAgent, http.Header{})
	authTransport := transport.NewTransport(base, modifiers...)
	challengeManager, confirmedV2, err := registry.PingV2Registry(endpoint.URL, authTransport)
	challengeManager, err := registry.PingV2Registry(endpoint.URL, authTransport)
	if err != nil {
		return nil, errors.Wrap(err, "error pinging v2 registry")
	}
	if !confirmedV2 {
		return nil, fmt.Errorf("unsupported registry version")
	}
	if authConfig.RegistryToken != "" {
		passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
|
@ -1,18 +0,0 @@
/*Package winresources is used to embed Windows resources into docker.exe.
These resources are used to provide

* Version information
* An icon
* A Windows manifest declaring Windows version support

The resource object files are generated with go generate.
The resource source files are located in scripts/winresources.
This occurs automatically when you run scripts/build/windows.

These object files are picked up automatically by go build when this package
is included.

*/
package winresources

//go:generate ../../scripts/gen/windows-resources

@ -0,0 +1,11 @@
// Package winresources is used to embed Windows resources into docker.exe.
//
// These resources are used to provide:
// * Version information
// * An icon
// * A Windows manifest declaring Windows version support
//
// The resource object files are generated when building with goversioninfo
// in scripts/build/binary and are located in cmd/docker/winresources.
// This occurs automatically when you build against Windows OS.
package winresources
|
@ -17,7 +17,9 @@ import (
func TestClientDebugEnabled(t *testing.T) {
	defer debug.Disable()

	tcmd := newDockerCommand(&command.DockerCli{})
	cli, err := command.NewDockerCli()
	assert.NilError(t, err)
	tcmd := newDockerCommand(cli)
	tcmd.SetFlag("debug", "true")
	cmd, _, err := tcmd.HandleGlobalFlags()
	assert.NilError(t, err)
|
@ -1,3 +0,0 @@
|
|||
package main
|
||||
|
||||
import _ "github.com/docker/cli/cli/winresources"
|
|
@ -0,0 +1,8 @@
|
|||
//go:build windows && 386
|
||||
// +build windows,386
|
||||
|
||||
//go:generate goversioninfo -o=../../cli/winresources/resource.syso -icon=winresources/docker.ico -manifest=winresources/docker.exe.manifest ../../cli/winresources/versioninfo.json
|
||||
|
||||
package main
|
||||
|
||||
import _ "github.com/docker/cli/cli/winresources"
|
|
@ -0,0 +1,8 @@
|
|||
//go:build windows && amd64
|
||||
// +build windows,amd64
|
||||
|
||||
//go:generate goversioninfo -64=true -o=../../cli/winresources/resource.syso -icon=winresources/docker.ico -manifest=winresources/docker.exe.manifest ../../cli/winresources/versioninfo.json
|
||||
|
||||
package main
|
||||
|
||||
import _ "github.com/docker/cli/cli/winresources"
|
|
@ -0,0 +1,8 @@
|
|||
//go:build windows && arm
|
||||
// +build windows,arm
|
||||
|
||||
//go:generate goversioninfo -arm=true -o=../../cli/winresources/resource.syso -icon=winresources/docker.ico -manifest=winresources/docker.exe.manifest ../../cli/winresources/versioninfo.json
|
||||
|
||||
package main
|
||||
|
||||
import _ "github.com/docker/cli/cli/winresources"
|
|
@ -0,0 +1,8 @@
|
|||
//go:build windows && arm64
|
||||
// +build windows,arm64
|
||||
|
||||
//go:generate goversioninfo -arm=true -64=true -o=../../cli/winresources/resource.syso -icon=winresources/docker.ico -manifest=winresources/docker.exe.manifest ../../cli/winresources/versioninfo.json
|
||||
|
||||
package main
|
||||
|
||||
import _ "github.com/docker/cli/cli/winresources"
|
(binary image file changed: 100 KiB before, 100 KiB after)
|
@ -2560,6 +2560,7 @@ _docker_daemon() {
|
|||
--raw-logs
|
||||
--selinux-enabled
|
||||
--userland-proxy=false
|
||||
--validate
|
||||
--version -v
|
||||
"
|
||||
local options_with_args="
|
||||
|
@ -5485,6 +5486,23 @@ _docker_wait() {
|
|||
_docker_container_wait
|
||||
}
|
||||
|
||||
COMPOSE_PLUGIN_PATH=$(docker info --format '{{json .ClientInfo.Plugins}}' | sed -n 's/.*"Path":"\([^"]\+docker-compose\)".*/\1/p')
|
||||
|
||||
_docker_compose() {
|
||||
local completionCommand="__completeNoDesc"
|
||||
local resultArray=($COMPOSE_PLUGIN_PATH $completionCommand compose)
|
||||
for value in "${words[@]:2}"; do
|
||||
if [ -z "$value" ]; then
|
||||
resultArray+=( "''" )
|
||||
else
|
||||
resultArray+=( "$value" )
|
||||
fi
|
||||
done
|
||||
local result=$(eval "${resultArray[*]}" 2> /dev/null | grep -v '^:[0-9]*$')
|
||||
|
||||
COMPREPLY=( $(compgen -W "${result}" -- "$current") )
|
||||
}
|
||||
|
||||
_docker() {
|
||||
local previous_extglob_setting=$(shopt -p extglob)
|
||||
shopt -s extglob
|
||||
|
@ -5554,11 +5572,17 @@ _docker() {
|
|||
wait
|
||||
)
|
||||
|
||||
local known_plugin_commands=()
|
||||
|
||||
if [ -f "$COMPOSE_PLUGIN_PATH" ] ; then
|
||||
known_plugin_commands+=("compose")
|
||||
fi
|
||||
|
||||
local experimental_server_commands=(
|
||||
checkpoint
|
||||
)
|
||||
|
||||
local commands=(${management_commands[*]} ${top_level_commands[*]})
|
||||
local commands=(${management_commands[*]} ${top_level_commands[*]} ${known_plugin_commands[*]})
|
||||
[ -z "$DOCKER_HIDE_LEGACY_COMMANDS" ] && commands+=(${legacy_commands[*]})
|
||||
|
||||
# These options are valid as global options for all client commands
|
||||
|
|
|
@ -2791,7 +2791,8 @@ __docker_subcommand() {
|
|||
"($help)--tlsverify[Use TLS and verify the remote]" \
|
||||
"($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \
|
||||
"($help)--userland-proxy[Use userland proxy for loopback traffic]" \
|
||||
"($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" && ret=0
|
||||
"($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" \
|
||||
"($help)--validate[Validate daemon configuration and exit]" && ret=0
|
||||
|
||||
case $state in
|
||||
(cluster-store)
|
||||
|
|
|
@ -0,0 +1,99 @@
|
|||
variable "VERSION" {
|
||||
default = ""
|
||||
}
|
||||
variable "USE_GLIBC" {
|
||||
default = ""
|
||||
}
|
||||
variable "STRIP_TARGET" {
|
||||
default = ""
|
||||
}
|
||||
variable "IMAGE_NAME" {
|
||||
default = "docker-cli"
|
||||
}
|
||||
|
||||
# Sets the name of the company that produced the windows binary.
|
||||
variable "COMPANY_NAME" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
group "default" {
|
||||
targets = ["binary"]
|
||||
}
|
||||
|
||||
target "binary" {
|
||||
target = "binary"
|
||||
platforms = ["local"]
|
||||
output = ["build"]
|
||||
args = {
|
||||
BASE_VARIANT = USE_GLIBC != "" ? "buster" : "alpine"
|
||||
VERSION = VERSION
|
||||
COMPANY_NAME = COMPANY_NAME
|
||||
GO_STRIP = STRIP_TARGET
|
||||
}
|
||||
}
|
||||
|
||||
target "dynbinary" {
|
||||
inherits = ["binary"]
|
||||
args = {
|
||||
GO_LINKMODE = "dynamic"
|
||||
}
|
||||
}
|
||||
|
||||
target "plugins" {
|
||||
target = "plugins"
|
||||
platforms = ["local"]
|
||||
output = ["build"]
|
||||
args = {
|
||||
BASE_VARIANT = USE_GLIBC != "" ? "buster" : "alpine"
|
||||
VERSION = VERSION
|
||||
GO_STRIP = STRIP_TARGET
|
||||
}
|
||||
}
|
||||
|
||||
target "platforms" {
|
||||
platforms = concat(["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x", "darwin/amd64", "darwin/arm64", "windows/amd64", "windows/arm", "windows/386"], USE_GLIBC!=""?[]:["windows/arm64"])
|
||||
}
|
||||
|
||||
target "cross" {
|
||||
inherits = ["binary", "platforms"]
|
||||
}
|
||||
|
||||
target "dynbinary-cross" {
|
||||
inherits = ["dynbinary", "platforms"]
|
||||
}
|
||||
|
||||
target "plugins-cross" {
|
||||
inherits = ["plugins", "platforms"]
|
||||
}
|
||||
|
||||
target "lint" {
|
||||
dockerfile = "./dockerfiles/Dockerfile.lint"
|
||||
target = "lint"
|
||||
output = ["type=cacheonly"]
|
||||
}
|
||||
|
||||
target "shellcheck" {
|
||||
dockerfile = "./dockerfiles/Dockerfile.shellcheck"
|
||||
target = "shellcheck"
|
||||
output = ["type=cacheonly"]
|
||||
}
|
||||
|
||||
target "test" {
|
||||
target = "test"
|
||||
output = ["type=cacheonly"]
|
||||
}
|
||||
|
||||
target "test-coverage" {
|
||||
target = "test-coverage"
|
||||
output = ["build/coverage"]
|
||||
}
|
||||
|
||||
target "e2e-image" {
|
||||
target = "e2e"
|
||||
output = ["type=docker"]
|
||||
tags = ["${IMAGE_NAME}"]
|
||||
args = {
|
||||
BASE_VARIANT = USE_GLIBC != "" ? "buster" : "alpine"
|
||||
VERSION = VERSION
|
||||
}
|
||||
}
|
129 docker.Makefile
|
@ -9,12 +9,11 @@ DOCKER_CLI_MOUNTS ?= -v "$(CURDIR)":/go/src/github.com/docker/cli
|
|||
DOCKER_CLI_CONTAINER_NAME ?=
|
||||
DOCKER_CLI_GO_BUILD_CACHE ?= y
|
||||
|
||||
# Sets the name of the company that produced the windows binary.
|
||||
COMPANY_NAME ?=
|
||||
|
||||
DEV_DOCKER_IMAGE_NAME = docker-cli-dev$(IMAGE_TAG)
|
||||
BINARY_NATIVE_IMAGE_NAME = docker-cli-native$(IMAGE_TAG)
|
||||
LINTER_IMAGE_NAME = docker-cli-lint$(IMAGE_TAG)
|
||||
CROSS_IMAGE_NAME = docker-cli-cross$(IMAGE_TAG)
|
||||
VALIDATE_IMAGE_NAME = docker-cli-shell-validate$(IMAGE_TAG)
|
||||
E2E_IMAGE_NAME = docker-cli-e2e$(IMAGE_TAG)
|
||||
E2E_IMAGE_NAME = docker-cli-e2e
|
||||
E2E_ENGINE_VERSION ?=
|
||||
CACHE_VOLUME_NAME := docker-cli-dev-cache
|
||||
ifeq ($(DOCKER_CLI_GO_BUILD_CACHE),y)
|
||||
|
@ -32,73 +31,33 @@ build_docker_image:
|
|||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.dev | docker build ${DOCKER_BUILD_ARGS} --build-arg=GO_VERSION -t $(DEV_DOCKER_IMAGE_NAME) -
|
||||
|
||||
# build docker image having the linting tools (dockerfiles/Dockerfile.lint)
|
||||
.PHONY: build_linter_image
|
||||
build_linter_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.lint | docker build ${DOCKER_BUILD_ARGS} --build-arg=GO_VERSION -t $(LINTER_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_cross_image
|
||||
build_cross_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.cross | docker build ${DOCKER_BUILD_ARGS} --build-arg=GO_VERSION -t $(CROSS_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_shell_validate_image
|
||||
build_shell_validate_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.shellcheck | docker build --build-arg=GO_VERSION -t $(VALIDATE_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_binary_native_image
|
||||
build_binary_native_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.binary-native | docker build --build-arg=GO_VERSION -t $(BINARY_NATIVE_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_e2e_image
|
||||
build_e2e_image:
|
||||
docker build -t $(E2E_IMAGE_NAME) --build-arg=GO_VERSION --build-arg VERSION=$(VERSION) --build-arg GITCOMMIT=$(GITCOMMIT) -f ./dockerfiles/Dockerfile.e2e .
|
||||
|
||||
DOCKER_RUN_NAME_OPTION := $(if $(DOCKER_CLI_CONTAINER_NAME),--name $(DOCKER_CLI_CONTAINER_NAME),)
|
||||
DOCKER_RUN := docker run --rm $(ENVVARS) $(DOCKER_CLI_MOUNTS) $(DOCKER_RUN_NAME_OPTION)
|
||||
|
||||
binary: build_binary_native_image ## build the CLI
|
||||
$(DOCKER_RUN) $(BINARY_NATIVE_IMAGE_NAME)
|
||||
.PHONY: binary
|
||||
binary:
|
||||
COMPANY_NAME=$(COMPANY_NAME) docker buildx bake binary
|
||||
|
||||
build: binary ## alias for binary
|
||||
|
||||
plugins: build_binary_native_image ## build the CLI plugin examples
|
||||
$(DOCKER_RUN) $(BINARY_NATIVE_IMAGE_NAME) ./scripts/build/plugins
|
||||
plugins: ## build the CLI plugin examples
|
||||
docker buildx bake plugins
|
||||
|
||||
plugins-cross: ## build the CLI plugin examples for all platforms
|
||||
docker buildx bake plugins-cross
|
||||
|
||||
.PHONY: clean
|
||||
clean: build_docker_image ## clean build artifacts
|
||||
$(DOCKER_RUN) $(DEV_DOCKER_IMAGE_NAME) make clean
|
||||
docker volume rm -f $(CACHE_VOLUME_NAME)
|
||||
|
||||
.PHONY: test-unit
|
||||
test-unit: build_docker_image ## run unit tests (using go test)
|
||||
$(DOCKER_RUN) $(DEV_DOCKER_IMAGE_NAME) make test-unit
|
||||
|
||||
.PHONY: test ## run unit and e2e tests
|
||||
test: test-unit test-e2e
|
||||
|
||||
.PHONY: cross
|
||||
cross: build_cross_image ## build the CLI for macOS and Windows
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make cross
|
||||
cross:
|
||||
COMPANY_NAME=$(COMPANY_NAME) docker buildx bake cross
|
||||
|
||||
.PHONY: binary-windows
|
||||
binary-windows: build_cross_image ## build the CLI for Windows
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make $@
|
||||
|
||||
.PHONY: plugins-windows
|
||||
plugins-windows: build_cross_image ## build the example CLI plugins for Windows
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make $@
|
||||
|
||||
.PHONY: binary-osx
|
||||
binary-osx: build_cross_image ## build the CLI for macOS
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make $@
|
||||
|
||||
.PHONY: plugins-osx
|
||||
plugins-osx: build_cross_image ## build the example CLI plugins for macOS
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make $@
|
||||
.PHONY: dynbinary
|
||||
dynbinary: ## build dynamically linked binary
|
||||
USE_GLIBC=1 COMPANY_NAME=$(COMPANY_NAME) docker buildx bake dynbinary
|
||||
|
||||
.PHONY: dev
|
||||
dev: build_docker_image ## start a build container in interactive mode for in-container development
|
||||
|
@ -109,8 +68,12 @@ dev: build_docker_image ## start a build container in interactive mode for in-co
|
|||
shell: dev ## alias for dev
|
||||
|
||||
.PHONY: lint
|
||||
lint: build_linter_image ## run linters
|
||||
$(DOCKER_RUN) -it $(LINTER_IMAGE_NAME)
|
||||
lint: ## run linters
|
||||
docker buildx bake lint
|
||||
|
||||
.PHONY: shellcheck
|
||||
shellcheck: ## run shellcheck validation
|
||||
docker buildx bake shellcheck
|
||||
|
||||
.PHONY: fmt
|
||||
fmt: ## run gofmt
|
||||
|
@ -120,17 +83,10 @@ fmt: ## run gofmt
|
|||
vendor: build_docker_image vendor.conf ## download dependencies (vendor/) listed in vendor.conf
|
||||
$(DOCKER_RUN) -it $(DEV_DOCKER_IMAGE_NAME) make vendor
|
||||
|
||||
dynbinary: build_cross_image ## build the CLI dynamically linked
|
||||
$(DOCKER_RUN) -it $(CROSS_IMAGE_NAME) make dynbinary
|
||||
|
||||
.PHONY: authors
|
||||
authors: ## generate AUTHORS file from git history
|
||||
$(DOCKER_RUN) -it $(DEV_DOCKER_IMAGE_NAME) make authors
|
||||
|
||||
.PHONY: compose-jsonschema
|
||||
compose-jsonschema: build_docker_image ## generate compose-file schemas
|
||||
$(DOCKER_RUN) -it $(DEV_DOCKER_IMAGE_NAME) make compose-jsonschema
|
||||
|
||||
.PHONY: manpages
|
||||
manpages: build_docker_image ## generate man pages from go source and markdown
|
||||
$(DOCKER_RUN) -it $(DEV_DOCKER_IMAGE_NAME) make manpages
|
||||
|
@ -139,24 +95,45 @@ manpages: build_docker_image ## generate man pages from go source and markdown
|
|||
yamldocs: build_docker_image ## generate documentation YAML files consumed by docs repo
|
||||
$(DOCKER_RUN) -it $(DEV_DOCKER_IMAGE_NAME) make yamldocs
|
||||
|
||||
.PHONY: shellcheck
|
||||
shellcheck: build_shell_validate_image ## run shellcheck validation
|
||||
$(DOCKER_RUN) -it $(VALIDATE_IMAGE_NAME) make shellcheck
|
||||
.PHONY: test ## run unit and e2e tests
|
||||
test: test-unit test-e2e
|
||||
|
||||
.PHONY: test-unit
|
||||
test-unit: ## run unit tests
|
||||
docker buildx bake test
|
||||
|
||||
.PHONY: test-coverage
|
||||
test-coverage: ## run test with coverage
|
||||
docker buildx bake test-coverage
|
||||
|
||||
.PHONY: build-e2e-image
|
||||
build-e2e-image:
|
||||
mkdir -p $(CURDIR)/build/coverage
|
||||
IMAGE_NAME=$(E2E_IMAGE_NAME) VERSION=$(VERSION) docker buildx bake e2e-image
|
||||
|
||||
.PHONY: test-e2e
|
||||
test-e2e: test-e2e-non-experimental test-e2e-experimental test-e2e-connhelper-ssh ## run all e2e tests
|
||||
|
||||
.PHONY: test-e2e-experimental
|
||||
test-e2e-experimental: build_e2e_image # run experimental e2e tests
|
||||
docker run --rm --mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock $(ENVVARS) -e DOCKERD_EXPERIMENTAL=1 $(E2E_IMAGE_NAME)
|
||||
test-e2e-experimental: build-e2e-image # run experimental e2e tests
|
||||
docker run --rm $(ENVVARS) -e DOCKERD_EXPERIMENTAL=1 -e TEST_ENGINE_VERSION=$(E2E_ENGINE_VERSION) \
|
||||
--mount type=bind,src=$(CURDIR)/build/coverage,dst=/tmp/coverage \
|
||||
--mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
|
||||
$(E2E_IMAGE_NAME)
|
||||
|
||||
.PHONY: test-e2e-non-experimental
|
||||
test-e2e-non-experimental: build_e2e_image # run non-experimental e2e tests
|
||||
docker run --rm --mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock $(ENVVARS) -e TEST_ENGINE_VERSION=$(E2E_ENGINE_VERSION) $(E2E_IMAGE_NAME)
|
||||
test-e2e-non-experimental: build-e2e-image # run non-experimental e2e tests
|
||||
docker run --rm $(ENVVARS) -e TEST_ENGINE_VERSION=$(E2E_ENGINE_VERSION) \
|
||||
--mount type=bind,src=$(CURDIR)/build/coverage,dst=/tmp/coverage \
|
||||
--mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
|
||||
$(E2E_IMAGE_NAME)
|
||||
|
||||
.PHONY: test-e2e-connhelper-ssh
|
||||
test-e2e-connhelper-ssh: build_e2e_image # run experimental SSH-connection helper e2e tests
|
||||
docker run --rm --mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock $(ENVVARS) -e DOCKERD_EXPERIMENTAL=1 -e TEST_CONNHELPER=ssh $(E2E_IMAGE_NAME)
|
||||
test-e2e-connhelper-ssh: build-e2e-image # run experimental SSH-connection helper e2e tests
|
||||
docker run --rm $(ENVVARS) -e DOCKERD_EXPERIMENTAL=1 -e TEST_ENGINE_VERSION=$(E2E_ENGINE_VERSION) -e TEST_CONNHELPER=ssh \
|
||||
--mount type=bind,src=$(CURDIR)/build/coverage,dst=/tmp/coverage \
|
||||
--mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
|
||||
$(E2E_IMAGE_NAME)
|
||||
|
||||
.PHONY: help
|
||||
help: ## print this help
|
||||
|
|
|
@ -1,10 +0,0 @@
|
|||
ARG GO_VERSION=1.13.15
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
RUN apk add -U git bash coreutils gcc musl-dev
|
||||
|
||||
ENV CGO_ENABLED=0 \
|
||||
DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
CMD ./scripts/build/binary
|
|
@ -1,6 +0,0 @@
|
|||
ARG GO_VERSION=1.13.15
|
||||
|
||||
FROM dockercore/golang-cross:${GO_VERSION}
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
COPY . .
|
|
@ -1,29 +1,30 @@
|
|||
# syntax=docker/dockerfile:1.1.7-experimental
|
||||
ARG GO_VERSION=1.13.15
|
||||
# syntax=docker/dockerfile:1.3
|
||||
|
||||
ARG GO_VERSION=1.16.11
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine AS golang
|
||||
ENV CGO_ENABLED=0
|
||||
|
||||
FROM golang AS esc
|
||||
ARG ESC_VERSION=v0.2.0
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=tmpfs,target=/go/src/ \
|
||||
GO111MODULE=on go get github.com/mjibson/esc@${ESC_VERSION}
|
||||
|
||||
FROM golang AS gotestsum
|
||||
ARG GOTESTSUM_VERSION=v0.4.0
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=tmpfs,target=/go/src/ \
|
||||
GO111MODULE=on go get gotest.tools/gotestsum@${GOTESTSUM_VERSION}
|
||||
GO111MODULE=on go install gotest.tools/gotestsum@${GOTESTSUM_VERSION}
|
||||
|
||||
FROM golang AS vndr
|
||||
ARG VNDR_VERSION=v0.1.2
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=tmpfs,target=/go/src/ \
|
||||
GO111MODULE=on go get github.com/LK4D4/vndr@${VNDR_VERSION}
|
||||
GO111MODULE=on go install github.com/LK4D4/vndr@${VNDR_VERSION}
|
||||
|
||||
FROM golang AS goversioninfo
|
||||
ARG GOVERSIONINFO_VERSION=v1.3.0
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=tmpfs,target=/go/src/ \
|
||||
GO111MODULE=on go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@${GOVERSIONINFO_VERSION}
|
||||
|
||||
FROM golang AS dev
|
||||
RUN apk add --no-cache \
|
||||
|
@ -38,9 +39,10 @@ CMD bash
|
|||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
ENV PATH=$PATH:/go/src/github.com/docker/cli/build
|
||||
|
||||
COPY --from=esc /go/bin/* /go/bin/
|
||||
COPY --from=vndr /go/bin/* /go/bin/
|
||||
COPY --from=gotestsum /go/bin/* /go/bin/
|
||||
COPY --from=goversioninfo /go/bin/* /go/bin/
|
||||
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ENV GO111MODULE=auto
|
||||
COPY . .
|
||||
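The `dev` stage collects the helper tools built in the earlier stages, each compiled with BuildKit cache mounts so repeat builds stay fast. A rough sketch of building and entering this image; the `dockerfiles/Dockerfile.dev` path and the `cli-dev` tag are assumptions on my part:

```console
$ # requires BuildKit for the cache and tmpfs mounts used above
$ DOCKER_BUILDKIT=1 docker build --progress=plain -f dockerfiles/Dockerfile.dev --tag cli-dev .
$ docker run --rm -it cli-dev   # CMD is bash, so this drops into a shell with the Go tools on PATH
```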
|
|
|
@ -1,46 +0,0 @@
|
|||
ARG GO_VERSION=1.13.15
|
||||
|
||||
# Use Debian based image as docker-compose requires glibc.
|
||||
FROM golang:${GO_VERSION}-buster
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
build-essential \
|
||||
curl \
|
||||
openssl \
|
||||
openssh-client \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ARG COMPOSE_VERSION=1.25.1
|
||||
RUN curl -fsSL https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose \
|
||||
&& chmod +x /usr/local/bin/docker-compose
|
||||
|
||||
ARG NOTARY_VERSION=v0.6.1
|
||||
RUN curl -fsSL https://github.com/theupdateframework/notary/releases/download/${NOTARY_VERSION}/notary-Linux-amd64 -o /usr/local/bin/notary \
|
||||
&& chmod +x /usr/local/bin/notary
|
||||
|
||||
ARG GOTESTSUM_VERSION=0.4.0
|
||||
RUN curl -fsSL https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_linux_amd64.tar.gz -o gotestsum.tar.gz \
|
||||
&& tar -xf gotestsum.tar.gz gotestsum \
|
||||
&& mv gotestsum /usr/local/bin/gotestsum \
|
||||
&& rm gotestsum.tar.gz
|
||||
|
||||
ENV CGO_ENABLED=0 \
|
||||
DISABLE_WARN_OUTSIDE_CONTAINER=1 \
|
||||
PATH=/go/src/github.com/docker/cli/build:$PATH
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
|
||||
# Trust notary CA cert.
|
||||
COPY e2e/testdata/notary/root-ca.cert /usr/share/ca-certificates/notary.cert
|
||||
RUN echo 'notary.cert' >> /etc/ca-certificates.conf && update-ca-certificates
|
||||
|
||||
COPY . .
|
||||
ARG VERSION
|
||||
ARG GITCOMMIT
|
||||
ENV VERSION=${VERSION}
|
||||
ENV GITCOMMIT=${GITCOMMIT}
|
||||
ENV DOCKER_BUILDKIT=1
|
||||
ENV COMPOSE_DOCKER_CLI_BUILD=1
|
||||
RUN ./scripts/build/binary
|
||||
RUN ./scripts/build/plugins e2e/cli-plugins/plugins/*
|
||||
|
||||
CMD ./scripts/test/e2e/entry
|
|
@ -1,23 +1,16 @@
|
|||
# syntax=docker/dockerfile:1.1.3-experimental
|
||||
# syntax=docker/dockerfile:1.3
|
||||
|
||||
ARG GO_VERSION=1.13.15
|
||||
ARG GOLANGCI_LINTER_SHA="v1.21.0"
|
||||
ARG GO_VERSION=1.16.11
|
||||
ARG GOLANGCI_LINT_VERSION=v1.23.8
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine AS build
|
||||
ENV CGO_ENABLED=0
|
||||
RUN apk add --no-cache git
|
||||
ARG GOLANGCI_LINTER_SHA
|
||||
ARG GO111MODULE=on
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
go get github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINTER_SHA}
|
||||
FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine AS lint
|
||||
ENV GO111MODULE=off
|
||||
ENV CGO_ENABLED=0
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
COPY --from=build /go/bin/golangci-lint /usr/local/bin
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ENV GOGC=75
|
||||
ENTRYPOINT ["/usr/local/bin/golangci-lint"]
|
||||
CMD ["run", "--config=.golangci.yml"]
|
||||
COPY . .
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
COPY --from=golangci-lint /usr/bin/golangci-lint /usr/bin/golangci-lint
|
||||
RUN --mount=type=bind,target=. \
|
||||
--mount=type=cache,target=/root/.cache \
|
||||
golangci-lint run
|
||||
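The lint stage now copies `golangci-lint` out of the official image and runs it over a bind-mounted source tree. Outside the container, a roughly equivalent check might look like the sketch below, assuming a compatible golangci-lint (v1.23.8 above) is on `PATH` and the repository's `.golangci.yml` configuration is present:

```console
$ # from the repository root
$ golangci-lint run --config=.golangci.yml
```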
|
|
|
@ -1,5 +1,7 @@
|
|||
FROM koalaman/shellcheck-alpine:v0.7.1
|
||||
RUN apk add --no-cache bash make
|
||||
# syntax=docker/dockerfile:1.3
|
||||
|
||||
FROM koalaman/shellcheck-alpine:v0.7.1 AS shellcheck
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
COPY . .
|
||||
RUN --mount=type=bind,target=. \
|
||||
set -eo pipefail; \
|
||||
find scripts/ contrib/completion/bash -type f | grep -v scripts/winresources | grep -v '.*.ps1' | xargs shellcheck
|
||||
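The same check can be run against an individual script without building the image; a sketch, assuming `shellcheck` is installed locally and picking one of the scripts referenced elsewhere in this change:

```console
$ shellcheck scripts/build/binary
```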
|
|
|
@ -50,9 +50,11 @@ The table below provides an overview of the current status of deprecated feature
|
|||
|
||||
Status | Feature | Deprecated | Remove
|
||||
-----------|------------------------------------------------------------------------------------------------------------------------------------|------------|------------
|
||||
Deprecated | [Support for encrypted TLS private keys](#support-for-encrypted-tls-private-keys) | v20.10 | -
|
||||
Deprecated | [Kubernetes stack and context support](#kubernetes-stack-and-context-support) | v20.10 | -
|
||||
Deprecated | [Pulling images from non-compliant image registries](#pulling-images-from-non-compliant-image-registries) | v20.10 | -
|
||||
Deprecated | [Linux containers on Windows (LCOW)](#linux-containers-on-windows-lcow-experimental) | v20.10 | -
|
||||
Deprecated | [BLKIO weight options with cgroups v1](#blkio-weight-options–with-cgroups-v1) | v20.10 | -
|
||||
Removed | [Linux containers on Windows (LCOW)](#linux-containers-on-windows-lcow-experimental) | v20.10 | v21.xx
|
||||
Deprecated | [BLKIO weight options with cgroups v1](#blkio-weight-options-with-cgroups-v1) | v20.10 | -
|
||||
Deprecated | [Kernel memory limit](#kernel-memory-limit) | v20.10 | -
|
||||
Deprecated | [Classic Swarm and overlay networks using external key/value stores](#classic-swarm-and-overlay-networks-using-cluster-store) | v20.10 | -
|
||||
Deprecated | [Support for the legacy `~/.dockercfg` configuration file for authentication](#support-for-legacy-dockercfg-configuration-files) | v20.10 | -
|
||||
|
@ -97,6 +99,22 @@ Removed | [`--api-enable-cors` flag on `dockerd`](#--api-enable-cors-flag-on-
|
|||
Removed | [`--run` flag on `docker commit`](#--run-flag-on-docker-commit) | v0.10 | v1.13
|
||||
Removed | [Three arguments form in `docker import`](#three-arguments-form-in-docker-import) | v0.6.7 | v1.12
|
||||
|
||||
### Support for encrypted TLS private keys
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
|
||||
Use of encrypted TLS private keys has been deprecated, and will be removed in a
|
||||
future release. Golang has deprecated support for legacy PEM encryption (as
|
||||
specified in [RFC 1423](https://datatracker.ietf.org/doc/html/rfc1423)), as it
|
||||
is insecure by design (see [https://go-review.googlesource.com/c/go/+/264159](https://go-review.googlesource.com/c/go/+/264159)).
|
||||
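If a client certificate key is currently protected with a passphrase, one way to prepare for the removal is to store an unencrypted copy of the key instead; a minimal sketch using openssl, where the path is illustrative and the key is assumed to be an RSA key in the legacy encrypted PEM format:

```console
$ # prompts for the passphrase, then writes the key without encryption
$ openssl rsa -in ~/.docker/key.pem -out ~/.docker/key.plain.pem
$ chmod 600 ~/.docker/key.plain.pem
```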
|
||||
### Kubernetes stack and context support
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
|
||||
Following the deprecation of [Compose on Kubernetes](https://github.com/docker/compose-on-kubernetes), support for
|
||||
Kubernetes in the `stack` and `context` commands in the docker CLI is now marked as deprecated as well.
|
||||
|
||||
### Pulling images from non-compliant image registries
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
|
@ -132,6 +150,7 @@ major release.
|
|||
### Linux containers on Windows (LCOW) (experimental)
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
**Removed in Release: v21.xx**
|
||||
|
||||
The experimental feature to run Linux containers on Windows (LCOW) was introduced
|
||||
as a technical preview in Docker 17.09. While many enhancements were made after
|
||||
|
@ -541,7 +560,7 @@ See the events API documentation for the new format.
|
|||
|
||||
**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)**
|
||||
|
||||
To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is not longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use.
|
||||
To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is no longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use.
|
||||
|
||||
### HostConfig at API container start
|
||||
**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)**
|
||||
|
@ -570,9 +589,9 @@ Log tags are now generated in a standard way across different logging drivers.
|
|||
As a result, the driver-specific log tag options `syslog-tag`, `gelf-tag` and
|
||||
`fluentd-tag` have been deprecated in favor of the generic `tag` option.
|
||||
|
||||
```bash
|
||||
```console
|
||||
{% raw %}
|
||||
docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"
|
||||
$ docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ enabled, and use it to create a volume.
|
|||
|
||||
1. Install the `sshfs` plugin.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin install vieux/sshfs
|
||||
|
||||
Plugin "vieux/sshfs" is requesting the following privileges:
|
||||
|
@ -74,7 +74,7 @@ enabled, and use it to create a volume.
|
|||
|
||||
2. Check that the plugin is enabled in the output of `docker plugin ls`.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin ls
|
||||
|
||||
ID NAME TAG DESCRIPTION ENABLED
|
||||
|
@ -87,7 +87,7 @@ enabled, and use it to create a volume.
|
|||
|
||||
This volume can now be mounted into containers.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker volume create \
|
||||
-d vieux/sshfs \
|
||||
--name sshvolume \
|
||||
|
@ -96,9 +96,10 @@ enabled, and use it to create a volume.
|
|||
|
||||
sshvolume
|
||||
```
|
||||
|
||||
4. Verify that the volume was created successfully.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker volume ls
|
||||
|
||||
DRIVER NAME
|
||||
|
@ -107,22 +108,23 @@ enabled, and use it to create a volume.
|
|||
|
||||
5. Start a container that uses the volume `sshvolume`.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run --rm -v sshvolume:/data busybox ls /data
|
||||
|
||||
<content of /remote on machine 1.2.3.4>
|
||||
```
|
||||
|
||||
6. Remove the volume `sshvolume`
|
||||
```bash
|
||||
docker volume rm sshvolume
|
||||
```console
|
||||
$ docker volume rm sshvolume
|
||||
|
||||
sshvolume
|
||||
```
|
||||
|
||||
To disable a plugin, use the `docker plugin disable` command. To completely
|
||||
remove it, use the `docker plugin remove` command. For other available
|
||||
commands and options, see the
|
||||
[command line reference](../reference/commandline/index.md).
|
||||
[command line reference](https://docs.docker.com/engine/reference/commandline/cli/).
|
||||
|
||||
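For example, to retire the plugin installed earlier (continuing the `vieux/sshfs` example; these are the standard plugin subcommands rather than anything specific to this guide):

```console
$ docker plugin disable vieux/sshfs
$ docker plugin rm vieux/sshfs
```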
|
||||
## Developing a plugin
|
||||
|
@ -134,7 +136,7 @@ example, it was created from a Dockerfile:
|
|||
>**Note:** The `/run/docker/plugins` directory is mandatory inside of the
|
||||
plugin's filesystem for docker to communicate with the plugin.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ git clone https://github.com/vieux/docker-volume-sshfs
|
||||
$ cd docker-volume-sshfs
|
||||
$ docker build -t rootfsimage .
|
||||
|
@ -193,13 +195,13 @@ Stdout of a plugin is redirected to dockerd logs. Such entries have a
|
|||
`f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62` and their
|
||||
corresponding log entries in the docker daemon logs.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin install tiborvass/sample-volume-plugin
|
||||
|
||||
INFO[0036] Starting... Found 0 volumes on startup plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker volume create -d tiborvass/sample-volume-plugin samplevol
|
||||
|
||||
INFO[0193] Create Called... Ensuring directory /data/samplevol exists on host... plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
|
||||
|
@ -208,7 +210,7 @@ INFO[0193] Created volume samplevol with mountpoint /data/samp
|
|||
INFO[0193] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -v samplevol:/tmp busybox sh
|
||||
|
||||
INFO[0421] Get Called... Found samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
|
||||
|
@ -223,7 +225,7 @@ INFO[0421] Unmount Called... Unmounted samplevol plugin=f52a3df433b9a
|
|||
plugins. This is specifically useful to collect plugin logs if they are
|
||||
redirected to a file.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins list
|
||||
|
||||
ID PID STATUS BUNDLE CREATED OWNER
|
||||
|
@ -232,13 +234,14 @@ ID PID S
|
|||
c5bb4b90941efcaccca999439ed06d6a6affdde7081bb34dc84126b57b3e793d 14984 running /run/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby-plugins/c5bb4b90941efcaccca999439ed06d6a6affdde7081bb34dc84126b57b3e793d 2018-02-08T21:35:12.321288966Z root
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins exec 93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 cat /var/log/plugin.log
|
||||
```
|
||||
|
||||
If the plugin has a built-in shell, you can exec into the plugin as
|
||||
follows:
|
||||
```bash
|
||||
|
||||
```console
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins exec -t 93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 sh
|
||||
```
|
||||
|
||||
|
the plugin is listening on that socket. For a well-functioning plugin,
|
|||
these basic requests should work. Note that plugin sockets are available on the host under `/var/run/docker/plugins/<pluginID>`
|
||||
|
||||
|
||||
```bash
|
||||
curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List
|
||||
```console
|
||||
$ curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List
|
||||
|
||||
{"Mountpoint":"","Err":"","Volumes":[{"Name":"myvol1","Mountpoint":"/data/myvol1"},{"Name":"myvol2","Mountpoint":"/data/myvol2"}],"Volume":null}
|
||||
```
|
||||
|
||||
```bash
|
||||
curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities
|
||||
```console
|
||||
$ curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities
|
||||
|
||||
{"Scope":"local"}
|
||||
```
|
||||
|
||||
When using curl 7.50 and above, the URL should be of the form
|
||||
`http://hostname/APICall`, where `hostname` is the valid hostname where the
|
||||
plugin is installed and `APICall` is the call to the plugin API.
|
||||
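As a concrete illustration of the hostname form, the first request above could equally be written with a full URL; a sketch reusing the same socket path from the earlier example:

```console
$ curl -H "Content-Type: application/json" -XPOST -d '{}' \
    --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock \
    http://localhost/VolumeDriver.List
```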
|
|
|
@ -72,7 +72,7 @@ The sections below provide an inexhaustive overview of available plugins.
|
|||
| [Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3. |
|
||||
| [HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays. |
|
||||
| [Infinit volume plugin](https://infinit.sh/documentation/docker/volume-plugin) | A volume plugin that makes it easy to mount and manage Infinit volumes using Docker. |
|
||||
| [IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. |
|
||||
| [IPFS Volume Plugin](https://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. |
|
||||
| [Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository. |
|
||||
| [Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you to specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`. |
|
||||
| [NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future. |
|
||||
|
@ -80,7 +80,7 @@ The sections below provide an inexhaustive overview of available plugins.
|
|||
| [Nimble Storage Volume Plugin](https://connect.nimblestorage.com/community/app-integration/docker) | A volume plug-in that integrates with Nimble Storage Unified Flash Fabric arrays. The plug-in abstracts array volume capabilities to the Docker administrator to allow self-provisioning of secure multi-tenant volumes and clones. |
|
||||
| [OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. |
|
||||
| [Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler. |
|
||||
| [Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. |
|
||||
| [Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](https://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. |
|
||||
| [REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. |
|
||||
| [Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices. |
|
||||
| [VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments. |
|
||||
|
|
|
@ -90,7 +90,7 @@ The `TLSConfig` field is optional and TLS will only be verified if this configur
|
|||
Plugins should be started before Docker, and stopped after Docker. For
|
||||
example, when packaging a plugin for a platform which supports `systemd`, you
|
||||
might use [`systemd` dependencies](
|
||||
http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
|
||||
https://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
|
||||
manage startup and shutdown order.
|
||||
|
||||
When upgrading a plugin, you should first stop the Docker daemon, upgrade the
|
||||
|
@ -114,7 +114,7 @@ a `service` file and a `socket` file.
|
|||
|
||||
The `service` file (for example `/lib/systemd/system/your-plugin.service`):
|
||||
|
||||
```
|
||||
```systemd
|
||||
[Unit]
|
||||
Description=Your plugin
|
||||
Before=docker.service
|
||||
|
@ -127,9 +127,10 @@ ExecStart=/usr/lib/docker/your-plugin
|
|||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
The `socket` file (for example `/lib/systemd/system/your-plugin.socket`):
|
||||
|
||||
```
|
||||
```systemd
|
||||
[Unit]
|
||||
Description=Your plugin
|
||||
|
||||
|
@ -166,7 +167,8 @@ Plugins are activated via the following "handshake" API call.
|
|||
**Request:** empty body
|
||||
|
||||
**Response:**
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"Implements": ["VolumeDriver"]
|
||||
}
|
||||
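For a legacy (non-managed) plugin that exposes a unix socket, the handshake can be exercised by hand; a sketch, assuming the activation endpoint is `/Plugin.Activate` and using an illustrative socket path:

```console
$ curl -H "Content-Type: application/json" -XPOST -d '{}' \
    --unix-socket /run/docker/plugins/your-plugin.sock \
    http://localhost/Plugin.Activate

{"Implements": ["VolumeDriver"]}
```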
|
|
|
@ -114,9 +114,9 @@ Enable the authorization plugin with a dedicated command line flag in the
|
|||
`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID`
|
||||
value. This value can be the plugin’s socket or a path to a specification file.
|
||||
Authorization plugins can be loaded without restarting the daemon. Refer
|
||||
to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information.
|
||||
to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reload-behavior) for more information.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
|
||||
```
|
||||
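Since authorization plugins can also be (re)loaded without restarting the daemon, an alternative to the command-line flag is the daemon configuration file combined with a configuration reload; a sketch, assuming the default `/etc/docker/daemon.json` location and illustrative plugin names:

```console
$ cat /etc/docker/daemon.json
{
  "authorization-plugins": ["plugin1", "plugin2"]
}

$ # SIGHUP asks dockerd to reload its configuration without restarting
$ sudo kill -HUP $(pidof dockerd)
```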
|
||||
|
@ -124,26 +124,26 @@ Docker's authorization subsystem supports multiple `--authorization-plugin` para
|
|||
|
||||
### Calling authorized command (allow)
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker pull centos
|
||||
...
|
||||
<...>
|
||||
f1b10cd84249: Pull complete
|
||||
...
|
||||
<...>
|
||||
```
|
||||
|
||||
### Calling unauthorized command (deny)
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker pull centos
|
||||
...
|
||||
<...>
|
||||
docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed.
|
||||
```
|
||||
|
||||
### Error from plugins
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker pull centos
|
||||
...
|
||||
<...>
|
||||
docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?.
|
||||
```
|
||||
|
||||
|
@ -180,6 +180,7 @@ should implement the following two methods:
|
|||
"Err": "The error message if things go wrong"
|
||||
}
|
||||
```
|
||||
|
||||
#### /AuthZPlugin.AuthZRes
|
||||
|
||||
**Request**:
|
||||
|
|
|
@ -31,7 +31,7 @@ You need to install and enable the plugin and then restart the Docker daemon
|
|||
before using the plugin. See the following example for the correct ordering
|
||||
of steps.
|
||||
|
||||
```
|
||||
```console
|
||||
$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver
|
||||
<output suppressed>
|
||||
$ pkill dockerd
|
||||
|
@ -309,6 +309,7 @@ Get an archive of the changes between the filesystem layers specified by the `ID
|
|||
and `Parent`. `Parent` may be an empty string, in which case there is no parent.
|
||||
|
||||
**Response**:
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
{{ TAR STREAM }}
|
||||
|
@ -354,6 +355,7 @@ Respond with a non-empty string error if an error occurred.
|
|||
### /GraphDriver.ApplyDiff
|
||||
|
||||
**Request**:
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
{{ TAR STREAM }}
|
||||
|
|