Merge pull request #3576 from thaJeztah/update_engine

vendor: update github.com/docker/docker to latest master
commit 5c769c40be
Sebastiaan van Stijn, 2022-05-02 10:41:51 +02:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
222 changed files with 5602 additions and 1804 deletions

View File

@@ -142,7 +142,7 @@ func runAttach(dockerCli command.Cli, opts *attachOptions) error {
 	return getExitStatus(errC, resultC)
 }
 
-func getExitStatus(errC <-chan error, resultC <-chan container.ContainerWaitOKBody) error {
+func getExitStatus(errC <-chan error, resultC <-chan container.WaitResponse) error {
 	select {
 	case result := <-resultC:
 		if result.Error != nil {
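
For callers outside this diff, the rename is mechanical: ContainerWait's result channel now carries container.WaitResponse (with WaitExitError as the embedded error type). A minimal consumer sketch against the vendored client at this commit; the container ID is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	// ContainerWait now yields container.WaitResponse instead of
	// container.ContainerWaitOKBody; the two-channel protocol is unchanged.
	resultC, errC := apiClient.ContainerWait(context.Background(), "some-container", container.WaitConditionNotRunning)
	select {
	case result := <-resultC:
		if result.Error != nil {
			log.Fatal(result.Error.Message) // server-side wait error
		}
		fmt.Println("exit status:", result.StatusCode)
	case err := <-errC:
		log.Fatal(err) // client/transport error
	}
}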

View File

@@ -81,16 +81,16 @@ func TestGetExitStatus(t *testing.T) {
 	var (
 		expectedErr = fmt.Errorf("unexpected error")
 		errC        = make(chan error, 1)
-		resultC     = make(chan container.ContainerWaitOKBody, 1)
+		resultC     = make(chan container.WaitResponse, 1)
 	)
 
 	testcases := []struct {
-		result        *container.ContainerWaitOKBody
+		result        *container.WaitResponse
 		err           error
 		expectedError error
 	}{
 		{
-			result: &container.ContainerWaitOKBody{
+			result: &container.WaitResponse{
 				StatusCode: 0,
 			},
 		},
@@ -99,13 +99,13 @@ func TestGetExitStatus(t *testing.T) {
 			expectedError: expectedErr,
 		},
 		{
-			result: &container.ContainerWaitOKBody{
-				Error: &container.ContainerWaitOKBodyError{Message: expectedErr.Error()},
+			result: &container.WaitResponse{
+				Error: &container.WaitExitError{Message: expectedErr.Error()},
 			},
 			expectedError: expectedErr,
 		},
 		{
-			result: &container.ContainerWaitOKBody{
+			result: &container.WaitResponse{
 				StatusCode: 15,
 			},
 			expectedError: cli.StatusError{StatusCode: 15},

View File

@@ -20,14 +20,14 @@ type fakeClient struct {
 		hostConfig *container.HostConfig,
 		networkingConfig *network.NetworkingConfig,
 		platform *specs.Platform,
-		containerName string) (container.ContainerCreateCreatedBody, error)
+		containerName string) (container.CreateResponse, error)
 	containerStartFunc      func(container string, options types.ContainerStartOptions) error
 	imageCreateFunc         func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
 	infoFunc                func() (types.Info, error)
 	containerStatPathFunc   func(container, path string) (types.ContainerPathStat, error)
 	containerCopyFromFunc   func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
 	logFunc                 func(string, types.ContainerLogsOptions) (io.ReadCloser, error)
-	waitFunc                func(string) (<-chan container.ContainerWaitOKBody, <-chan error)
+	waitFunc                func(string) (<-chan container.WaitResponse, <-chan error)
 	containerListFunc       func(types.ContainerListOptions) ([]types.Container, error)
 	containerExportFunc     func(string) (io.ReadCloser, error)
 	containerExecResizeFunc func(id string, options types.ResizeOptions) error
@@ -75,11 +75,11 @@ func (f *fakeClient) ContainerCreate(
 	networkingConfig *network.NetworkingConfig,
 	platform *specs.Platform,
 	containerName string,
-) (container.ContainerCreateCreatedBody, error) {
+) (container.CreateResponse, error) {
 	if f.createContainerFunc != nil {
 		return f.createContainerFunc(config, hostConfig, networkingConfig, platform, containerName)
 	}
-	return container.ContainerCreateCreatedBody{}, nil
+	return container.CreateResponse{}, nil
 }
 
 func (f *fakeClient) ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error {
@@ -128,7 +128,7 @@ func (f *fakeClient) ClientVersion() string {
 	return f.Version
 }
 
-func (f *fakeClient) ContainerWait(_ context.Context, container string, _ container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {
+func (f *fakeClient) ContainerWait(_ context.Context, container string, _ container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
 	if f.waitFunc != nil {
 		return f.waitFunc(container)
 	}
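
The fake above follows a common test-double pattern: embed the real client interface, expose per-method hook functions, and fall back to zero values when no hook is set. A generic, self-contained sketch of that pattern with the renamed wait types; the interface and type names here are mine, not the repo's:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
)

type waiter interface {
	ContainerWait(ctx context.Context, id string, cond container.WaitCondition) (<-chan container.WaitResponse, <-chan error)
}

type fakeWaiter struct {
	waitFunc func(string) (<-chan container.WaitResponse, <-chan error)
}

func (f *fakeWaiter) ContainerWait(_ context.Context, id string, _ container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
	if f.waitFunc != nil {
		return f.waitFunc(id) // test-provided behaviour
	}
	resC := make(chan container.WaitResponse, 1)
	resC <- container.WaitResponse{} // zero-value fallback, like the fake above
	return resC, make(chan error)
}

func main() {
	var w waiter = &fakeWaiter{}
	resC, _ := w.ContainerWait(context.Background(), "id", container.WaitConditionNotRunning)
	fmt.Println((<-resC).StatusCode) // 0
}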

View File

@@ -191,7 +191,7 @@ func newCIDFile(path string) (*cidFile, error) {
 }
 
 // nolint: gocyclo
-func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig *containerConfig, opts *createOptions) (*container.ContainerCreateCreatedBody, error) {
+func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig *containerConfig, opts *createOptions) (*container.CreateResponse, error) {
 	config := containerConfig.Config
 	hostConfig := containerConfig.HostConfig
 	networkingConfig := containerConfig.NetworkingConfig

View File

@@ -88,18 +88,18 @@ func TestCreateContainerImagePullPolicy(t *testing.T) {
 	cases := []struct {
 		PullPolicy      string
 		ExpectedPulls   int
-		ExpectedBody    container.ContainerCreateCreatedBody
+		ExpectedBody    container.CreateResponse
 		ExpectedErrMsg  string
 		ResponseCounter int
 	}{
 		{
 			PullPolicy:    PullImageMissing,
 			ExpectedPulls: 1,
-			ExpectedBody:  container.ContainerCreateCreatedBody{ID: containerID},
+			ExpectedBody:  container.CreateResponse{ID: containerID},
 		}, {
 			PullPolicy:    PullImageAlways,
 			ExpectedPulls: 1,
-			ExpectedBody:  container.ContainerCreateCreatedBody{ID: containerID},
+			ExpectedBody:  container.CreateResponse{ID: containerID},
 			ResponseCounter: 1, // This lets us return a container on the first pull
 		}, {
 			PullPolicy: PullImageNever,
@@ -118,13 +118,13 @@ func TestCreateContainerImagePullPolicy(t *testing.T) {
 				networkingConfig *network.NetworkingConfig,
 				platform *specs.Platform,
 				containerName string,
-			) (container.ContainerCreateCreatedBody, error) {
+			) (container.CreateResponse, error) {
 				defer func() { c.ResponseCounter++ }()
 				switch c.ResponseCounter {
 				case 0:
-					return container.ContainerCreateCreatedBody{}, fakeNotFound{}
+					return container.CreateResponse{}, fakeNotFound{}
 				default:
-					return container.ContainerCreateCreatedBody{ID: containerID}, nil
+					return container.CreateResponse{ID: containerID}, nil
 				}
 			},
 			imageCreateFunc: func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
@@ -187,8 +187,8 @@ func TestNewCreateCommandWithContentTrustErrors(t *testing.T) {
 				networkingConfig *network.NetworkingConfig,
 				platform *specs.Platform,
 				containerName string,
-			) (container.ContainerCreateCreatedBody, error) {
-				return container.ContainerCreateCreatedBody{}, fmt.Errorf("shouldn't try to pull image")
+			) (container.CreateResponse, error) {
+				return container.CreateResponse{}, fmt.Errorf("shouldn't try to pull image")
 			},
 		}, test.EnableContentTrust)
 		cli.SetNotaryClient(tc.notaryFunc)
@@ -248,8 +248,8 @@ func TestNewCreateCommandWithWarnings(t *testing.T) {
 				networkingConfig *network.NetworkingConfig,
 				platform *specs.Platform,
 				containerName string,
-			) (container.ContainerCreateCreatedBody, error) {
-				return container.ContainerCreateCreatedBody{}, nil
+			) (container.CreateResponse, error) {
+				return container.CreateResponse{}, nil
 			},
 		})
 		cmd := NewCreateCommand(cli)
@@ -287,10 +287,10 @@ func TestCreateContainerWithProxyConfig(t *testing.T) {
 			networkingConfig *network.NetworkingConfig,
 			platform *specs.Platform,
 			containerName string,
-		) (container.ContainerCreateCreatedBody, error) {
+		) (container.CreateResponse, error) {
 			sort.Strings(config.Env)
 			assert.DeepEqual(t, config.Env, expected)
-			return container.ContainerCreateCreatedBody{}, nil
+			return container.CreateResponse{}, nil
 		},
 	})
 	cli.SetConfigFile(&configfile.ConfigFile{

View File

@@ -4,10 +4,10 @@ import (
 	"context"
 	"fmt"
 	"strings"
-	"time"
 
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
+	"github.com/docker/docker/api/types/container"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -42,18 +42,19 @@ func NewRestartCommand(dockerCli command.Cli) *cobra.Command {
 func runRestart(dockerCli command.Cli, opts *restartOptions) error {
 	ctx := context.Background()
 	var errs []string
-	var timeout *time.Duration
+	var timeout *int
 	if opts.nSecondsChanged {
-		timeoutValue := time.Duration(opts.nSeconds) * time.Second
-		timeout = &timeoutValue
+		timeout = &opts.nSeconds
 	}
-
 	for _, name := range opts.containers {
-		if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil {
+		err := dockerCli.Client().ContainerRestart(ctx, name, container.StopOptions{
+			Timeout: timeout,
+		})
+		if err != nil {
 			errs = append(errs, err.Error())
 			continue
 		}
-		fmt.Fprintln(dockerCli.Out(), name)
+		_, _ = fmt.Fprintln(dockerCli.Out(), name)
 	}
 	if len(errs) > 0 {
 		return errors.New(strings.Join(errs, "\n"))
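
The signature change above is the core of the engine bump: ContainerRestart now takes a container.StopOptions whose Timeout is a count of whole seconds (*int) rather than a *time.Duration. A minimal sketch of a direct caller, assuming the vendored client at this commit; the container name is a placeholder:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	seconds := 10 // stop timeout in whole seconds; a nil Timeout means the daemon default
	err = apiClient.ContainerRestart(context.Background(), "my-container", container.StopOptions{
		Timeout: &seconds,
	})
	if err != nil {
		log.Fatal(err)
	}
}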

View File

@@ -16,8 +16,8 @@ import (
 func TestRunLabel(t *testing.T) {
 	cli := test.NewFakeCli(&fakeClient{
-		createContainerFunc: func(_ *container.Config, _ *container.HostConfig, _ *network.NetworkingConfig, _ *specs.Platform, _ string) (container.ContainerCreateCreatedBody, error) {
-			return container.ContainerCreateCreatedBody{
+		createContainerFunc: func(_ *container.Config, _ *container.HostConfig, _ *network.NetworkingConfig, _ *specs.Platform, _ string) (container.CreateResponse, error) {
+			return container.CreateResponse{
 				ID: "id",
 			}, nil
 		},
@@ -61,8 +61,8 @@ func TestRunCommandWithContentTrustErrors(t *testing.T) {
 			networkingConfig *network.NetworkingConfig,
 			platform *specs.Platform,
 			containerName string,
-		) (container.ContainerCreateCreatedBody, error) {
-			return container.ContainerCreateCreatedBody{}, fmt.Errorf("shouldn't try to pull image")
+		) (container.CreateResponse, error) {
+			return container.CreateResponse{}, fmt.Errorf("shouldn't try to pull image")
 		},
 	}, test.EnableContentTrust)
 	cli.SetNotaryClient(tc.notaryFunc)

View File

@@ -4,10 +4,10 @@ import (
 	"context"
 	"fmt"
 	"strings"
-	"time"
 
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
+	"github.com/docker/docker/api/types/container"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -40,25 +40,23 @@ func NewStopCommand(dockerCli command.Cli) *cobra.Command {
 }
 
 func runStop(dockerCli command.Cli, opts *stopOptions) error {
-	ctx := context.Background()
-	var timeout *time.Duration
+	var timeout *int
 	if opts.timeChanged {
-		timeoutValue := time.Duration(opts.time) * time.Second
-		timeout = &timeoutValue
+		timeout = &opts.time
 	}
-	var errs []string
-
-	errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error {
-		return dockerCli.Client().ContainerStop(ctx, id, timeout)
+	errChan := parallelOperation(context.Background(), opts.containers, func(ctx context.Context, id string) error {
+		return dockerCli.Client().ContainerStop(ctx, id, container.StopOptions{
+			Timeout: timeout,
+		})
 	})
-	for _, container := range opts.containers {
+	var errs []string
+	for _, ctr := range opts.containers {
 		if err := <-errChan; err != nil {
 			errs = append(errs, err.Error())
 			continue
 		}
-		fmt.Fprintln(dockerCli.Out(), container)
+		_, _ = fmt.Fprintln(dockerCli.Out(), ctr)
 	}
 	if len(errs) > 0 {
 		return errors.New(strings.Join(errs, "\n"))
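
parallelOperation is the CLI's own fan-out helper; roughly, it runs one operation per container concurrently and reports results on a shared error channel. A stripped-down sketch of that pattern (the helper name and wiring below are mine, not the repo's implementation):

package main

import (
	"context"
	"fmt"
)

// parallel starts op for every id and returns a channel carrying one
// result (nil on success) per input.
func parallel(ctx context.Context, ids []string, op func(ctx context.Context, id string) error) <-chan error {
	errC := make(chan error, len(ids))
	for _, id := range ids {
		go func(id string) {
			errC <- op(ctx, id)
		}(id)
	}
	return errC
}

func main() {
	ids := []string{"web", "db", "cache"}
	errC := parallel(context.Background(), ids, func(ctx context.Context, id string) error {
		fmt.Println("stopping", id) // stand-in for dockerCli.Client().ContainerStop(...)
		return nil
	})
	for range ids { // drain exactly one result per input
		if err := <-errC; err != nil {
			fmt.Println("error:", err)
		}
	}
}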

View File

@@ -13,10 +13,10 @@ import (
 	is "gotest.tools/v3/assert/cmp"
 )
 
-func waitFn(cid string) (<-chan container.ContainerWaitOKBody, <-chan error) {
-	resC := make(chan container.ContainerWaitOKBody)
+func waitFn(cid string) (<-chan container.WaitResponse, <-chan error) {
+	resC := make(chan container.WaitResponse)
 	errC := make(chan error, 1)
-	var res container.ContainerWaitOKBody
+	var res container.WaitResponse
 
 	go func() {
 		switch {
@@ -24,10 +24,10 @@ func waitFn(cid string) (<-chan container.WaitResponse, <-chan error) {
 			res.StatusCode = 42
 			resC <- res
 		case strings.Contains(cid, "non-existent"):
-			err := errors.Errorf("No such container: %v", cid)
+			err := errors.Errorf("no such container: %v", cid)
 			errC <- err
 		case strings.Contains(cid, "wait-error"):
-			res.Error = &container.ContainerWaitOKBodyError{Message: "removal failed"}
+			res.Error = &container.WaitExitError{Message: "removal failed"}
 			resC <- res
 		default:
 			// normal exit

View File

@@ -3,11 +3,13 @@ package formatter
 import (
 	"bytes"
 	"fmt"
+	"strconv"
 	"strings"
 	"text/template"
 
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/volume"
 	units "github.com/docker/go-units"
 )
@@ -34,7 +36,7 @@ type DiskUsageContext struct {
 	LayersSize int64
 	Images     []*types.ImageSummary
 	Containers []*types.Container
-	Volumes    []*types.Volume
+	Volumes    []*volume.Volume
 	BuildCache []*types.BuildCache
 	BuilderSize int64
 }
@@ -271,7 +273,7 @@ func (c *diskUsageImagesContext) Type() string {
 }
 
 func (c *diskUsageImagesContext) TotalCount() string {
-	return fmt.Sprintf("%d", len(c.images))
+	return strconv.Itoa(len(c.images))
 }
 
 func (c *diskUsageImagesContext) Active() string {
@@ -282,7 +284,7 @@ func (c *diskUsageImagesContext) Active() string {
 		}
 	}
 
-	return fmt.Sprintf("%d", used)
+	return strconv.Itoa(used)
 }
 
 func (c *diskUsageImagesContext) Size() string {
@@ -323,7 +325,7 @@ func (c *diskUsageContainersContext) Type() string {
 }
 
 func (c *diskUsageContainersContext) TotalCount() string {
-	return fmt.Sprintf("%d", len(c.containers))
+	return strconv.Itoa(len(c.containers))
 }
 
 func (c *diskUsageContainersContext) isActive(container types.Container) bool {
@@ -340,7 +342,7 @@ func (c *diskUsageContainersContext) Active() string {
 		}
 	}
 
-	return fmt.Sprintf("%d", used)
+	return strconv.Itoa(used)
 }
 
 func (c *diskUsageContainersContext) Size() string {
@@ -373,7 +375,7 @@ func (c *diskUsageContainersContext) Reclaimable() string {
 type diskUsageVolumesContext struct {
 	HeaderContext
-	volumes []*types.Volume
+	volumes []*volume.Volume
 }
 
 func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) {
@@ -385,7 +387,7 @@ func (c *diskUsageVolumesContext) Type() string {
 }
 
 func (c *diskUsageVolumesContext) TotalCount() string {
-	return fmt.Sprintf("%d", len(c.volumes))
+	return strconv.Itoa(len(c.volumes))
 }
 
 func (c *diskUsageVolumesContext) Active() string {
@@ -397,7 +399,7 @@ func (c *diskUsageVolumesContext) Active() string {
 		}
 	}
 
-	return fmt.Sprintf("%d", used)
+	return strconv.Itoa(used)
 }
 
 func (c *diskUsageVolumesContext) Size() string {
@@ -447,7 +449,7 @@ func (c *diskUsageBuilderContext) Type() string {
 }
 
 func (c *diskUsageBuilderContext) TotalCount() string {
-	return fmt.Sprintf("%d", len(c.buildCache))
+	return strconv.Itoa(len(c.buildCache))
 }
 
 func (c *diskUsageBuilderContext) Active() string {
@@ -457,7 +459,7 @@ func (c *diskUsageBuilderContext) Active() string {
 			numActive++
 		}
 	}
-	return fmt.Sprintf("%d", numActive)
+	return strconv.Itoa(numActive)
 }
 
 func (c *diskUsageBuilderContext) Size() string {
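
The repeated fmt.Sprintf("%d", n) to strconv.Itoa(n) rewrites above are behavior-preserving; strconv simply skips fmt's reflection-driven verb parsing. A quick stdlib-only check:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	n := 42
	fmt.Println(fmt.Sprintf("%d", n) == strconv.Itoa(n)) // true: same decimal string
	// For an int64 field such as RefCount, the equivalent is strconv.FormatInt:
	var ref int64 = 7
	fmt.Println(strconv.FormatInt(ref, 10)) // "7"
}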

View File

@@ -1,10 +1,10 @@
 package formatter
 
 import (
-	"fmt"
+	"strconv"
 	"strings"
 
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/volume"
 	units "github.com/docker/go-units"
 )
@@ -36,10 +36,10 @@ func NewVolumeFormat(source string, quiet bool) Format {
 }
 
 // VolumeWrite writes formatted volumes using the Context
-func VolumeWrite(ctx Context, volumes []*types.Volume) error {
+func VolumeWrite(ctx Context, volumes []*volume.Volume) error {
 	render := func(format func(subContext SubContext) error) error {
-		for _, volume := range volumes {
-			if err := format(&volumeContext{v: *volume}); err != nil {
+		for _, vol := range volumes {
+			if err := format(&volumeContext{v: *vol}); err != nil {
 				return err
 			}
 		}
@@ -50,7 +50,7 @@ func VolumeWrite(ctx Context, volumes []*volume.Volume) error {
 
 type volumeContext struct {
 	HeaderContext
-	v types.Volume
+	v volume.Volume
 }
 
 func newVolumeContext() *volumeContext {
@@ -94,7 +94,7 @@ func (c *volumeContext) Labels() string {
 	var joinLabels []string
 	for k, v := range c.v.Labels {
-		joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v))
+		joinLabels = append(joinLabels, k+"="+v)
 	}
 	return strings.Join(joinLabels, ",")
 }
@@ -110,7 +110,7 @@ func (c *volumeContext) Links() string {
 	if c.v.UsageData == nil {
 		return "N/A"
 	}
-	return fmt.Sprintf("%d", c.v.UsageData.RefCount)
+	return strconv.FormatInt(c.v.UsageData.RefCount, 10)
 }
 
 func (c *volumeContext) Size() string {

View File

@@ -8,7 +8,7 @@ import (
 	"testing"
 
 	"github.com/docker/cli/internal/test"
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/volume"
 	"github.com/docker/docker/pkg/stringid"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -24,22 +24,22 @@ func TestVolumeContext(t *testing.T) {
 		call func() string
 	}{
 		{volumeContext{
-			v: types.Volume{Name: volumeName},
+			v: volume.Volume{Name: volumeName},
 		}, volumeName, ctx.Name},
 		{volumeContext{
-			v: types.Volume{Driver: "driver_name"},
+			v: volume.Volume{Driver: "driver_name"},
 		}, "driver_name", ctx.Driver},
 		{volumeContext{
-			v: types.Volume{Scope: "local"},
+			v: volume.Volume{Scope: "local"},
 		}, "local", ctx.Scope},
 		{volumeContext{
-			v: types.Volume{Mountpoint: "mountpoint"},
+			v: volume.Volume{Mountpoint: "mountpoint"},
 		}, "mountpoint", ctx.Mountpoint},
 		{volumeContext{
-			v: types.Volume{},
+			v: volume.Volume{},
 		}, "", ctx.Labels},
 		{volumeContext{
-			v: types.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}},
+			v: volume.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}},
 		}, "label1=value1,label2=value2", ctx.Labels},
 	}
@@ -122,7 +122,7 @@ foobar_bar
 		},
 	}
 
-	volumes := []*types.Volume{
+	volumes := []*volume.Volume{
 		{Name: "foobar_baz", Driver: "foo"},
 		{Name: "foobar_bar", Driver: "bar"},
 	}
@@ -143,7 +143,7 @@ foobar_bar
 }
 
 func TestVolumeContextWriteJSON(t *testing.T) {
-	volumes := []*types.Volume{
+	volumes := []*volume.Volume{
 		{Driver: "foo", Name: "foobar_baz"},
 		{Driver: "bar", Name: "foobar_bar"},
 	}
@@ -166,7 +166,7 @@ func TestVolumeContextWriteJSON(t *testing.T) {
 }
 
 func TestVolumeContextWriteJSONField(t *testing.T) {
-	volumes := []*types.Volume{
+	volumes := []*volume.Volume{
 		{Driver: "foo", Name: "foobar_baz"},
 		{Driver: "bar", Name: "foobar_bar"},
 	}

View File

@@ -22,12 +22,12 @@ import (
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/builder/remotecontext/urlutil"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/pkg/streamformatter"
-	"github.com/docker/docker/pkg/urlutil"
 	units "github.com/docker/go-units"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"

View File

@@ -7,14 +7,14 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/docker/docker/api/types/swarm"
-	swarmapi "github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/api/genericresource"
+	swarmapi "github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/api/genericresource"
 )
 
 // GenericResource is a concept that a user can use to advertise user-defined
 // resources on a node and thus better place services based on these resources.
 // E.g: NVIDIA GPUs, Intel FPGAs, ...
-// See https://github.com/docker/swarmkit/blob/master/design/generic_resources.md
+// See https://github.com/moby/swarmkit/blob/de950a7ed842c7b7e47e9451cde9bf8f96031894/design/generic_resources.md
 
 // ValidateSingleGenericResource validates that a single entry in the
 // generic resource list is valid.

View File

@@ -14,10 +14,10 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/client"
-	"github.com/docker/swarmkit/api"
-	"github.com/docker/swarmkit/api/defaults"
 	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/google/shlex"
+	"github.com/moby/swarmkit/v2/api"
+	"github.com/moby/swarmkit/v2/api/defaults"
 	"github.com/pkg/errors"
 	"github.com/spf13/pflag"
 )

View File

@@ -17,7 +17,7 @@ import (
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/client"
 	units "github.com/docker/go-units"
-	"github.com/docker/swarmkit/api/defaults"
+	"github.com/moby/swarmkit/v2/api/defaults"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"

View File

@@ -5,48 +5,48 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
-	volumetypes "github.com/docker/docker/api/types/volume"
+	"github.com/docker/docker/api/types/volume"
 	"github.com/docker/docker/client"
 )
 
 type fakeClient struct {
 	client.Client
-	volumeCreateFunc  func(volumetypes.VolumeCreateBody) (types.Volume, error)
-	volumeInspectFunc func(volumeID string) (types.Volume, error)
-	volumeListFunc    func(filter filters.Args) (volumetypes.VolumeListOKBody, error)
+	volumeCreateFunc  func(volume.CreateOptions) (volume.Volume, error)
+	volumeInspectFunc func(volumeID string) (volume.Volume, error)
+	volumeListFunc    func(filter filters.Args) (volume.ListResponse, error)
 	volumeRemoveFunc  func(volumeID string, force bool) error
 	volumePruneFunc   func(filter filters.Args) (types.VolumesPruneReport, error)
 }
 
-func (c *fakeClient) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) {
+func (c *fakeClient) VolumeCreate(_ context.Context, options volume.CreateOptions) (volume.Volume, error) {
 	if c.volumeCreateFunc != nil {
 		return c.volumeCreateFunc(options)
 	}
-	return types.Volume{}, nil
+	return volume.Volume{}, nil
 }
 
-func (c *fakeClient) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
+func (c *fakeClient) VolumeInspect(_ context.Context, volumeID string) (volume.Volume, error) {
 	if c.volumeInspectFunc != nil {
 		return c.volumeInspectFunc(volumeID)
 	}
-	return types.Volume{}, nil
+	return volume.Volume{}, nil
 }
 
-func (c *fakeClient) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) {
+func (c *fakeClient) VolumeList(_ context.Context, filter filters.Args) (volume.ListResponse, error) {
 	if c.volumeListFunc != nil {
 		return c.volumeListFunc(filter)
 	}
-	return volumetypes.VolumeListOKBody{}, nil
+	return volume.ListResponse{}, nil
 }
 
-func (c *fakeClient) VolumesPrune(ctx context.Context, filter filters.Args) (types.VolumesPruneReport, error) {
+func (c *fakeClient) VolumesPrune(_ context.Context, filter filters.Args) (types.VolumesPruneReport, error) {
 	if c.volumePruneFunc != nil {
 		return c.volumePruneFunc(filter)
 	}
 	return types.VolumesPruneReport{}, nil
 }
 
-func (c *fakeClient) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
+func (c *fakeClient) VolumeRemove(_ context.Context, volumeID string, force bool) error {
 	if c.volumeRemoveFunc != nil {
 		return c.volumeRemoveFunc(volumeID, force)
 	}

View File

@@ -7,7 +7,7 @@ import (
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/opts"
-	volumetypes "github.com/docker/docker/api/types/volume"
+	"github.com/docker/docker/api/types/volume"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )
@@ -50,20 +50,16 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
 }
 
 func runCreate(dockerCli command.Cli, options createOptions) error {
-	client := dockerCli.Client()
-
-	volReq := volumetypes.VolumeCreateBody{
+	vol, err := dockerCli.Client().VolumeCreate(context.Background(), volume.CreateOptions{
 		Driver:     options.driver,
 		DriverOpts: options.driverOpts.GetAll(),
 		Name:       options.name,
 		Labels:     opts.ConvertKVStringsToMap(options.labels.GetAll()),
-	}
-
-	vol, err := client.VolumeCreate(context.Background(), volReq)
+	})
 	if err != nil {
 		return err
 	}
-	fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name)
+	_, _ = fmt.Fprintln(dockerCli.Out(), vol.Name)
 	return nil
 }
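
After the type move, volume creation takes volume.CreateOptions and returns a volume.Volume directly (previously volumetypes.VolumeCreateBody and types.Volume). A minimal standalone sketch against the vendored client at this commit; the volume name and labels are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	vol, err := apiClient.VolumeCreate(context.Background(), volume.CreateOptions{
		Name:   "example-volume",
		Driver: "local",
		Labels: map[string]string{"owner": "demo"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vol.Name) // the created volume, now a volume.Volume
}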

View File

@@ -7,8 +7,7 @@ import (
 	"testing"
 
 	"github.com/docker/cli/internal/test"
-	"github.com/docker/docker/api/types"
-	volumetypes "github.com/docker/docker/api/types/volume"
+	"github.com/docker/docker/api/types/volume"
 	"github.com/pkg/errors"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -18,7 +17,7 @@ func TestVolumeCreateErrors(t *testing.T) {
 	testCases := []struct {
 		args             []string
 		flags            map[string]string
-		volumeCreateFunc func(volumetypes.VolumeCreateBody) (types.Volume, error)
+		volumeCreateFunc func(volume.CreateOptions) (volume.Volume, error)
 		expectedError    string
 	}{
 		{
@@ -33,8 +32,8 @@ func TestVolumeCreateErrors(t *testing.T) {
 			expectedError: "requires at most 1 argument",
 		},
 		{
-			volumeCreateFunc: func(createBody volumetypes.VolumeCreateBody) (types.Volume, error) {
-				return types.Volume{}, errors.Errorf("error creating volume")
+			volumeCreateFunc: func(createBody volume.CreateOptions) (volume.Volume, error) {
+				return volume.Volume{}, errors.Errorf("error creating volume")
 			},
 			expectedError: "error creating volume",
 		},
@@ -57,11 +56,11 @@ func TestVolumeCreateWithName(t *testing.T) {
 	name := "foo"
 	cli := test.NewFakeCli(&fakeClient{
-		volumeCreateFunc: func(body volumetypes.VolumeCreateBody) (types.Volume, error) {
+		volumeCreateFunc: func(body volume.CreateOptions) (volume.Volume, error) {
 			if body.Name != name {
-				return types.Volume{}, errors.Errorf("expected name %q, got %q", name, body.Name)
+				return volume.Volume{}, errors.Errorf("expected name %q, got %q", name, body.Name)
 			}
-			return types.Volume{
+			return volume.Volume{
 				Name: body.Name,
 			}, nil
 		},
@@ -96,20 +95,20 @@ func TestVolumeCreateWithFlags(t *testing.T) {
 	name := "banana"
 	cli := test.NewFakeCli(&fakeClient{
-		volumeCreateFunc: func(body volumetypes.VolumeCreateBody) (types.Volume, error) {
+		volumeCreateFunc: func(body volume.CreateOptions) (volume.Volume, error) {
 			if body.Name != "" {
-				return types.Volume{}, errors.Errorf("expected empty name, got %q", body.Name)
+				return volume.Volume{}, errors.Errorf("expected empty name, got %q", body.Name)
 			}
 			if body.Driver != expectedDriver {
-				return types.Volume{}, errors.Errorf("expected driver %q, got %q", expectedDriver, body.Driver)
+				return volume.Volume{}, errors.Errorf("expected driver %q, got %q", expectedDriver, body.Driver)
 			}
 			if !reflect.DeepEqual(body.DriverOpts, expectedOpts) {
-				return types.Volume{}, errors.Errorf("expected drivers opts %v, got %v", expectedOpts, body.DriverOpts)
+				return volume.Volume{}, errors.Errorf("expected drivers opts %v, got %v", expectedOpts, body.DriverOpts)
 			}
 			if !reflect.DeepEqual(body.Labels, expectedLabels) {
-				return types.Volume{}, errors.Errorf("expected labels %v, got %v", expectedLabels, body.Labels)
+				return volume.Volume{}, errors.Errorf("expected labels %v, got %v", expectedLabels, body.Labels)
 			}
-			return types.Volume{
+			return volume.Volume{
 				Name: name,
 			}, nil
 		},

View File

@@ -7,7 +7,7 @@ import (
 	"github.com/docker/cli/internal/test"
 	. "github.com/docker/cli/internal/test/builders" // Import builders to get the builder function as package function
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/volume"
 	"github.com/pkg/errors"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/golden"
@@ -17,7 +17,7 @@ func TestVolumeInspectErrors(t *testing.T) {
 	testCases := []struct {
 		args              []string
 		flags             map[string]string
-		volumeInspectFunc func(volumeID string) (types.Volume, error)
+		volumeInspectFunc func(volumeID string) (volume.Volume, error)
 		expectedError     string
 	}{
 		{
@@ -25,8 +25,8 @@ func TestVolumeInspectErrors(t *testing.T) {
 		},
 		{
 			args: []string{"foo"},
-			volumeInspectFunc: func(volumeID string) (types.Volume, error) {
-				return types.Volume{}, errors.Errorf("error while inspecting the volume")
+			volumeInspectFunc: func(volumeID string) (volume.Volume, error) {
+				return volume.Volume{}, errors.Errorf("error while inspecting the volume")
 			},
 			expectedError: "error while inspecting the volume",
 		},
@@ -39,13 +39,13 @@ func TestVolumeInspectErrors(t *testing.T) {
 		},
 		{
 			args: []string{"foo", "bar"},
-			volumeInspectFunc: func(volumeID string) (types.Volume, error) {
+			volumeInspectFunc: func(volumeID string) (volume.Volume, error) {
 				if volumeID == "foo" {
-					return types.Volume{
+					return volume.Volume{
 						Name: "foo",
 					}, nil
 				}
-				return types.Volume{}, errors.Errorf("error while inspecting the volume")
+				return volume.Volume{}, errors.Errorf("error while inspecting the volume")
 			},
 			expectedError: "error while inspecting the volume",
 		},
@@ -69,14 +69,14 @@ func TestVolumeInspectWithoutFormat(t *testing.T) {
 	testCases := []struct {
 		name              string
 		args              []string
-		volumeInspectFunc func(volumeID string) (types.Volume, error)
+		volumeInspectFunc func(volumeID string) (volume.Volume, error)
 	}{
 		{
 			name: "single-volume",
 			args: []string{"foo"},
-			volumeInspectFunc: func(volumeID string) (types.Volume, error) {
+			volumeInspectFunc: func(volumeID string) (volume.Volume, error) {
 				if volumeID != "foo" {
-					return types.Volume{}, errors.Errorf("Invalid volumeID, expected %s, got %s", "foo", volumeID)
+					return volume.Volume{}, errors.Errorf("Invalid volumeID, expected %s, got %s", "foo", volumeID)
 				}
 				return *Volume(), nil
 			},
@@ -84,7 +84,7 @@ func TestVolumeInspectWithoutFormat(t *testing.T) {
 		{
 			name: "multiple-volume-with-labels",
 			args: []string{"foo", "bar"},
-			volumeInspectFunc: func(volumeID string) (types.Volume, error) {
+			volumeInspectFunc: func(volumeID string) (volume.Volume, error) {
 				return *Volume(VolumeName(volumeID), VolumeLabels(map[string]string{
 					"foo": "bar",
 				})), nil
@@ -103,7 +103,7 @@ func TestVolumeInspectWithoutFormat(t *testing.T) {
 }
 
 func TestVolumeInspectWithFormat(t *testing.T) {
-	volumeInspectFunc := func(volumeID string) (types.Volume, error) {
+	volumeInspectFunc := func(volumeID string) (volume.Volume, error) {
 		return *Volume(VolumeLabels(map[string]string{
 			"foo": "bar",
 		})), nil
@@ -112,7 +112,7 @@ func TestVolumeInspectWithFormat(t *testing.T) {
 		name              string
 		format            string
 		args              []string
-		volumeInspectFunc func(volumeID string) (types.Volume, error)
+		volumeInspectFunc func(volumeID string) (volume.Volume, error)
 	}{
 		{
 			name: "simple-template",

View File

@@ -7,9 +7,8 @@ import (
 	"github.com/docker/cli/cli/config/configfile"
 	"github.com/docker/cli/internal/test"
 	. "github.com/docker/cli/internal/test/builders" // Import builders to get the builder function as package function
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
-	volumetypes "github.com/docker/docker/api/types/volume"
+	"github.com/docker/docker/api/types/volume"
 	"github.com/pkg/errors"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/golden"
@@ -19,7 +18,7 @@ func TestVolumeListErrors(t *testing.T) {
 	testCases := []struct {
 		args           []string
 		flags          map[string]string
-		volumeListFunc func(filter filters.Args) (volumetypes.VolumeListOKBody, error)
+		volumeListFunc func(filter filters.Args) (volume.ListResponse, error)
 		expectedError  string
 	}{
 		{
@@ -27,8 +26,8 @@ func TestVolumeListErrors(t *testing.T) {
 			expectedError: "accepts no argument",
 		},
 		{
-			volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) {
-				return volumetypes.VolumeListOKBody{}, errors.Errorf("error listing volumes")
+			volumeListFunc: func(filter filters.Args) (volume.ListResponse, error) {
+				return volume.ListResponse{}, errors.Errorf("error listing volumes")
 			},
 			expectedError: "error listing volumes",
 		},
@@ -50,9 +49,9 @@ func TestVolumeListErrors(t *testing.T) {
 func TestVolumeListWithoutFormat(t *testing.T) {
 	cli := test.NewFakeCli(&fakeClient{
-		volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) {
-			return volumetypes.VolumeListOKBody{
-				Volumes: []*types.Volume{
+		volumeListFunc: func(filter filters.Args) (volume.ListResponse, error) {
+			return volume.ListResponse{
+				Volumes: []*volume.Volume{
 					Volume(),
 					Volume(VolumeName("foo"), VolumeDriver("bar")),
 					Volume(VolumeName("baz"), VolumeLabels(map[string]string{
@@ -69,9 +68,9 @@ func TestVolumeListWithoutFormat(t *testing.T) {
 func TestVolumeListWithConfigFormat(t *testing.T) {
 	cli := test.NewFakeCli(&fakeClient{
-		volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) {
-			return volumetypes.VolumeListOKBody{
-				Volumes: []*types.Volume{
+		volumeListFunc: func(filter filters.Args) (volume.ListResponse, error) {
+			return volume.ListResponse{
+				Volumes: []*volume.Volume{
 					Volume(),
 					Volume(VolumeName("foo"), VolumeDriver("bar")),
 					Volume(VolumeName("baz"), VolumeLabels(map[string]string{
@@ -91,9 +90,9 @@ func TestVolumeListWithConfigFormat(t *testing.T) {
 func TestVolumeListWithFormat(t *testing.T) {
 	cli := test.NewFakeCli(&fakeClient{
-		volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) {
-			return volumetypes.VolumeListOKBody{
-				Volumes: []*types.Volume{
+		volumeListFunc: func(filter filters.Args) (volume.ListResponse, error) {
+			return volume.ListResponse{
+				Volumes: []*volume.Volume{
 					Volume(),
 					Volume(VolumeName("foo"), VolumeDriver("bar")),
 					Volume(VolumeName("baz"), VolumeLabels(map[string]string{
@@ -111,9 +110,9 @@ func TestVolumeListWithFormat(t *testing.T) {
 func TestVolumeListSortOrder(t *testing.T) {
 	cli := test.NewFakeCli(&fakeClient{
-		volumeListFunc: func(filter filters.Args) (volumetypes.VolumeListOKBody, error) {
-			return volumetypes.VolumeListOKBody{
-				Volumes: []*types.Volume{
+		volumeListFunc: func(filter filters.Args) (volume.ListResponse, error) {
+			return volume.ListResponse{
+				Volumes: []*volume.Volume{
 					Volume(VolumeName("volume-2-foo")),
 					Volume(VolumeName("volume-10-foo")),
 					Volume(VolumeName("volume-1-foo")),

View File

@@ -1,13 +1,11 @@
 package builders
 
-import (
-	"github.com/docker/docker/api/types"
-)
+import "github.com/docker/docker/api/types/volume"
 
 // Volume creates a volume with default values.
 // Any number of volume function builder can be passed to augment it.
-func Volume(builders ...func(volume *types.Volume)) *types.Volume {
-	volume := &types.Volume{
+func Volume(builders ...func(volume *volume.Volume)) *volume.Volume {
+	vol := &volume.Volume{
 		Name:       "volume",
 		Driver:     "local",
 		Mountpoint: "/data/volume",
@@ -15,29 +13,29 @@ func Volume(builders ...func(volume *types.Volume)) *types.Volume {
 	}
 
 	for _, builder := range builders {
-		builder(volume)
+		builder(vol)
 	}
 
-	return volume
+	return vol
 }
 
 // VolumeLabels sets the volume labels
-func VolumeLabels(labels map[string]string) func(volume *types.Volume) {
-	return func(volume *types.Volume) {
+func VolumeLabels(labels map[string]string) func(volume *volume.Volume) {
+	return func(volume *volume.Volume) {
 		volume.Labels = labels
 	}
 }
 
// VolumeName sets the volume labels
-func VolumeName(name string) func(volume *types.Volume) {
-	return func(volume *types.Volume) {
+func VolumeName(name string) func(volume *volume.Volume) {
+	return func(volume *volume.Volume) {
 		volume.Name = name
 	}
 }
 
 // VolumeDriver sets the volume driver
-func VolumeDriver(name string) func(volume *types.Volume) {
-	return func(volume *types.Volume) {
+func VolumeDriver(name string) func(volume *volume.Volume) {
+	return func(volume *volume.Volume) {
 		volume.Driver = name
 	}
 }
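
Usage of the rewritten builder stays the same; only the underlying type changed to volume.Volume. An illustrative snippet as it might appear inside this repo's test tree (the dot-import mirrors the test files above; this is a sketch, not code from the commit):

package example

import (
	"fmt"

	. "github.com/docker/cli/internal/test/builders"
)

func demo() {
	// Functional options compose left to right over the default volume.
	vol := Volume(VolumeName("baz"), VolumeDriver("bar"), VolumeLabels(map[string]string{"a": "b"}))
	fmt.Println(vol.Name, vol.Driver, vol.Labels) // baz bar map[a:b]
}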

View File

@@ -7,14 +7,13 @@ module github.com/docker/cli
 go 1.17
 
 require (
-	github.com/containerd/containerd v1.6.2
+	github.com/containerd/containerd v1.6.3
 	github.com/creack/pty v1.1.11
 	github.com/docker/distribution v2.8.1+incompatible
 	github.com/docker/docker v20.10.14+incompatible // see "replace" for the actual version
 	github.com/docker/docker-credential-helpers v0.6.4
 	github.com/docker/go-connections v0.4.0
 	github.com/docker/go-units v0.4.0
-	github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0
 	github.com/fvbommel/sortorder v1.0.2
 	github.com/gogo/protobuf v1.3.2
 	github.com/google/go-cmp v0.5.7
@@ -22,12 +21,13 @@ require (
 	github.com/imdario/mergo v0.3.12
 	github.com/mattn/go-runewidth v0.0.13
 	github.com/mitchellh/mapstructure v1.3.2
-	github.com/moby/buildkit v0.10.0
+	github.com/moby/buildkit v0.10.2
+	github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46
 	github.com/moby/sys/signal v0.7.0
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.2
+	github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.8.1
 	github.com/spf13/cobra v1.4.0
@@ -35,7 +35,7 @@ require (
 	github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a
 	github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d
 	github.com/xeipuuv/gojsonschema v1.2.0
-	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+	golang.org/x/sys v0.0.0-20220405210540-1e041c57c461
 	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
 	golang.org/x/text v0.3.7
 	gopkg.in/yaml.v2 v2.4.0
@@ -44,7 +44,7 @@ require (
 require (
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-	github.com/Microsoft/go-winio v0.5.1 // indirect
+	github.com/Microsoft/go-winio v0.5.2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
@@ -53,11 +53,11 @@ require (
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect; updated to v1.8.0 to get rid of old compatibility for "context"
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/klauspost/compress v1.15.0 // indirect
+	github.com/klauspost/compress v1.15.1 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
-	github.com/miekg/pkcs11 v1.0.3 // indirect
+	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/moby/sys/symlink v0.2.0 // indirect
-	github.com/opencontainers/runc v1.1.0 // indirect
+	github.com/opencontainers/runc v1.1.1 // indirect
 	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
@@ -71,11 +71,17 @@ require (
 	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
-	google.golang.org/grpc v1.44.0 // indirect
+	google.golang.org/grpc v1.45.0 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
 )
 
 replace (
-	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220326171151-8941dcfcc5db+incompatible // master (v22.04-dev)
+	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220429181837-2ed904cad705+incompatible // master (v22.04-dev)
+
 	github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2
+
+	// Resolve dependency hell with github.com/cloudflare/cfssl (transitive via
+	// swarmkit) by pinning the certificate-transparency-go version. Remove once
+	// module go.etcd.io/etcd/server/v3 has upgraded its dependency on
+	// go.opentelemetry.io/otel to v1.
+	github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.20
 )

View File

@@ -36,8 +36,8 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
-github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d h1:hi6J4K6DKrR4/ljxn6SF6nURyu785wKMuQcjt7H3VCQ=
@@ -74,8 +74,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
 github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
+github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5 h1:PqZ3bA4yzwywivzk7PBQWngJp2/PAS0bWRZerKteicY=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
@@ -87,9 +87,9 @@ github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/containerd v1.6.2 h1:pcaPUGbYW8kBw6OgIZwIVIeEhdWVrBzsoCfVJ5BjrLU=
-github.com/containerd/containerd v1.6.2/go.mod h1:sidY30/InSE1j2vdD1ihtKoJz+lWdaXMdiAeIupaf+s=
-github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA=
+github.com/containerd/containerd v1.6.3 h1:JfgUEIAH07xDWk6kqz0P3ArZt+KJ9YeihSC9uyFtSKg=
+github.com/containerd/containerd v1.6.3/go.mod h1:gCVGrYRYFm2E8GmuUIbj/NGD7DLZQLzSJQazjVKDOig=
+github.com/containerd/continuity v0.2.3-0.20220330195504-d132b287edc8 h1:yGFEcFNMhze29DxAAB33v/1OMRYF/cM9iwwgV2P0ZrE=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -105,8 +105,8 @@ github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xb
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.3-0.20220326171151-8941dcfcc5db+incompatible h1:5DYFLB020CbxyjsxBle60QaEUb4krFjr30O0eLXsNp0=
-github.com/docker/docker v20.10.3-0.20220326171151-8941dcfcc5db+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.3-0.20220429181837-2ed904cad705+incompatible h1:Bs9PQ1/7QUa5bvhBiQNK2b39Ve3gU1o0Lr4ZfNUk1gc=
+github.com/docker/docker v20.10.3-0.20220429181837-2ed904cad705+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
 github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
 github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -122,8 +122,6 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0 h1:YehAv2BPLfTm58HW04wRnNy8Oo/CAzWji7mjJ6UJWgM=
-github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
 github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@ -189,8 +187,8 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI= github.com/google/certificate-transparency-go v1.0.20 h1:azETE79toaBOyp+StoEBy8atzQujL0PyBPEmsEeDCXI=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/certificate-transparency-go v1.0.20/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@ -251,8 +249,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@ -272,13 +270,16 @@ github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg= github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg=
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/buildkit v0.10.0 h1:ElHQJJdnj/VR/pfNJwhrjQJj8GXFIwVNGZh/Qbd5tVo= github.com/moby/buildkit v0.10.2 h1:jywa+mPPtsfCQqpIbt72RUKf49hTTCirTqIs4LG0n+8=
github.com/moby/buildkit v0.10.0/go.mod h1:WvwAZv8aRScHkqc/+X46cRC2CKMKpqcaX+pRvUTtPes= github.com/moby/buildkit v0.10.2/go.mod h1:jxeOuly98l9gWHai0Ojrbnczrk/rf+o9/JqNhY+UCSo=
github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46 h1:FVr9eatIpN7PlE2ZHP850rIJ6AQoZxoZvPSDR+WQY38=
github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46/go.mod h1:/so6Lct4y1x14UprW/loFsOe6xoXVTlvh25V36ULXNQ=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
@ -305,10 +306,10 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iFHD5Kt9hkOfeawBNiEeEaV7bmC4/Z5wJp8E9BptMs=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs=
github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8= github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU=
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
@ -352,6 +353,7 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
@ -559,8 +561,9 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 h1:kHVeDEnfKn3T238CvrUcz6KeEsFHVaKh4kMTt6Wsysg=
golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
@ -699,8 +702,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows // +build windows
package winio package winio
@ -143,6 +144,11 @@ func (f *win32File) Close() error {
return nil return nil
} }
// IsClosed checks if the file has been closed
func (f *win32File) IsClosed() bool {
return f.closing.isSet()
}
// prepareIo prepares for a new IO operation. // prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) { func (f *win32File) prepareIo() (*ioOperation, error) {

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows // +build windows
package winio package winio
@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error {
return conn.sock.Close() return conn.sock.Close()
} }
func (conn *HvsockConn) IsClosed() bool {
return conn.sock.IsClosed()
}
func (conn *HvsockConn) shutdown(how int) error { func (conn *HvsockConn) shutdown(how int) error {
err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) if conn.IsClosed() {
return ErrFileClosed
}
err := syscall.Shutdown(conn.sock.handle, how)
if err != nil { if err != nil {
return os.NewSyscallError("shutdown", err) return os.NewSyscallError("shutdown", err)
} }
return nil return nil
} }
// CloseRead shuts down the read end of the socket. // CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error { func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD) err := conn.shutdown(syscall.SHUT_RD)
if err != nil { if err != nil {
@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error {
return nil return nil
} }
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that // CloseWrite shuts down the write end of the socket, preventing future write operations and
// no more data will be written. // notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error { func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR) err := conn.shutdown(syscall.SHUT_WR)
if err != nil { if err != nil {
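
The hvsock hunk above is a genuine bug fix: `shutdown` previously hard-coded `syscall.SHUT_RD` regardless of its `how` argument, so `CloseWrite` silently shut down the *read* side of the connection; the new code honors `how` and returns `ErrFileClosed` rather than operating on an already-closed socket. Hyper-V sockets only build on Windows, so here is a minimal cross-platform sketch of the half-close semantics the fix restores, using `net.TCPConn` (which exposes the same `CloseWrite` contract) instead of `HvsockConn`:

```go
package main

import (
	"fmt"
	"io"
	"net"
)

func main() {
	// An in-process listener stands in for the remote endpoint.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	go func() {
		srv, err := ln.Accept()
		if err != nil {
			return
		}
		defer srv.Close()
		// The peer's reads see EOF once the client half-closes its write side...
		buf, _ := io.ReadAll(srv)
		fmt.Printf("server read %q, then EOF\n", buf)
		// ...but the server-to-client direction keeps working.
		srv.Write([]byte("reply"))
	}()

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	conn.Write([]byte("request"))
	// Half-close: shut down only the write side (SHUT_WR under the hood).
	// Before the fix, HvsockConn.CloseWrite shut down the read side
	// instead, breaking exactly this request/response pattern.
	if err := conn.(*net.TCPConn).CloseWrite(); err != nil {
		panic(err)
	}

	reply, _ := io.ReadAll(conn) // the read side is still open
	fmt.Printf("client read %q\n", reply)
}
```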

View File

@ -14,8 +14,6 @@ import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"strconv" "strconv"
"golang.org/x/sys/windows"
) )
// Variant specifies which GUID variant (or "type") of the GUID. It determines // Variant specifies which GUID variant (or "type") of the GUID. It determines
@ -41,13 +39,6 @@ type Version uint8
var _ = (encoding.TextMarshaler)(GUID{}) var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{}) var _ = (encoding.TextUnmarshaler)(&GUID{})
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) { func NewV4() (GUID, error) {
var b [16]byte var b [16]byte

View File

@ -0,0 +1,15 @@
// +build !windows
package guid
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type as that is only available to builds
// targeted at `windows`. The representation matches that used by native Windows
// code.
type GUID struct {
Data1 uint32
Data2 uint16
Data3 uint16
Data4 [8]byte
}

View File

@ -0,0 +1,10 @@
package guid
import "golang.org/x/sys/windows"
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
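
The two new files above complete a small refactor: the `GUID` type moves out of the shared source file into a pair of platform-specific files, so non-Windows builds no longer import `golang.org/x/sys/windows`, which only compiles for `GOOS=windows`. A sketch of the general pattern, with a hypothetical package name `shape` (the real files live in the `guid` package and use the build constraints shown in the diff):

```go
// File: guid_windows.go — compiled only when GOOS=windows.
//go:build windows

package shape

import "golang.org/x/sys/windows"

// Alias the x/sys layout so values convert freely to and from
// functions expecting windows.GUID.
type GUID windows.GUID
```

and its non-Windows counterpart:

```go
// File: guid_nonwindows.go — compiled everywhere else.
//go:build !windows

package shape

// Hand-declare the identical in-memory layout, since
// golang.org/x/sys/windows does not build off Windows.
type GUID struct {
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}
```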

View File

@ -7,7 +7,7 @@ Aaron Feng <aaron.feng@gmail.com>
Aaron Hnatiw <aaron@griddio.com> Aaron Hnatiw <aaron@griddio.com>
Aaron Huslage <huslage@gmail.com> Aaron Huslage <huslage@gmail.com>
Aaron L. Xu <liker.xu@foxmail.com> Aaron L. Xu <liker.xu@foxmail.com>
Aaron Lehmann <aaron.lehmann@docker.com> Aaron Lehmann <alehmann@netflix.com>
Aaron Welch <welch@packet.net> Aaron Welch <welch@packet.net>
Aaron.L.Xu <likexu@harmonycloud.cn> Aaron.L.Xu <likexu@harmonycloud.cn>
Abel Muiño <amuino@gmail.com> Abel Muiño <amuino@gmail.com>
@ -61,10 +61,11 @@ Alan Scherger <flyinprogrammer@gmail.com>
Alan Thompson <cloojure@gmail.com> Alan Thompson <cloojure@gmail.com>
Albert Callarisa <shark234@gmail.com> Albert Callarisa <shark234@gmail.com>
Albert Zhang <zhgwenming@gmail.com> Albert Zhang <zhgwenming@gmail.com>
Albin Kerouanton <albin@akerouanton.name> Albin Kerouanton <albinker@gmail.com>
Alec Benson <albenson@redhat.com> Alec Benson <albenson@redhat.com>
Alejandro González Hevia <alejandrgh11@gmail.com> Alejandro González Hevia <alejandrgh11@gmail.com>
Aleksa Sarai <asarai@suse.de> Aleksa Sarai <asarai@suse.de>
Aleksandr Chebotov <v-aleche@microsoft.com>
Aleksandrs Fadins <aleks@s-ko.net> Aleksandrs Fadins <aleks@s-ko.net>
Alena Prokharchyk <alena@rancher.com> Alena Prokharchyk <alena@rancher.com>
Alessandro Boch <aboch@tetrationanalytics.com> Alessandro Boch <aboch@tetrationanalytics.com>
@ -76,6 +77,7 @@ Alex Crawford <alex.crawford@coreos.com>
Alex Ellis <alexellis2@gmail.com> Alex Ellis <alexellis2@gmail.com>
Alex Gaynor <alex.gaynor@gmail.com> Alex Gaynor <alex.gaynor@gmail.com>
Alex Goodman <wagoodman@gmail.com> Alex Goodman <wagoodman@gmail.com>
Alex Nordlund <alexander.nordlund@nasdaq.com>
Alex Olshansky <i@creagenics.com> Alex Olshansky <i@creagenics.com>
Alex Samorukov <samm@os2.kiev.ua> Alex Samorukov <samm@os2.kiev.ua>
Alex Warhawk <ax.warhawk@gmail.com> Alex Warhawk <ax.warhawk@gmail.com>
@ -83,7 +85,7 @@ Alexander Artemenko <svetlyak.40wt@gmail.com>
Alexander Boyd <alex@opengroove.org> Alexander Boyd <alex@opengroove.org>
Alexander Larsson <alexl@redhat.com> Alexander Larsson <alexl@redhat.com>
Alexander Midlash <amidlash@docker.com> Alexander Midlash <amidlash@docker.com>
Alexander Morozov <lk4d4@docker.com> Alexander Morozov <lk4d4math@gmail.com>
Alexander Polakov <plhk@sdf.org> Alexander Polakov <plhk@sdf.org>
Alexander Shopov <ash@kambanaria.org> Alexander Shopov <ash@kambanaria.org>
Alexandre Beslic <alexandre.beslic@gmail.com> Alexandre Beslic <alexandre.beslic@gmail.com>
@ -192,13 +194,15 @@ Antony Messerli <amesserl@rackspace.com>
Anuj Bahuguna <anujbahuguna.dev@gmail.com> Anuj Bahuguna <anujbahuguna.dev@gmail.com>
Anuj Varma <anujvarma@thumbtack.com> Anuj Varma <anujvarma@thumbtack.com>
Anusha Ragunathan <anusha.ragunathan@docker.com> Anusha Ragunathan <anusha.ragunathan@docker.com>
Anyu Wang <wanganyu@outlook.com>
apocas <petermdias@gmail.com> apocas <petermdias@gmail.com>
Arash Deshmeh <adeshmeh@ca.ibm.com> Arash Deshmeh <adeshmeh@ca.ibm.com>
ArikaChen <eaglesora@gmail.com> ArikaChen <eaglesora@gmail.com>
Arko Dasgupta <arko.dasgupta@docker.com> Arko Dasgupta <arko@tetrate.io>
Arnaud Lefebvre <a.lefebvre@outlook.fr> Arnaud Lefebvre <a.lefebvre@outlook.fr>
Arnaud Porterie <arnaud.porterie@docker.com> Arnaud Porterie <icecrime@gmail.com>
Arnaud Rebillout <arnaud.rebillout@collabora.com> Arnaud Rebillout <arnaud.rebillout@collabora.com>
Artem Khramov <akhramov@pm.me>
Arthur Barr <arthur.barr@uk.ibm.com> Arthur Barr <arthur.barr@uk.ibm.com>
Arthur Gautier <baloo@gandi.net> Arthur Gautier <baloo@gandi.net>
Artur Meyster <arthurfbi@yahoo.com> Artur Meyster <arthurfbi@yahoo.com>
@ -343,6 +347,7 @@ Chen Qiu <cheney-90@hotmail.com>
Cheng-mean Liu <soccerl@microsoft.com> Cheng-mean Liu <soccerl@microsoft.com>
Chengfei Shang <cfshang@alauda.io> Chengfei Shang <cfshang@alauda.io>
Chengguang Xu <cgxu519@gmx.com> Chengguang Xu <cgxu519@gmx.com>
Chenyang Yan <memory.yancy@gmail.com>
chenyuzhu <chenyuzhi@oschina.cn> chenyuzhu <chenyuzhi@oschina.cn>
Chetan Birajdar <birajdar.chetan@gmail.com> Chetan Birajdar <birajdar.chetan@gmail.com>
Chewey <prosto-chewey@users.noreply.github.com> Chewey <prosto-chewey@users.noreply.github.com>
@ -406,20 +411,23 @@ Colin Walters <walters@verbum.org>
Collin Guarino <collin.guarino@gmail.com> Collin Guarino <collin.guarino@gmail.com>
Colm Hally <colmhally@gmail.com> Colm Hally <colmhally@gmail.com>
companycy <companycy@gmail.com> companycy <companycy@gmail.com>
Conor Evans <coevans@tcd.ie>
Corbin Coleman <corbin.coleman@docker.com> Corbin Coleman <corbin.coleman@docker.com>
Corey Farrell <git@cfware.com> Corey Farrell <git@cfware.com>
Cory Forsyth <cory.forsyth@gmail.com> Cory Forsyth <cory.forsyth@gmail.com>
Cory Snider <csnider@mirantis.com>
cressie176 <github@stephen-cresswell.net> cressie176 <github@stephen-cresswell.net>
CrimsonGlory <CrimsonGlory@users.noreply.github.com>
Cristian Ariza <dev@cristianrz.com> Cristian Ariza <dev@cristianrz.com>
Cristian Staretu <cristian.staretu@gmail.com> Cristian Staretu <cristian.staretu@gmail.com>
cristiano balducci <cristiano.balducci@gmail.com> cristiano balducci <cristiano.balducci@gmail.com>
Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com> Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
Cruceru Calin-Cristian <crucerucalincristian@gmail.com> Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
CUI Wei <ghostplant@qq.com> CUI Wei <ghostplant@qq.com>
cuishuang <imcusg@gmail.com>
Cuong Manh Le <cuong.manhle.vn@gmail.com> Cuong Manh Le <cuong.manhle.vn@gmail.com>
Cyprian Gracz <cyprian.gracz@micro-jumbo.eu> Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
Cyril F <cyrilf7x@gmail.com> Cyril F <cyrilf7x@gmail.com>
Da McGrady <dabkb@aol.com>
Daan van Berkel <daan.v.berkel.1980@gmail.com> Daan van Berkel <daan.v.berkel.1980@gmail.com>
Daehyeok Mun <daehyeok@gmail.com> Daehyeok Mun <daehyeok@gmail.com>
Dafydd Crosby <dtcrsby@gmail.com> Dafydd Crosby <dtcrsby@gmail.com>
@ -437,6 +445,7 @@ Dan Hirsch <thequux@upstandinghackers.com>
Dan Keder <dan.keder@gmail.com> Dan Keder <dan.keder@gmail.com>
Dan Levy <dan@danlevy.net> Dan Levy <dan@danlevy.net>
Dan McPherson <dmcphers@redhat.com> Dan McPherson <dmcphers@redhat.com>
Dan Plamadeala <cornul11@gmail.com>
Dan Stine <sw@stinemail.com> Dan Stine <sw@stinemail.com>
Dan Williams <me@deedubs.com> Dan Williams <me@deedubs.com>
Dani Hodovic <dani.hodovic@gmail.com> Dani Hodovic <dani.hodovic@gmail.com>
@ -457,6 +466,7 @@ Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
Daniel Nephin <dnephin@docker.com> Daniel Nephin <dnephin@docker.com>
Daniel Norberg <dano@spotify.com> Daniel Norberg <dano@spotify.com>
Daniel Nordberg <dnordberg@gmail.com> Daniel Nordberg <dnordberg@gmail.com>
Daniel P. Berrangé <berrange@redhat.com>
Daniel Robinson <gottagetmac@gmail.com> Daniel Robinson <gottagetmac@gmail.com>
Daniel S <dan.streby@gmail.com> Daniel S <dan.streby@gmail.com>
Daniel Sweet <danieljsweet@icloud.com> Daniel Sweet <danieljsweet@icloud.com>
@ -465,6 +475,7 @@ Daniel Watkins <daniel@daniel-watkins.co.uk>
Daniel X Moore <yahivin@gmail.com> Daniel X Moore <yahivin@gmail.com>
Daniel YC Lin <dlin.tw@gmail.com> Daniel YC Lin <dlin.tw@gmail.com>
Daniel Zhang <jmzwcn@gmail.com> Daniel Zhang <jmzwcn@gmail.com>
Daniele Rondina <geaaru@sabayonlinux.org>
Danny Berger <dpb587@gmail.com> Danny Berger <dpb587@gmail.com>
Danny Milosavljevic <dannym@scratchpost.org> Danny Milosavljevic <dannym@scratchpost.org>
Danny Yates <danny@codeaholics.org> Danny Yates <danny@codeaholics.org>
@ -530,7 +541,7 @@ Dennis Docter <dennis@d23.nl>
Derek <crq@kernel.org> Derek <crq@kernel.org>
Derek <crquan@gmail.com> Derek <crquan@gmail.com>
Derek Ch <denc716@gmail.com> Derek Ch <denc716@gmail.com>
Derek McGowan <derek@mcgstyle.net> Derek McGowan <derek@mcg.dev>
Deric Crago <deric.crago@gmail.com> Deric Crago <deric.crago@gmail.com>
Deshi Xiao <dxiao@redhat.com> Deshi Xiao <dxiao@redhat.com>
devmeyster <arthurfbi@yahoo.com> devmeyster <arthurfbi@yahoo.com>
@ -550,9 +561,11 @@ Dimitris Rozakis <dimrozakis@gmail.com>
Dimitry Andric <d.andric@activevideo.com> Dimitry Andric <d.andric@activevideo.com>
Dinesh Subhraveti <dineshs@altiscale.com> Dinesh Subhraveti <dineshs@altiscale.com>
Ding Fei <dingfei@stars.org.cn> Ding Fei <dingfei@stars.org.cn>
dingwei <dingwei@cmss.chinamobile.com>
Diogo Monica <diogo@docker.com> Diogo Monica <diogo@docker.com>
DiuDiugirl <sophia.wang@pku.edu.cn> DiuDiugirl <sophia.wang@pku.edu.cn>
Djibril Koné <kone.djibril@gmail.com> Djibril Koné <kone.djibril@gmail.com>
Djordje Lukic <djordje.lukic@docker.com>
dkumor <daniel@dkumor.com> dkumor <daniel@dkumor.com>
Dmitri Logvinenko <dmitri.logvinenko@gmail.com> Dmitri Logvinenko <dmitri.logvinenko@gmail.com>
Dmitri Shuralyov <shurcooL@gmail.com> Dmitri Shuralyov <shurcooL@gmail.com>
@ -601,6 +614,7 @@ Elango Sivanandam <elango.siva@docker.com>
Elena Morozova <lelenanam@gmail.com> Elena Morozova <lelenanam@gmail.com>
Eli Uriegas <seemethere101@gmail.com> Eli Uriegas <seemethere101@gmail.com>
Elias Faxö <elias.faxo@tre.se> Elias Faxö <elias.faxo@tre.se>
Elias Koromilas <elias.koromilas@gmail.com>
Elias Probst <mail@eliasprobst.eu> Elias Probst <mail@eliasprobst.eu>
Elijah Zupancic <elijah@zupancic.name> Elijah Zupancic <elijah@zupancic.name>
eluck <mail@eluck.me> eluck <mail@eluck.me>
@ -610,6 +624,7 @@ Emil Hernvall <emil@quench.at>
Emily Maier <emily@emilymaier.net> Emily Maier <emily@emilymaier.net>
Emily Rose <emily@contactvibe.com> Emily Rose <emily@contactvibe.com>
Emir Ozer <emirozer@yandex.com> Emir Ozer <emirozer@yandex.com>
Eng Zer Jun <engzerjun@gmail.com>
Enguerran <engcolson@gmail.com> Enguerran <engcolson@gmail.com>
Eohyung Lee <liquidnuker@gmail.com> Eohyung Lee <liquidnuker@gmail.com>
epeterso <epeterson@breakpoint-labs.com> epeterso <epeterson@breakpoint-labs.com>
@ -724,11 +739,14 @@ Frederik Loeffert <frederik@zitrusmedia.de>
Frederik Nordahl Jul Sabroe <frederikns@gmail.com> Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
Freek Kalter <freek@kalteronline.org> Freek Kalter <freek@kalteronline.org>
Frieder Bluemle <frieder.bluemle@gmail.com> Frieder Bluemle <frieder.bluemle@gmail.com>
frobnicaty <92033765+frobnicaty@users.noreply.github.com>
Frédéric Dalleau <frederic.dalleau@docker.com>
Fu JinLin <withlin@yeah.net> Fu JinLin <withlin@yeah.net>
Félix Baylac-Jacqué <baylac.felix@gmail.com> Félix Baylac-Jacqué <baylac.felix@gmail.com>
Félix Cantournet <felix.cantournet@cloudwatt.com> Félix Cantournet <felix.cantournet@cloudwatt.com>
Gabe Rosenhouse <gabe@missionst.com> Gabe Rosenhouse <gabe@missionst.com>
Gabor Nagy <mail@aigeruth.hu> Gabor Nagy <mail@aigeruth.hu>
Gabriel Goller <gabrielgoller123@gmail.com>
Gabriel L. Somlo <gsomlo@gmail.com> Gabriel L. Somlo <gsomlo@gmail.com>
Gabriel Linder <linder.gabriel@gmail.com> Gabriel Linder <linder.gabriel@gmail.com>
Gabriel Monroy <gabriel@opdemand.com> Gabriel Monroy <gabriel@opdemand.com>
@ -751,6 +769,7 @@ George Kontridze <george@bugsnag.com>
George MacRorie <gmacr31@gmail.com> George MacRorie <gmacr31@gmail.com>
George Xie <georgexsh@gmail.com> George Xie <georgexsh@gmail.com>
Georgi Hristozov <georgi@forkbomb.nl> Georgi Hristozov <georgi@forkbomb.nl>
Georgy Yakovlev <gyakovlev@gentoo.org>
Gereon Frey <gereon.frey@dynport.de> Gereon Frey <gereon.frey@dynport.de>
German DZ <germ@ndz.com.ar> German DZ <germ@ndz.com.ar>
Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl> Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
@ -762,6 +781,7 @@ Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
Giovan Isa Musthofa <giovanism@outlook.co.id> Giovan Isa Musthofa <giovanism@outlook.co.id>
gissehel <public-devgit-dantus@gissehel.org> gissehel <public-devgit-dantus@gissehel.org>
Giuseppe Mazzotta <gdm85@users.noreply.github.com> Giuseppe Mazzotta <gdm85@users.noreply.github.com>
Giuseppe Scrivano <gscrivan@redhat.com>
Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org> Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
Gleb M Borisov <borisov.gleb@gmail.com> Gleb M Borisov <borisov.gleb@gmail.com>
Glyn Normington <gnormington@gopivotal.com> Glyn Normington <gnormington@gopivotal.com>
@ -785,6 +805,7 @@ Guilherme Salgado <gsalgado@gmail.com>
Guillaume Dufour <gdufour.prestataire@voyages-sncf.com> Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
Guillaume J. Charmes <guillaume.charmes@docker.com> Guillaume J. Charmes <guillaume.charmes@docker.com>
Gunadhya S. <6939749+gunadhya@users.noreply.github.com> Gunadhya S. <6939749+gunadhya@users.noreply.github.com>
Guoqiang QI <guoqiang.qi1@gmail.com>
guoxiuyan <guoxiuyan@huawei.com> guoxiuyan <guoxiuyan@huawei.com>
Guri <odg0318@gmail.com> Guri <odg0318@gmail.com>
Gurjeet Singh <gurjeet@singh.im> Gurjeet Singh <gurjeet@singh.im>
@ -794,6 +815,7 @@ gwx296173 <gaojing3@huawei.com>
Günter Zöchbauer <guenter@gzoechbauer.com> Günter Zöchbauer <guenter@gzoechbauer.com>
Haichao Yang <yang.haichao@zte.com.cn> Haichao Yang <yang.haichao@zte.com.cn>
haikuoliu <haikuo@amazon.com> haikuoliu <haikuo@amazon.com>
haining.cao <haining.cao@daocloud.io>
Hakan Özler <hakan.ozler@kodcu.com> Hakan Özler <hakan.ozler@kodcu.com>
Hamish Hutchings <moredhel@aoeu.me> Hamish Hutchings <moredhel@aoeu.me>
Hannes Ljungberg <hannes@5monkeys.se> Hannes Ljungberg <hannes@5monkeys.se>
@ -889,6 +911,7 @@ Jake Champlin <jake.champlin.27@gmail.com>
Jake Moshenko <jake@devtable.com> Jake Moshenko <jake@devtable.com>
Jake Sanders <jsand@google.com> Jake Sanders <jsand@google.com>
Jakub Drahos <jdrahos@pulsepoint.com> Jakub Drahos <jdrahos@pulsepoint.com>
Jakub Guzik <jakubmguzik@gmail.com>
James Allen <jamesallen0108@gmail.com> James Allen <jamesallen0108@gmail.com>
James Carey <jecarey@us.ibm.com> James Carey <jecarey@us.ibm.com>
James Carr <james.r.carr@gmail.com> James Carr <james.r.carr@gmail.com>
@ -900,6 +923,7 @@ James Lal <james@lightsofapollo.com>
James Mills <prologic@shortcircuit.net.au> James Mills <prologic@shortcircuit.net.au>
James Nesbitt <jnesbitt@mirantis.com> James Nesbitt <jnesbitt@mirantis.com>
James Nugent <james@jen20.com> James Nugent <james@jen20.com>
James Sanders <james3sanders@gmail.com>
James Turnbull <james@lovedthanlost.net> James Turnbull <james@lovedthanlost.net>
James Watkins-Harvey <jwatkins@progi-media.com> James Watkins-Harvey <jwatkins@progi-media.com>
Jamie Hannaford <jamie@limetree.org> Jamie Hannaford <jamie@limetree.org>
@ -932,6 +956,7 @@ Jason Shepherd <jason@jasonshepherd.net>
Jason Smith <jasonrichardsmith@gmail.com> Jason Smith <jasonrichardsmith@gmail.com>
Jason Sommer <jsdirv@gmail.com> Jason Sommer <jsdirv@gmail.com>
Jason Stangroome <jason@codeassassin.com> Jason Stangroome <jason@codeassassin.com>
Javier Bassi <javierbassi@gmail.com>
jaxgeller <jacksongeller@gmail.com> jaxgeller <jacksongeller@gmail.com>
Jay <imjching@hotmail.com> Jay <imjching@hotmail.com>
Jay <teguhwpurwanto@gmail.com> Jay <teguhwpurwanto@gmail.com>
@ -1100,6 +1125,7 @@ Justas Brazauskas <brazauskasjustas@gmail.com>
Justen Martin <jmart@the-coder.com> Justen Martin <jmart@the-coder.com>
Justin Cormack <justin.cormack@docker.com> Justin Cormack <justin.cormack@docker.com>
Justin Force <justin.force@gmail.com> Justin Force <justin.force@gmail.com>
Justin Keller <85903732+jk-vb@users.noreply.github.com>
Justin Menga <justin.menga@gmail.com> Justin Menga <justin.menga@gmail.com>
Justin Plock <jplock@users.noreply.github.com> Justin Plock <jplock@users.noreply.github.com>
Justin Simonelis <justin.p.simonelis@gmail.com> Justin Simonelis <justin.p.simonelis@gmail.com>
@ -1148,6 +1174,7 @@ Kenjiro Nakayama <nakayamakenjiro@gmail.com>
Kent Johnson <kentoj@gmail.com> Kent Johnson <kentoj@gmail.com>
Kenta Tada <Kenta.Tada@sony.com> Kenta Tada <Kenta.Tada@sony.com>
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com> Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
Kevin Alvarez <crazy-max@users.noreply.github.com>
Kevin Burke <kev@inburke.com> Kevin Burke <kev@inburke.com>
Kevin Clark <kevin.clark@gmail.com> Kevin Clark <kevin.clark@gmail.com>
Kevin Feyrer <kevin.feyrer@btinternet.com> Kevin Feyrer <kevin.feyrer@btinternet.com>
@ -1332,6 +1359,7 @@ Markus Fix <lispmeister@gmail.com>
Markus Kortlang <hyp3rdino@googlemail.com> Markus Kortlang <hyp3rdino@googlemail.com>
Martijn Dwars <ikben@martijndwars.nl> Martijn Dwars <ikben@martijndwars.nl>
Martijn van Oosterhout <kleptog@svana.org> Martijn van Oosterhout <kleptog@svana.org>
Martin Dojcak <martin.dojcak@lablabs.io>
Martin Honermeyer <maze@strahlungsfrei.de> Martin Honermeyer <maze@strahlungsfrei.de>
Martin Kelly <martin@surround.io> Martin Kelly <martin@surround.io>
Martin Mosegaard Amdisen <martin.amdisen@praqma.com> Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
@ -1348,6 +1376,7 @@ Mathias Monnerville <mathias@monnerville.com>
Mathieu Champlon <mathieu.champlon@docker.com> Mathieu Champlon <mathieu.champlon@docker.com>
Mathieu Le Marec - Pasquet <kiorky@cryptelium.net> Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
Mathieu Parent <math.parent@gmail.com> Mathieu Parent <math.parent@gmail.com>
Mathieu Paturel <mathieu.paturel@gmail.com>
Matt Apperson <me@mattapperson.com> Matt Apperson <me@mattapperson.com>
Matt Bachmann <bachmann.matt@gmail.com> Matt Bachmann <bachmann.matt@gmail.com>
Matt Bajor <matt@notevenremotelydorky.com> Matt Bajor <matt@notevenremotelydorky.com>
@ -1356,6 +1385,7 @@ Matt Haggard <haggardii@gmail.com>
Matt Hoyle <matt@deployable.co> Matt Hoyle <matt@deployable.co>
Matt McCormick <matt.mccormick@kitware.com> Matt McCormick <matt.mccormick@kitware.com>
Matt Moore <mattmoor@google.com> Matt Moore <mattmoor@google.com>
Matt Morrison <3maven@gmail.com>
Matt Richardson <matt@redgumtech.com.au> Matt Richardson <matt@redgumtech.com.au>
Matt Rickard <mrick@google.com> Matt Rickard <mrick@google.com>
Matt Robenolt <matt@ydekproductions.com> Matt Robenolt <matt@ydekproductions.com>
@ -1400,7 +1430,7 @@ Michael Beskin <mrbeskin@gmail.com>
Michael Bridgen <mikeb@squaremobius.net> Michael Bridgen <mikeb@squaremobius.net>
Michael Brown <michael@netdirect.ca> Michael Brown <michael@netdirect.ca>
Michael Chiang <mchiang@docker.com> Michael Chiang <mchiang@docker.com>
Michael Crosby <michael@docker.com> Michael Crosby <crosbymichael@gmail.com>
Michael Currie <mcurrie@bruceforceresearch.com> Michael Currie <mcurrie@bruceforceresearch.com>
Michael Friis <friism@gmail.com> Michael Friis <friism@gmail.com>
Michael Gorsuch <gorsuch@github.com> Michael Gorsuch <gorsuch@github.com>
@ -1409,6 +1439,7 @@ Michael Holzheu <holzheu@linux.vnet.ibm.com>
Michael Hudson-Doyle <michael.hudson@canonical.com> Michael Hudson-Doyle <michael.hudson@canonical.com>
Michael Huettermann <michael@huettermann.net> Michael Huettermann <michael@huettermann.net>
Michael Irwin <mikesir87@gmail.com> Michael Irwin <mikesir87@gmail.com>
Michael Kuehn <micha@kuehn.io>
Michael Käufl <docker@c.michael-kaeufl.de> Michael Käufl <docker@c.michael-kaeufl.de>
Michael Neale <michael.neale@gmail.com> Michael Neale <michael.neale@gmail.com>
Michael Nussbaum <michael.nussbaum@getbraintree.com> Michael Nussbaum <michael.nussbaum@getbraintree.com>
@ -1418,6 +1449,7 @@ Michael Spetsiotis <michael_spets@hotmail.com>
Michael Stapelberg <michael+gh@stapelberg.de> Michael Stapelberg <michael+gh@stapelberg.de>
Michael Steinert <mike.steinert@gmail.com> Michael Steinert <mike.steinert@gmail.com>
Michael Thies <michaelthies78@gmail.com> Michael Thies <michaelthies78@gmail.com>
Michael Weidmann <michaelweidmann@web.de>
Michael West <mwest@mdsol.com> Michael West <mwest@mdsol.com>
Michael Zhao <michael.zhao@arm.com> Michael Zhao <michael.zhao@arm.com>
Michal Fojtik <mfojtik@redhat.com> Michal Fojtik <mfojtik@redhat.com>
@ -1458,6 +1490,7 @@ Mike Snitzer <snitzer@redhat.com>
mikelinjie <294893458@qq.com> mikelinjie <294893458@qq.com>
Mikhail Sobolev <mss@mawhrin.net> Mikhail Sobolev <mss@mawhrin.net>
Miklos Szegedi <miklos.szegedi@cloudera.com> Miklos Szegedi <miklos.szegedi@cloudera.com>
Milas Bowman <milasb@gmail.com>
Milind Chawre <milindchawre@gmail.com> Milind Chawre <milindchawre@gmail.com>
Miloslav Trmač <mitr@redhat.com> Miloslav Trmač <mitr@redhat.com>
mingqing <limingqing@cyou-inc.com> mingqing <limingqing@cyou-inc.com>
@ -1533,6 +1566,7 @@ Nicolas Kaiser <nikai@nikai.net>
Nicolas Sterchele <sterchele.nicolas@gmail.com> Nicolas Sterchele <sterchele.nicolas@gmail.com>
Nicolas V Castet <nvcastet@us.ibm.com> Nicolas V Castet <nvcastet@us.ibm.com>
Nicolás Hock Isaza <nhocki@gmail.com> Nicolás Hock Isaza <nhocki@gmail.com>
Niel Drummond <niel@drummond.lu>
Nigel Poulton <nigelpoulton@hotmail.com> Nigel Poulton <nigelpoulton@hotmail.com>
Nik Nyby <nikolas@gnu.org> Nik Nyby <nikolas@gnu.org>
Nikhil Chawla <chawlanikhil24@gmail.com> Nikhil Chawla <chawlanikhil24@gmail.com>
@ -1621,6 +1655,7 @@ Peng Tao <bergwolf@gmail.com>
Penghan Wang <ph.wang@daocloud.io> Penghan Wang <ph.wang@daocloud.io>
Per Weijnitz <per.weijnitz@gmail.com> Per Weijnitz <per.weijnitz@gmail.com>
perhapszzy@sina.com <perhapszzy@sina.com> perhapszzy@sina.com <perhapszzy@sina.com>
Pete Woods <pete.woods@circleci.com>
Peter Bourgon <peter@bourgon.org> Peter Bourgon <peter@bourgon.org>
Peter Braden <peterbraden@peterbraden.co.uk> Peter Braden <peterbraden@peterbraden.co.uk>
Peter Bücker <peter.buecker@pressrelations.de> Peter Bücker <peter.buecker@pressrelations.de>
@ -1638,7 +1673,7 @@ Peter Waller <p@pwaller.net>
Petr Švihlík <svihlik.petr@gmail.com> Petr Švihlík <svihlik.petr@gmail.com>
Petros Angelatos <petrosagg@gmail.com> Petros Angelatos <petrosagg@gmail.com>
Phil <underscorephil@gmail.com> Phil <underscorephil@gmail.com>
Phil Estes <estesp@linux.vnet.ibm.com> Phil Estes <estesp@gmail.com>
Phil Spitler <pspitler@gmail.com> Phil Spitler <pspitler@gmail.com>
Philip Alexander Etling <paetling@gmail.com> Philip Alexander Etling <paetling@gmail.com>
Philip Monroe <phil@philmonroe.com> Philip Monroe <phil@philmonroe.com>
@ -1707,6 +1742,7 @@ Renaud Gaubert <rgaubert@nvidia.com>
Rhys Hiltner <rhys@twitch.tv> Rhys Hiltner <rhys@twitch.tv>
Ri Xu <xuri.me@gmail.com> Ri Xu <xuri.me@gmail.com>
Ricardo N Feliciano <FelicianoTech@gmail.com> Ricardo N Feliciano <FelicianoTech@gmail.com>
Rich Horwood <rjhorwood@apple.com>
Rich Moyse <rich@moyse.us> Rich Moyse <rich@moyse.us>
Rich Seymour <rseymour@gmail.com> Rich Seymour <rseymour@gmail.com>
Richard <richard.scothern@gmail.com> Richard <richard.scothern@gmail.com>
@ -1731,6 +1767,7 @@ Robert Bachmann <rb@robertbachmann.at>
Robert Bittle <guywithnose@gmail.com> Robert Bittle <guywithnose@gmail.com>
Robert Obryk <robryk@gmail.com> Robert Obryk <robryk@gmail.com>
Robert Schneider <mail@shakeme.info> Robert Schneider <mail@shakeme.info>
Robert Shade <robert.shade@gmail.com>
Robert Stern <lexandro2000@gmail.com> Robert Stern <lexandro2000@gmail.com>
Robert Terhaar <rterhaar@atlanticdynamic.com> Robert Terhaar <rterhaar@atlanticdynamic.com>
Robert Wallis <smilingrob@gmail.com> Robert Wallis <smilingrob@gmail.com>
@ -1743,6 +1780,7 @@ Robin Speekenbrink <robin@kingsquare.nl>
Robin Thoni <robin@rthoni.com> Robin Thoni <robin@rthoni.com>
robpc <rpcann@gmail.com> robpc <rpcann@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com> Rodolfo Carvalho <rhcarvalho@gmail.com>
Rodrigo Campos <rodrigo@kinvolk.io>
Rodrigo Vaz <rodrigo.vaz@gmail.com> Rodrigo Vaz <rodrigo.vaz@gmail.com>
Roel Van Nyen <roel.vannyen@gmail.com> Roel Van Nyen <roel.vannyen@gmail.com>
Roger Peppe <rogpeppe@gmail.com> Roger Peppe <rogpeppe@gmail.com>
@ -1757,6 +1795,8 @@ Roma Sokolov <sokolov.r.v@gmail.com>
Roman Dudin <katrmr@gmail.com> Roman Dudin <katrmr@gmail.com>
Roman Mazur <roman@balena.io> Roman Mazur <roman@balena.io>
Roman Strashkin <roman.strashkin@gmail.com> Roman Strashkin <roman.strashkin@gmail.com>
Roman Volosatovs <roman.volosatovs@docker.com>
Roman Zabaluev <gpg@haarolean.dev>
Ron Smits <ron.smits@gmail.com> Ron Smits <ron.smits@gmail.com>
Ron Williams <ron.a.williams@gmail.com> Ron Williams <ron.a.williams@gmail.com>
Rong Gao <gaoronggood@163.com> Rong Gao <gaoronggood@163.com>
@ -1790,6 +1830,7 @@ Ryan Liu <ryanlyy@me.com>
Ryan McLaughlin <rmclaughlin@insidesales.com> Ryan McLaughlin <rmclaughlin@insidesales.com>
Ryan O'Donnell <odonnellryanc@gmail.com> Ryan O'Donnell <odonnellryanc@gmail.com>
Ryan Seto <ryanseto@yak.net> Ryan Seto <ryanseto@yak.net>
Ryan Shea <sheabot03@gmail.com>
Ryan Simmen <ryan.simmen@gmail.com> Ryan Simmen <ryan.simmen@gmail.com>
Ryan Stelly <ryan.stelly@live.com> Ryan Stelly <ryan.stelly@live.com>
Ryan Thomas <rthomas@atlassian.com> Ryan Thomas <rthomas@atlassian.com>
@ -1822,8 +1863,9 @@ Sambuddha Basu <sambuddhabasu1@gmail.com>
Sami Wagiaalla <swagiaal@redhat.com> Sami Wagiaalla <swagiaal@redhat.com>
Samuel Andaya <samuel@andaya.net> Samuel Andaya <samuel@andaya.net>
Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com> Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com>
Samuel Karp <skarp@amazon.com> Samuel Karp <me@samuelkarp.com>
Samuel PHAN <samuel-phan@users.noreply.github.com> Samuel PHAN <samuel-phan@users.noreply.github.com>
sanchayanghosh <sanchayanghosh@outlook.com>
Sandeep Bansal <sabansal@microsoft.com> Sandeep Bansal <sabansal@microsoft.com>
Sankar சங்கர் <sankar.curiosity@gmail.com> Sankar சங்கர் <sankar.curiosity@gmail.com>
Sanket Saurav <sanketsaurav@gmail.com> Sanket Saurav <sanketsaurav@gmail.com>
@ -1881,6 +1923,7 @@ Shengbo Song <thomassong@tencent.com>
Shengjing Zhu <zhsj@debian.org> Shengjing Zhu <zhsj@debian.org>
Shev Yan <yandong_8212@163.com> Shev Yan <yandong_8212@163.com>
Shih-Yuan Lee <fourdollars@gmail.com> Shih-Yuan Lee <fourdollars@gmail.com>
Shihao Xia <charlesxsh@hotmail.com>
Shijiang Wei <mountkin@gmail.com> Shijiang Wei <mountkin@gmail.com>
Shijun Qin <qinshijun16@mails.ucas.ac.cn> Shijun Qin <qinshijun16@mails.ucas.ac.cn>
Shishir Mahajan <shishir.mahajan@redhat.com> Shishir Mahajan <shishir.mahajan@redhat.com>
@ -1933,6 +1976,7 @@ Stefan S. <tronicum@user.github.com>
Stefan Scherer <stefan.scherer@docker.com> Stefan Scherer <stefan.scherer@docker.com>
Stefan Staudenmeyer <doerte@instana.com> Stefan Staudenmeyer <doerte@instana.com>
Stefan Weil <sw@weilnetz.de> Stefan Weil <sw@weilnetz.de>
Steffen Butzer <steffen.butzer@outlook.com>
Stephan Spindler <shutefan@gmail.com> Stephan Spindler <shutefan@gmail.com>
Stephen Benjamin <stephen@redhat.com> Stephen Benjamin <stephen@redhat.com>
Stephen Crosby <stevecrozz@gmail.com> Stephen Crosby <stevecrozz@gmail.com>
@ -1951,6 +1995,7 @@ Steven Iveson <sjiveson@outlook.com>
Steven Merrill <steven.merrill@gmail.com> Steven Merrill <steven.merrill@gmail.com>
Steven Richards <steven@axiomzen.co> Steven Richards <steven@axiomzen.co>
Steven Taylor <steven.taylor@me.com> Steven Taylor <steven.taylor@me.com>
Stéphane Este-Gracias <sestegra@gmail.com>
Stig Larsson <stig@larsson.dev> Stig Larsson <stig@larsson.dev>
Su Wang <su.wang@docker.com> Su Wang <su.wang@docker.com>
Subhajit Ghosh <isubuz.g@gmail.com> Subhajit Ghosh <isubuz.g@gmail.com>
@ -1962,12 +2007,13 @@ Sunny Gogoi <indiasuny000@gmail.com>
Suryakumar Sudar <surya.trunks@gmail.com> Suryakumar Sudar <surya.trunks@gmail.com>
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@home.org.au>
Swapnil Daingade <swapnil.daingade@gmail.com> Swapnil Daingade <swapnil.daingade@gmail.com>
Sylvain Baubeau <sbaubeau@redhat.com> Sylvain Baubeau <lebauce@gmail.com>
Sylvain Bellemare <sylvain@ascribe.io> Sylvain Bellemare <sylvain@ascribe.io>
Sébastien <sebastien@yoozio.com> Sébastien <sebastien@yoozio.com>
Sébastien HOUZÉ <cto@verylastroom.com> Sébastien HOUZÉ <cto@verylastroom.com>
Sébastien Luttringer <seblu@seblu.net> Sébastien Luttringer <seblu@seblu.net>
Sébastien Stormacq <sebsto@users.noreply.github.com> Sébastien Stormacq <sebsto@users.noreply.github.com>
Sören Tempel <soeren+git@soeren-tempel.net>
Tabakhase <mail@tabakhase.com> Tabakhase <mail@tabakhase.com>
Tadej Janež <tadej.j@nez.si> Tadej Janež <tadej.j@nez.si>
TAGOMORI Satoshi <tagomoris@gmail.com> TAGOMORI Satoshi <tagomoris@gmail.com>
@ -1996,6 +2042,7 @@ Thomas Gazagnaire <thomas@gazagnaire.org>
Thomas Graf <tgraf@suug.ch> Thomas Graf <tgraf@suug.ch>
Thomas Grainger <tagrain@gmail.com> Thomas Grainger <tagrain@gmail.com>
Thomas Hansen <thomas.hansen@gmail.com> Thomas Hansen <thomas.hansen@gmail.com>
Thomas Ledos <thomas.ledos92@gmail.com>
Thomas Leonard <thomas.leonard@docker.com> Thomas Leonard <thomas.leonard@docker.com>
Thomas Léveil <thomasleveil@gmail.com> Thomas Léveil <thomasleveil@gmail.com>
Thomas Orozco <thomas@orozco.fr> Thomas Orozco <thomas@orozco.fr>
@ -2064,9 +2111,11 @@ Tomas Tomecek <ttomecek@redhat.com>
Tomasz Kopczynski <tomek@kopczynski.net.pl> Tomasz Kopczynski <tomek@kopczynski.net.pl>
Tomasz Lipinski <tlipinski@users.noreply.github.com> Tomasz Lipinski <tlipinski@users.noreply.github.com>
Tomasz Nurkiewicz <nurkiewicz@gmail.com> Tomasz Nurkiewicz <nurkiewicz@gmail.com>
Tomek Mańko <tomek.manko@railgun-solutions.com>
Tommaso Visconti <tommaso.visconti@gmail.com> Tommaso Visconti <tommaso.visconti@gmail.com>
Tomoya Tabuchi <t@tomoyat1.com> Tomoya Tabuchi <t@tomoyat1.com>
Tomáš Hrčka <thrcka@redhat.com> Tomáš Hrčka <thrcka@redhat.com>
tonic <tonicbupt@gmail.com>
Tonny Xu <tonny.xu@gmail.com> Tonny Xu <tonny.xu@gmail.com>
Tony Abboud <tdabboud@hotmail.com> Tony Abboud <tdabboud@hotmail.com>
Tony Daws <tony@daws.ca> Tony Daws <tony@daws.ca>
@ -2196,6 +2245,7 @@ Wolfgang Powisch <powo@powo.priv.at>
Wonjun Kim <wonjun.kim@navercorp.com> Wonjun Kim <wonjun.kim@navercorp.com>
WuLonghui <wlh6666@qq.com> WuLonghui <wlh6666@qq.com>
xamyzhao <x.amy.zhao@gmail.com> xamyzhao <x.amy.zhao@gmail.com>
Xia Wu <xwumzn@amazon.com>
Xian Chaobo <xianchaobo@huawei.com> Xian Chaobo <xianchaobo@huawei.com>
Xianglin Gao <xlgao@zju.edu.cn> Xianglin Gao <xlgao@zju.edu.cn>
Xianjie <guxianjie@gmail.com> Xianjie <guxianjie@gmail.com>
@ -2220,6 +2270,7 @@ Xuecong Liao <satorulogic@gmail.com>
xuzhaokui <cynicholas@gmail.com> xuzhaokui <cynicholas@gmail.com>
Yadnyawalkya Tale <ytale@redhat.com> Yadnyawalkya Tale <ytale@redhat.com>
Yahya <ya7yaz@gmail.com> Yahya <ya7yaz@gmail.com>
yalpul <yalpul@gmail.com>
YAMADA Tsuyoshi <tyamada@minimum2scp.org> YAMADA Tsuyoshi <tyamada@minimum2scp.org>
Yamasaki Masahide <masahide.y@gmail.com> Yamasaki Masahide <masahide.y@gmail.com>
Yan Feng <yanfeng2@huawei.com> Yan Feng <yanfeng2@huawei.com>
@ -2254,6 +2305,7 @@ Yu-Ju Hong <yjhong@google.com>
Yuan Sun <sunyuan3@huawei.com> Yuan Sun <sunyuan3@huawei.com>
Yuanhong Peng <pengyuanhong@huawei.com> Yuanhong Peng <pengyuanhong@huawei.com>
Yue Zhang <zy675793960@yeah.net> Yue Zhang <zy675793960@yeah.net>
Yufei Xiong <yufei.xiong@qq.com>
Yuhao Fang <fangyuhao@gmail.com> Yuhao Fang <fangyuhao@gmail.com>
Yuichiro Kaneko <spiketeika@gmail.com> Yuichiro Kaneko <spiketeika@gmail.com>
YujiOshima <yuji.oshima0x3fd@gmail.com> YujiOshima <yuji.oshima0x3fd@gmail.com>

View File

@ -1154,6 +1154,13 @@ definitions:
ContainerConfig: ContainerConfig:
description: | description: |
Configuration for a container that is portable between hosts. Configuration for a container that is portable between hosts.
When used as the `ContainerConfig` field in an image, `ContainerConfig` is
an optional field containing the configuration of the container that was
last committed when creating the image.
Previous versions of the Docker builder used this field to store build
cache, but it is no longer in active use.
type: "object" type: "object"
properties: properties:
Hostname: Hostname:
@ -1600,7 +1607,7 @@ definitions:
description: | description: |
ID is the content-addressable ID of an image. ID is the content-addressable ID of an image.
This identified is a content-addressable digest calculated from the This identifier is a content-addressable digest calculated from the
image's configuration (which includes the digests of layers used by image's configuration (which includes the digests of layers used by
the image). the image).
@ -1788,41 +1795,119 @@ definitions:
- Containers - Containers
properties: properties:
Id: Id:
description: |
ID is the content-addressable ID of an image.
This identifier is a content-addressable digest calculated from the
image's configuration (which includes the digests of layers used by
the image).
Note that this digest differs from the `RepoDigests` below, which
holds digests of image manifests that reference the image.
type: "string" type: "string"
x-nullable: false x-nullable: false
example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710"
ParentId: ParentId:
description: |
ID of the parent image.
Depending on how the image was created, this field may be empty: it
is only set for images that were built/created locally, and is empty
if the image was pulled from an image registry.
type: "string" type: "string"
x-nullable: false x-nullable: false
example: ""
RepoTags: RepoTags:
description: |
List of image names/tags in the local image cache that reference this
image.
Multiple image tags can refer to the same image, and this list may be
empty if no tags reference the image, in which case the image is
"untagged". An untagged image can still be referenced by its ID.
type: "array" type: "array"
x-nullable: false x-nullable: false
items: items:
type: "string" type: "string"
example:
- "example:1.0"
- "example:latest"
- "example:stable"
- "internal.registry.example.com:5000/example:1.0"
RepoDigests: RepoDigests:
description: |
List of content-addressable digests of locally available image manifests
that reference this image. Multiple manifests can refer to the
same image.
These digests are usually only available if the image was either pulled
from a registry, or if the image was pushed to a registry, which is when
the manifest is generated and its digest calculated.
type: "array" type: "array"
x-nullable: false x-nullable: false
items: items:
type: "string" type: "string"
example:
- "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
- "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
Created: Created:
description: |
Date and time at which the image was created as a Unix timestamp
(number of seconds since the Unix epoch).
type: "integer" type: "integer"
x-nullable: false x-nullable: false
example: "1644009612"
Size: Size:
description: |
Total size of the image including all layers it is composed of.
type: "integer" type: "integer"
format: "int64"
x-nullable: false x-nullable: false
example: 172064416
SharedSize: SharedSize:
description: |
Total size of image layers that are shared between this image and other
images.
This size is not calculated by default. `-1` indicates that the value
has not been set / calculated.
type: "integer" type: "integer"
x-nullable: false x-nullable: false
example: 1239828
VirtualSize: VirtualSize:
description: |
Total size of the image including all layers it is composed of.
In versions of Docker before v1.10, this field was calculated from
the image itself and all of its parent images. Docker v1.10 and up
store images self-contained, and no longer use a parent-chain, making
this field equivalent to the Size field.
This field is kept for backward compatibility, but may be removed in
a future version of the API.
type: "integer" type: "integer"
format: "int64"
x-nullable: false x-nullable: false
example: 172064416
Labels: Labels:
description: "User-defined key/value metadata."
type: "object" type: "object"
x-nullable: false x-nullable: false
additionalProperties: additionalProperties:
type: "string" type: "string"
example:
com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value"
Containers: Containers:
description: |
Number of containers using this image. Includes both stopped and running
containers.
This value is not calculated by default, and depends on which API endpoint
is used. `-1` indicates that the value has not been set / calculated.
x-nullable: false x-nullable: false
type: "integer" type: "integer"
example: 2
AuthConfig: AuthConfig:
type: "object" type: "object"
@ -1924,6 +2009,7 @@ definitions:
UsageData: UsageData:
type: "object" type: "object"
x-nullable: true x-nullable: true
x-go-name: "UsageData"
required: [Size, RefCount] required: [Size, RefCount]
description: | description: |
Usage details about the volume. This information is used by the Usage details about the volume. This information is used by the
@ -1950,7 +2036,7 @@ definitions:
description: "Volume configuration" description: "Volume configuration"
type: "object" type: "object"
title: "VolumeConfig" title: "VolumeConfig"
x-go-name: "VolumeCreateBody" x-go-name: "CreateOptions"
properties: properties:
Name: Name:
description: | description: |
@ -1984,6 +2070,25 @@ definitions:
com.example.some-label: "some-value" com.example.some-label: "some-value"
com.example.some-other-label: "some-other-value" com.example.some-other-label: "some-other-value"
VolumeListResponse:
type: "object"
title: "VolumeListResponse"
x-go-name: "ListResponse"
description: "Volume list response"
properties:
Volumes:
type: "array"
description: "List of volumes"
items:
$ref: "#/definitions/Volume"
Warnings:
type: "array"
description: |
Warnings that occurred when fetching the list of volumes.
items:
type: "string"
example: []
Network: Network:
type: "object" type: "object"
properties: properties:
@ -4512,10 +4617,30 @@ definitions:
Health: Health:
$ref: "#/definitions/Health" $ref: "#/definitions/Health"
ContainerCreateResponse:
description: "OK response to ContainerCreate operation"
type: "object"
title: "ContainerCreateResponse"
x-go-name: "CreateResponse"
required: [Id, Warnings]
properties:
Id:
description: "The ID of the created container"
type: "string"
x-nullable: false
example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
Warnings:
description: "Warnings encountered when creating the container"
type: "array"
x-nullable: false
items:
type: "string"
example: []
ContainerWaitResponse: ContainerWaitResponse:
description: "OK response to ContainerWait operation" description: "OK response to ContainerWait operation"
type: "object" type: "object"
x-go-name: "ContainerWaitOKBody" x-go-name: "WaitResponse"
title: "ContainerWaitResponse" title: "ContainerWaitResponse"
required: [StatusCode, Error] required: [StatusCode, Error]
properties: properties:
@ -4529,7 +4654,7 @@ definitions:
ContainerWaitExitError: ContainerWaitExitError:
description: "container waiting error, if any" description: "container waiting error, if any"
type: "object" type: "object"
x-go-name: "ContainerWaitOKBodyError" x-go-name: "WaitExitError"
properties: properties:
Message: Message:
description: "Details of an error" description: "Details of an error"
@ -5976,25 +6101,7 @@ paths:
201: 201:
description: "Container created successfully" description: "Container created successfully"
schema: schema:
type: "object" $ref: "#/definitions/ContainerCreateResponse"
title: "ContainerCreateResponse"
description: "OK response to ContainerCreate operation"
required: [Id, Warnings]
properties:
Id:
description: "The ID of the created container"
type: "string"
x-nullable: false
Warnings:
description: "Warnings encountered when creating the container"
type: "array"
x-nullable: false
items:
type: "string"
examples:
application/json:
Id: "e90e34656806"
Warnings: []
400: 400:
description: "bad parameter" description: "bad parameter"
schema: schema:
@ -6784,6 +6891,11 @@ paths:
required: true required: true
description: "ID or name of the container" description: "ID or name of the container"
type: "string" type: "string"
- name: "signal"
in: "query"
description: |
Signal to send to the container as an integer or string (e.g. `SIGINT`).
type: "string"
- name: "t" - name: "t"
in: "query" in: "query"
description: "Number of seconds to wait before killing the container" description: "Number of seconds to wait before killing the container"
@ -6813,6 +6925,11 @@ paths:
required: true required: true
description: "ID or name of the container" description: "ID or name of the container"
type: "string" type: "string"
- name: "signal"
in: "query"
description: |
Signal to send to the container as an integer or string (e.g. `SIGINT`).
type: "string"
- name: "t" - name: "t"
in: "query" in: "query"
description: "Number of seconds to wait before killing the container" description: "Number of seconds to wait before killing the container"
@ -6854,7 +6971,8 @@ paths:
type: "string" type: "string"
- name: "signal" - name: "signal"
in: "query" in: "query"
description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" description: |
Signal to send to the container as an integer or string (e.g. `SIGINT`).
type: "string" type: "string"
default: "SIGKILL" default: "SIGKILL"
tags: ["Container"] tags: ["Container"]
@ -7542,35 +7660,6 @@ paths:
type: "array" type: "array"
items: items:
$ref: "#/definitions/ImageSummary" $ref: "#/definitions/ImageSummary"
examples:
application/json:
- Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
ParentId: ""
RepoTags:
- "ubuntu:12.04"
- "ubuntu:precise"
RepoDigests:
- "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"
Created: 1474925151
Size: 103579269
VirtualSize: 103579269
SharedSize: 0
Labels: {}
Containers: 2
- Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175"
ParentId: ""
RepoTags:
- "ubuntu:12.10"
- "ubuntu:quantal"
RepoDigests:
- "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7"
- "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"
Created: 1403128455
Size: 172064416
VirtualSize: 172064416
SharedSize: 0
Labels: {}
Containers: 5
500: 500:
description: "server error" description: "server error"
schema: schema:
@ -9078,24 +9167,7 @@ paths:
200: 200:
description: "Summary volume data that matches the query" description: "Summary volume data that matches the query"
schema: schema:
type: "object" $ref: "#/definitions/VolumeListResponse"
title: "VolumeListResponse"
description: "Volume list response"
required: [Volumes, Warnings]
properties:
Volumes:
type: "array"
x-nullable: false
description: "List of volumes"
items:
$ref: "#/definitions/Volume"
Warnings:
type: "array"
x-nullable: false
description: |
Warnings that occurred when fetching the list of volumes.
items:
type: "string"
500: 500:
description: "Server error" description: "Server error"
schema: schema:

View File

@ -13,6 +13,24 @@ import (
// Docker interprets it as 3 nanoseconds. // Docker interprets it as 3 nanoseconds.
const MinimumDuration = 1 * time.Millisecond const MinimumDuration = 1 * time.Millisecond
// StopOptions holds the options to stop or restart a container.
type StopOptions struct {
// Signal (optional) is the signal to send to the container to (gracefully)
// stop it before forcibly terminating the container with SIGKILL after the
// timeout expires. If no value is set, the default (SIGTERM) is used.
Signal string `json:",omitempty"`
// Timeout (optional) is the timeout (in seconds) to wait for the container
// to stop gracefully before forcibly terminating it with SIGKILL.
//
// - Use nil to use the default timeout (10 seconds).
// - Use '-1' to wait indefinitely.
// - Use '0' to not wait for the container to exit gracefully, and
// immediately proceed to forcibly terminating the container.
// - Other positive values are used as timeout (in seconds).
Timeout *int `json:",omitempty"`
}
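A minimal usage sketch (not part of this diff) of StopOptions with the updated ContainerStop client method; cli (*client.Client), ctx, and containerID are assumed to be in scope:

// Sketch only: stop a container with a 30-second grace period and a custom
// stop signal, using the semantics documented on StopOptions above.
func stopGracefully(ctx context.Context, cli *client.Client, containerID string) error {
	timeout := 30 // seconds; a nil Timeout uses the engine/container default
	return cli.ContainerStop(ctx, containerID, container.StopOptions{
		Signal:  "SIGINT", // only sent to daemons speaking API v1.42 or newer
		Timeout: &timeout, // -1 waits indefinitely, 0 kills immediately
	})
}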
// HealthConfig holds configuration settings for the HEALTHCHECK feature. // HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct { type HealthConfig struct {
// Test is the test to perform to check that the container is healthy. // Test is the test to perform to check that the container is healthy.

View File

@ -1,20 +0,0 @@
package container // import "github.com/docker/docker/api/types/container"
// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerCreateCreatedBody OK response to ContainerCreate operation
// swagger:model ContainerCreateCreatedBody
type ContainerCreateCreatedBody struct {
// The ID of the created container
// Required: true
ID string `json:"Id"`
// Warnings encountered when creating the container
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@ -0,0 +1,19 @@
package container
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// CreateResponse ContainerCreateResponse
//
// OK response to ContainerCreate operation
// swagger:model CreateResponse
type CreateResponse struct {
// The ID of the created container
// Required: true
ID string `json:"Id"`
// Warnings encountered when creating the container
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@ -0,0 +1,16 @@
package container // import "github.com/docker/docker/api/types/container"
// ContainerCreateCreatedBody OK response to ContainerCreate operation
//
// Deprecated: use CreateResponse
type ContainerCreateCreatedBody = CreateResponse
// ContainerWaitOKBody OK response to ContainerWait operation
//
// Deprecated: use WaitResponse
type ContainerWaitOKBody = WaitResponse
// ContainerWaitOKBodyError container waiting error, if any
//
// Deprecated: use WaitExitError
type ContainerWaitOKBodyError = WaitExitError
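Because these are type aliases rather than new types, existing callers keep compiling unchanged; a quick sketch:

// Sketch: the old and new names denote the same type, so no conversion
// is needed when migrating.
var legacy container.ContainerWaitOKBody
var current container.WaitResponse = legacy // valid: alias, not a distinct type
_ = current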

View File

@ -3,9 +3,9 @@ package container
// This file was generated by the swagger tool. // This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command // Editing this file might prove futile when you re-run the swagger generate command
// ContainerWaitOKBodyError container waiting error, if any // WaitExitError container waiting error, if any
// swagger:model ContainerWaitOKBodyError // swagger:model WaitExitError
type ContainerWaitOKBodyError struct { type WaitExitError struct {
// Details of an error // Details of an error
Message string `json:"Message,omitempty"` Message string `json:"Message,omitempty"`

View File

@ -3,15 +3,15 @@ package container
// This file was generated by the swagger tool. // This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command // Editing this file might prove futile when you re-run the swagger generate command
// ContainerWaitOKBody ContainerWaitResponse // WaitResponse ContainerWaitResponse
// //
// OK response to ContainerWait operation // OK response to ContainerWait operation
// swagger:model ContainerWaitOKBody // swagger:model WaitResponse
type ContainerWaitOKBody struct { type WaitResponse struct {
// error // error
// Required: true // Required: true
Error *ContainerWaitOKBodyError `json:"Error"` Error *WaitExitError `json:"Error"`
// Exit code of the container // Exit code of the container
// Required: true // Required: true

View File

@ -0,0 +1,14 @@
package types // import "github.com/docker/docker/api/types"
import "github.com/docker/docker/api/types/volume"
// Volume volume
//
// Deprecated: use github.com/docker/docker/api/types/volume.Volume
type Volume = volume.Volume
// VolumeUsageData Usage details about the volume. This information is used by the
// `GET /system/df` endpoint, and omitted in other endpoints.
//
// Deprecated: use github.com/docker/docker/api/types/volume.UsageData
type VolumeUsageData = volume.UsageData

View File

@ -7,43 +7,91 @@ package types
// swagger:model ImageSummary // swagger:model ImageSummary
type ImageSummary struct { type ImageSummary struct {
// containers // Number of containers using this image. Includes both stopped and running
// containers.
//
// This value is not calculated by default, and depends on which API endpoint
// is used. `-1` indicates that the value has not been set / calculated.
//
// Required: true // Required: true
Containers int64 `json:"Containers"` Containers int64 `json:"Containers"`
// created // Date and time at which the image was created as a Unix timestamp
// (number of seconds since EPOCH).
//
// Required: true // Required: true
Created int64 `json:"Created"` Created int64 `json:"Created"`
// Id // ID is the content-addressable ID of an image.
//
// This identifier is a content-addressable digest calculated from the
// image's configuration (which includes the digests of layers used by
// the image).
//
// Note that this digest differs from the `RepoDigests` below, which
// holds digests of image manifests that reference the image.
//
// Required: true // Required: true
ID string `json:"Id"` ID string `json:"Id"`
// labels // User-defined key/value metadata.
// Required: true // Required: true
Labels map[string]string `json:"Labels"` Labels map[string]string `json:"Labels"`
// parent Id // ID of the parent image.
//
// Depending on how the image was created, this field may be empty and
// is only set for images that were built/created locally. This field
// is empty if the image was pulled from an image registry.
//
// Required: true // Required: true
ParentID string `json:"ParentId"` ParentID string `json:"ParentId"`
// repo digests // List of content-addressable digests of locally available image manifests
// that the image is referenced from. Multiple manifests can refer to the
// same image.
//
// These digests are usually only available if the image was either pulled
// from a registry, or if the image was pushed to a registry, which is when
// the manifest is generated and its digest calculated.
//
// Required: true // Required: true
RepoDigests []string `json:"RepoDigests"` RepoDigests []string `json:"RepoDigests"`
// repo tags // List of image names/tags in the local image cache that reference this
// image.
//
// Multiple image tags can refer to the same image, and this list may be
// empty if no tags reference the image, in which case the image is
// "untagged", but it can still be referenced by its ID.
//
// Required: true // Required: true
RepoTags []string `json:"RepoTags"` RepoTags []string `json:"RepoTags"`
// shared size // Total size of image layers that are shared between this image and other
// images.
//
// This size is not calculated by default. `-1` indicates that the value
// has not been set / calculated.
//
// Required: true // Required: true
SharedSize int64 `json:"SharedSize"` SharedSize int64 `json:"SharedSize"`
// size // Total size of the image including all layers it is composed of.
//
// Required: true // Required: true
Size int64 `json:"Size"` Size int64 `json:"Size"`
// virtual size // Total size of the image including all layers it is composed of.
//
// In versions of Docker before v1.10, this field was calculated from
// the image itself and all of its parent images. Docker v1.10 and up
// store images self-contained, and no longer use a parent-chain, making
// this field an equivalent of the Size field.
//
// This field is kept for backward compatibility, but may be removed in
// a future version of the API.
//
// Required: true // Required: true
VirtualSize int64 `json:"VirtualSize"` VirtualSize int64 `json:"VirtualSize"`
} }

View File

@ -1,12 +0,0 @@
package time // import "github.com/docker/docker/api/types/time"
import (
"strconv"
"time"
)
// DurationToSecondsString converts the specified duration to the number
// seconds it represents, formatted as a string.
func DurationToSecondsString(duration time.Duration) string {
return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
}

View File

@ -14,6 +14,7 @@ import (
"github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
"github.com/docker/go-connections/nat" "github.com/docker/go-connections/nat"
) )
@ -28,7 +29,7 @@ type RootFS struct {
type ImageInspect struct { type ImageInspect struct {
// ID is the content-addressable ID of an image. // ID is the content-addressable ID of an image.
// //
// This identified is a content-addressable digest calculated from the // This identifier is a content-addressable digest calculated from the
// image's configuration (which includes the digests of layers used by // image's configuration (which includes the digests of layers used by
// the image). // the image).
// //
@ -73,8 +74,11 @@ type ImageInspect struct {
// Depending on how the image was created, this field may be empty. // Depending on how the image was created, this field may be empty.
Container string Container string
// ContainerConfig is the configuration of the container that was committed // ContainerConfig is an optional field containing the configuration of the
// into the image. // container that was last committed when creating the image.
//
// Previous versions of Docker builder used this field to store build cache,
// and it is not in active use anymore.
ContainerConfig *container.Config ContainerConfig *container.Config
// DockerVersion is the version of Docker that was used to build the image. // DockerVersion is the version of Docker that was used to build the image.
@ -683,7 +687,7 @@ type DiskUsage struct {
LayersSize int64 LayersSize int64
Images []*ImageSummary Images []*ImageSummary
Containers []*Container Containers []*Container
Volumes []*Volume Volumes []*volume.Volume
BuildCache []*BuildCache BuildCache []*BuildCache
BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
} }

View File

@ -3,11 +3,11 @@ package volume
// This file was generated by the swagger tool. // This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command // Editing this file might prove futile when you re-run the swagger generate command
// VolumeCreateBody VolumeConfig // CreateOptions VolumeConfig
// //
// Volume configuration // Volume configuration
// swagger:model VolumeCreateBody // swagger:model CreateOptions
type VolumeCreateBody struct { type CreateOptions struct {
// Name of the volume driver to use. // Name of the volume driver to use.
Driver string `json:"Driver,omitempty"` Driver string `json:"Driver,omitempty"`

View File

@ -0,0 +1,11 @@
package volume // import "github.com/docker/docker/api/types/volume"
// VolumeCreateBody Volume configuration
//
// Deprecated: use CreateOptions
type VolumeCreateBody = CreateOptions
// VolumeListOKBody Volume list response
//
// Deprecated: use ListResponse
type VolumeListOKBody = ListResponse

View File

@ -0,0 +1,18 @@
package volume
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ListResponse VolumeListResponse
//
// Volume list response
// swagger:model ListResponse
type ListResponse struct {
// List of volumes
Volumes []*Volume `json:"Volumes"`
// Warnings that occurred when fetching the list of volumes.
//
Warnings []string `json:"Warnings"`
}

View File

@ -1,4 +1,4 @@
package types package volume
// This file was generated by the swagger tool. // This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command // Editing this file might prove futile when you re-run the swagger generate command
@ -47,14 +47,14 @@ type Volume struct {
Status map[string]interface{} `json:"Status,omitempty"` Status map[string]interface{} `json:"Status,omitempty"`
// usage data // usage data
UsageData *VolumeUsageData `json:"UsageData,omitempty"` UsageData *UsageData `json:"UsageData,omitempty"`
} }
// VolumeUsageData Usage details about the volume. This information is used by the // UsageData Usage details about the volume. This information is used by the
// `GET /system/df` endpoint, and omitted in other endpoints. // `GET /system/df` endpoint, and omitted in other endpoints.
// //
// swagger:model VolumeUsageData // swagger:model UsageData
type VolumeUsageData struct { type UsageData struct {
// The number of containers referencing this volume. This field // The number of containers referencing this volume. This field
// is set to `-1` if the reference-count is not available. // is set to `-1` if the reference-count is not available.

View File

@ -1,23 +0,0 @@
package volume // import "github.com/docker/docker/api/types/volume"
// ----------------------------------------------------------------------------
// Code generated by `swagger generate operation`. DO NOT EDIT.
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
import "github.com/docker/docker/api/types"
// VolumeListOKBody Volume list response
// swagger:model VolumeListOKBody
type VolumeListOKBody struct {
// List of volumes
// Required: true
Volumes []*types.Volume `json:"Volumes"`
// Warnings that occurred when fetching the list of volumes.
//
// Required: true
Warnings []string `json:"Warnings"`
}

View File

@ -0,0 +1,88 @@
// Package urlutil provides helper functions to check if a given build-context
// location should be considered a URL or a remote Git repository.
//
// This package is specifically written for use with docker build contexts, and
// should not be used as a general-purpose utility.
package urlutil // import "github.com/docker/docker/builder/remotecontext/urlutil"
import (
"regexp"
"strings"
)
// urlPathWithFragmentSuffix matches fragments to use as Git reference and build
// context from the Git repository. See IsGitURL for details.
var urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
// IsURL returns true if the provided str is an HTTP(S) URL by checking if it
// has a http:// or https:// scheme. No validation is performed to verify if the
// URL is well-formed.
func IsURL(str string) bool {
return strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "http://")
}
// IsGitURL returns true if the provided str is a remote git repository "URL".
//
// This function only performs a rudimentary check (no validation is performed
// to ensure the URL is well-formed), and is written specifically for use with
// docker build, with some logic for backward compatibility with older versions
// of docker: do not use this function as a general-purpose utility.
//
// The following patterns are considered to be a Git URL:
//
// - https://(.*).git(?:#.+)?$ git repository URL with optional fragment, as
// known to be used by GitHub and GitLab.
// - http://(.*).git(?:#.+)?$ same, but non-TLS
// - git://(.*) URLs using git:// scheme
// - git@(.*)
// - github.com/ see description below
//
// The github.com/ prefix is a special case used to treat context-paths
// starting with "github.com/" as a git URL if the given path does not
// exist locally. The "github.com/" prefix is kept for backward compatibility,
// and is a legacy feature.
//
// Going forward, no additional prefixes should be added, and users should
// be encouraged to use explicit URLs (https://github.com/user/repo.git) instead.
//
// Note that IsGitURL does not check if "github.com/" prefixes exist as a local
// path. Code using this function should check if the path exists locally before
// using it as a URL.
//
// Fragments
//
// Git URLs accept context configuration in their fragment section, separated by
// a colon (`:`). The first part represents the reference to check out, and can
// be either a branch, a tag, or a remote reference. The second part represents
// a subdirectory inside the repository to use as the build context.
//
// For example, the following URL uses a directory named "docker" in the branch
// "container" in the https://github.com/myorg/my-repo.git repository:
//
// https://github.com/myorg/my-repo.git#container:docker
//
// The following table represents all the valid suffixes with their build
// contexts:
//
// | Build Syntax Suffix | Git reference used | Build Context Used |
// |--------------------------------|----------------------|--------------------|
// | my-repo.git | refs/heads/master | / |
// | my-repo.git#mytag | refs/tags/mytag | / |
// | my-repo.git#mybranch | refs/heads/mybranch | / |
// | my-repo.git#pull/42/head | refs/pull/42/head | / |
// | my-repo.git#:directory | refs/heads/master | /directory |
// | my-repo.git#master:directory | refs/heads/master | /directory |
// | my-repo.git#mytag:directory | refs/tags/mytag | /directory |
// | my-repo.git#mybranch:directory | refs/heads/mybranch | /directory |
//
func IsGitURL(str string) bool {
if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
return true
}
for _, prefix := range []string{"git://", "github.com/", "git@"} {
if strings.HasPrefix(str, prefix) {
return true
}
}
return false
}
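A small self-contained sketch (not part of this diff) exercising the patterns described above; the URLs are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/builder/remotecontext/urlutil"
)

func main() {
	for _, s := range []string{
		"https://github.com/myorg/my-repo.git#container:docker", // git URL with fragment
		"git@github.com:myorg/my-repo.git",                      // matched by the "git@" prefix
		"https://example.com/archive.tar.gz",                    // plain HTTP(S) URL, not git
	} {
		fmt.Printf("%-55s git=%-5v url=%v\n", s, urlutil.IsGitURL(s), urlutil.IsURL(s))
	}
}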

View File

@ -50,11 +50,6 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str
return err return err
} }
// TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior
if response.statusCode != http.StatusOK {
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
return nil return nil
} }
@ -70,11 +65,6 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s
return nil, types.ContainerPathStat{}, err return nil, types.ContainerPathStat{}, err
} }
// TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior
if response.statusCode != http.StatusOK {
return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
}
// In order to get the copy behavior right, we need to know information // In order to get the copy behavior right, we need to know information
// about both the source and the destination. The response headers include // about both the source and the destination. The response headers include
// stat info about the source that we can use in deciding exactly how to // stat info about the source that we can use in deciding exactly how to

View File

@ -20,8 +20,8 @@ type configWrapper struct {
// ContainerCreate creates a new container based on the given configuration. // ContainerCreate creates a new container based on the given configuration.
// It can be associated with a name, but it's not mandatory. // It can be associated with a name, but it's not mandatory.
func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) { func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) {
var response container.ContainerCreateCreatedBody var response container.CreateResponse
if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
return response, err return response, err

View File

@ -8,7 +8,9 @@ import (
// ContainerKill terminates the container process but does not remove the container from the docker host. // ContainerKill terminates the container process but does not remove the container from the docker host.
func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
query := url.Values{} query := url.Values{}
query.Set("signal", signal) if signal != "" {
query.Set("signal", signal)
}
resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
ensureReaderClosed(resp) ensureReaderClosed(resp)

View File

@ -18,7 +18,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis
query.Set("all", "1") query.Set("all", "1")
} }
if options.Limit != -1 { if options.Limit > 0 {
query.Set("limit", strconv.Itoa(options.Limit)) query.Set("limit", strconv.Itoa(options.Limit))
} }

View File

@ -3,18 +3,22 @@ package client // import "github.com/docker/docker/client"
import ( import (
"context" "context"
"net/url" "net/url"
"time" "strconv"
timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
) )
// ContainerRestart stops and starts a container again. // ContainerRestart stops and starts a container again.
// It makes the daemon wait for the container to be up again for // It makes the daemon wait for the container to be up again for
// a specific amount of time, given the timeout. // a specific amount of time, given the timeout.
func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error {
query := url.Values{} query := url.Values{}
if timeout != nil { if options.Timeout != nil {
query.Set("t", timetypes.DurationToSecondsString(*timeout)) query.Set("t", strconv.Itoa(*options.Timeout))
}
if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") {
query.Set("signal", options.Signal)
} }
resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
ensureReaderClosed(resp) ensureReaderClosed(resp)
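ContainerRestart now takes the same container.StopOptions as ContainerStop; a minimal sketch (cli and containerID assumed):

// The zero value is valid: a nil Timeout and an empty Signal fall back to
// the daemon defaults, and the signal is only sent on API v1.42 and up.
err := cli.ContainerRestart(ctx, containerID, container.StopOptions{})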

View File

@ -3,9 +3,10 @@ package client // import "github.com/docker/docker/client"
import ( import (
"context" "context"
"net/url" "net/url"
"time" "strconv"
timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
) )
// ContainerStop stops a container. In case the container fails to stop // ContainerStop stops a container. In case the container fails to stop
@ -15,10 +16,13 @@ import (
// If the timeout is nil, the container's StopTimeout value is used, if set, // If the timeout is nil, the container's StopTimeout value is used, if set,
// otherwise the engine default. A negative timeout value can be specified, // otherwise the engine default. A negative timeout value can be specified,
// meaning no timeout, i.e. no forceful termination is performed. // meaning no timeout, i.e. no forceful termination is performed.
func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error {
query := url.Values{} query := url.Values{}
if timeout != nil { if options.Timeout != nil {
query.Set("t", timetypes.DurationToSecondsString(*timeout)) query.Set("t", strconv.Itoa(*options.Timeout))
}
if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") {
query.Set("signal", options.Signal)
} }
resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
ensureReaderClosed(resp) ensureReaderClosed(resp)

View File

@ -24,12 +24,12 @@ import (
// wait request or in getting the response. This allows the caller to // wait request or in getting the response. This allows the caller to
// synchronize ContainerWait with other calls, such as specifying a // synchronize ContainerWait with other calls, such as specifying a
// "next-exit" condition before issuing a ContainerStart request. // "next-exit" condition before issuing a ContainerStart request.
func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {
if versions.LessThan(cli.ClientVersion(), "1.30") { if versions.LessThan(cli.ClientVersion(), "1.30") {
return cli.legacyContainerWait(ctx, containerID) return cli.legacyContainerWait(ctx, containerID)
} }
resultC := make(chan container.ContainerWaitOKBody) resultC := make(chan container.WaitResponse)
errC := make(chan error, 1) errC := make(chan error, 1)
query := url.Values{} query := url.Values{}
@ -46,7 +46,7 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit
go func() { go func() {
defer ensureReaderClosed(resp) defer ensureReaderClosed(resp)
var res container.ContainerWaitOKBody var res container.WaitResponse
if err := json.NewDecoder(resp.body).Decode(&res); err != nil { if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
errC <- err errC <- err
return return
@ -60,8 +60,8 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit
// legacyContainerWait returns immediately and doesn't have an option to wait // legacyContainerWait returns immediately and doesn't have an option to wait
// until the container is removed. // until the container is removed.
func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) {
resultC := make(chan container.ContainerWaitOKBody) resultC := make(chan container.WaitResponse)
errC := make(chan error) errC := make(chan error)
go func() { go func() {
@ -72,7 +72,7 @@ func (cli *Client) legacyContainerWait(ctx context.Context, containerID string)
} }
defer ensureReaderClosed(resp) defer ensureReaderClosed(resp)
var res container.ContainerWaitOKBody var res container.WaitResponse
if err := json.NewDecoder(resp.body).Decode(&res); err != nil { if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
errC <- err errC <- err
return return
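A hedged sketch of consuming the renamed WaitResponse from ContainerWait; cli and containerID are assumed:

// Sketch: block until the container is no longer running, then surface
// either the transport error or the in-body WaitExitError.
func waitForExit(ctx context.Context, cli *client.Client, containerID string) error {
	resultC, errC := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
	select {
	case err := <-errC:
		return err
	case res := <-resultC:
		if res.Error != nil {
			return fmt.Errorf("wait error: %s", res.Error.Message)
		}
		fmt.Println("exit code:", res.StatusCode)
	}
	return nil
}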

View File

@ -5,17 +5,16 @@ import (
"io" "io"
"net" "net"
"net/http" "net/http"
"time"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/image"
networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/swarm"
volumetypes "github.com/docker/docker/api/types/volume" "github.com/docker/docker/api/types/volume"
specs "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1"
) )
@ -48,8 +47,8 @@ type CommonAPIClient interface {
type ContainerAPIClient interface { type ContainerAPIClient interface {
ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error)
ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error)
ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error)
ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
@ -65,16 +64,16 @@ type ContainerAPIClient interface {
ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
ContainerRename(ctx context.Context, container, newContainerName string) error ContainerRename(ctx context.Context, container, newContainerName string) error
ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error ContainerRestart(ctx context.Context, container string, options container.StopOptions) error
ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error)
ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
ContainerStop(ctx context.Context, container string, timeout *time.Duration) error ContainerStop(ctx context.Context, container string, options container.StopOptions) error
ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error)
ContainerUnpause(ctx context.Context, container string) error ContainerUnpause(ctx context.Context, container string) error
ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error)
ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error)
CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
@ -107,7 +106,7 @@ type ImageAPIClient interface {
// NetworkAPIClient defines API client methods for the networks // NetworkAPIClient defines API client methods for the networks
type NetworkAPIClient interface { type NetworkAPIClient interface {
NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error
NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
NetworkDisconnect(ctx context.Context, network, container string, force bool) error NetworkDisconnect(ctx context.Context, network, container string, force bool) error
NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error)
@ -174,10 +173,10 @@ type SystemAPIClient interface {
// VolumeAPIClient defines API client methods for the volumes // VolumeAPIClient defines API client methods for the volumes
type VolumeAPIClient interface { type VolumeAPIClient interface {
VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error)
VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error)
VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error)
VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error)
VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumeRemove(ctx context.Context, volumeID string, force bool) error
VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error)
} }

View File

@ -4,18 +4,17 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types/volume"
volumetypes "github.com/docker/docker/api/types/volume"
) )
// VolumeCreate creates a volume in the docker host. // VolumeCreate creates a volume in the docker host.
func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) {
var volume types.Volume var vol volume.Volume
resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
defer ensureReaderClosed(resp) defer ensureReaderClosed(resp)
if err != nil { if err != nil {
return volume, err return vol, err
} }
err = json.NewDecoder(resp.body).Decode(&volume) err = json.NewDecoder(resp.body).Decode(&vol)
return volume, err return vol, err
} }
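A minimal sketch of the renamed volume.CreateOptions (the volume name and labels are illustrative):

// Sketch: create a named local volume with the new options type.
func createExampleVolume(ctx context.Context, cli *client.Client) (volume.Volume, error) {
	return cli.VolumeCreate(ctx, volume.CreateOptions{
		Name:   "example-data",
		Driver: "local",
		Labels: map[string]string{"com.example.purpose": "demo"},
	})
}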

View File

@ -6,33 +6,33 @@ import (
"encoding/json" "encoding/json"
"io" "io"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types/volume"
) )
// VolumeInspect returns the information about a specific volume in the docker host. // VolumeInspect returns the information about a specific volume in the docker host.
func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) {
volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
return volume, err return vol, err
} }
// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) {
if volumeID == "" { if volumeID == "" {
return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
} }
var volume types.Volume var vol volume.Volume
resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
defer ensureReaderClosed(resp) defer ensureReaderClosed(resp)
if err != nil { if err != nil {
return volume, nil, err return vol, nil, err
} }
body, err := io.ReadAll(resp.body) body, err := io.ReadAll(resp.body)
if err != nil { if err != nil {
return volume, nil, err return vol, nil, err
} }
rdr := bytes.NewReader(body) rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&volume) err = json.NewDecoder(rdr).Decode(&vol)
return volume, body, err return vol, body, err
} }

View File

@ -6,12 +6,12 @@ import (
"net/url" "net/url"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
volumetypes "github.com/docker/docker/api/types/volume" "github.com/docker/docker/api/types/volume"
) )
// VolumeList returns the volumes configured in the docker host. // VolumeList returns the volumes configured in the docker host.
func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) {
var volumes volumetypes.VolumeListOKBody var volumes volume.ListResponse
query := url.Values{} query := url.Values{}
if filter.Len() > 0 { if filter.Len() > 0 {

View File

@ -1,52 +0,0 @@
// Package urlutil provides helper function to check urls kind.
// It supports http urls, git urls and transport url (tcp://, …)
package urlutil // import "github.com/docker/docker/pkg/urlutil"
import (
"regexp"
"strings"
)
var (
validPrefixes = map[string][]string{
"url": {"http://", "https://"},
// The github.com/ prefix is a special case used to treat context-paths
// starting with `github.com` as a git URL if the given path does not
// exist locally. The "github.com/" prefix is kept for backward compatibility,
// and is a legacy feature.
//
// Going forward, no additional prefixes should be added, and users should
// be encouraged to use explicit URLs (https://github.com/user/repo.git) instead.
"git": {"git://", "github.com/", "git@"},
"transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"},
}
urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
)
// IsURL returns true if the provided str is an HTTP(S) URL.
func IsURL(str string) bool {
return checkURL(str, "url")
}
// IsGitURL returns true if the provided str is a git repository URL.
func IsGitURL(str string) bool {
if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
return true
}
return checkURL(str, "git")
}
// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL.
func IsTransportURL(str string) bool {
return checkURL(str, "transport")
}
func checkURL(str, kind string) bool {
for _, prefix := range validPrefixes[kind] {
if strings.HasPrefix(str, prefix) {
return true
}
}
return false
}

View File

@ -17,6 +17,23 @@ This package provides various compression algorithms.
# changelog # changelog
* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
<details>
<summary>See Details</summary>
Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
Asynchronous stream decompression is now faster, since the goroutine allocation splits the workload much more effectively. On typical streams this will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
While the release has been extensively tested, it is recommended to test when upgrading.
</details>
* Feb 22, 2022 (v1.14.4) * Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)

View File

@ -0,0 +1,5 @@
package huff0
//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s

View File

@ -165,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63)) return uint16(b.value >> ((64 - n) & 63))
} }
// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
return uint16(b.value >> n)
}
func (b *bitReaderShifted) advance(n uint8) { func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n b.bitsRead += n
b.value <<= n & 63 b.value <<= n & 63

View File

@ -725,189 +725,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
return dst, br.close() return dst, br.close()
} }
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
{
const stream = 0
const stream2 = 1
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
{
const stream = 2
const stream2 = 3
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
off += 2
if off == 0 {
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
// Decompress4X will decompress a 4X encoded stream. // Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly. // The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of // The *capacity* of the dst slice must match the destination size of

View File

@ -0,0 +1,488 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// SECOND PART:
// val2 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val2&mask]
MOVW 0(table)(AX*2), AX // AX - v2
// br0.advance(uint8(v2.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val3&mask]
MOVW 0(table)(AX*2), AX // AX - v3
// br0.advance(uint8(v3.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 0+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// SECOND PART:
// val2 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val2&mask]
MOVW 0(table)(AX*2), AX // AX - v2
// br1.advance(uint8(v2.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val3&mask]
MOVW 0(table)(AX*2), AX // AX - v3
// br1.advance(uint8(v3.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 256+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// SECOND PART:
// val2 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val2&mask]
MOVW 0(table)(AX*2), AX // AX - v2
// br2.advance(uint8(v2.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val3&mask]
MOVW 0(table)(AX*2), AX // AX - v3
// br2.advance(uint8(v3.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 512+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// SECOND PART:
// val2 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val2&mask]
MOVW 0(table)(AX*2), AX // AX - v2
// br3.advance(uint8(v2.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val3&mask]
MOVW 0(table)(AX*2), AX // AX - v3
// br3.advance(uint8(v3.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 768+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3
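A note on the addressing in the MOVW stores above (offsets 0, 256, 512 and 768 off the buffer pointer): the Go caller passes &buf[0][0] of a [4][256]byte array, which is contiguous, so buf[stream][off] lives at byte offset stream*256+off. A small illustrative check (ours, not part of the vendored code):

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var buf [4][256]byte
	base := uintptr(unsafe.Pointer(&buf[0][0]))
	for stream := 0; stream < 4; stream++ {
		const off = 7
		p := uintptr(unsafe.Pointer(&buf[stream][off]))
		fmt.Printf("buf[%d][%d] sits at base+%d\n", stream, off, p-base) // stream*256 + off
	}
}
```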


@ -0,0 +1,197 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// SECOND PART:
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val2&mask]
MOVW 0(table)(AX*2), AX // AX - v2
// br{{ var "id"}}.advance(uint8(v2.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val3&mask]
MOVW 0(table)(AX*2), AX // AX - v3
// br{{ var "id"}}.advance(uint8(v3.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3
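The file above is not raw assembly but a generator template: the decode_2_values_x86 block is stamped out once per stream with different id and bufofs values. The set/var helpers are custom template functions, not stock text/template actions; a rough stdlib-only sketch of the same stamping idea (our own, simplified):

```go
package main

import (
	"os"
	"text/template"
)

const asmTmpl = `{{range .}}skip_fill{{.ID}}:
	MOVW BX, {{.BufOfs}}(buffer)(off*1)
{{end}}`

func main() {
	type stream struct {
		ID     int
		BufOfs int // ID * 256, the per-stream slot in the [4][256]byte buffer
	}
	streams := []stream{{0, 0}, {1, 256}, {2, 512}, {3, 768}}
	t := template.Must(template.New("asm").Parse(asmTmpl))
	if err := t.Execute(os.Stdout, streams); err != nil {
		panic(err)
	}
}
```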


@ -0,0 +1,181 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop.
package huff0
import (
"errors"
"fmt"
)
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
//go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
//go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// fallback8BitSize is the size below which the pure Go version is faster.
const fallback8BitSize = 800
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
use8BitTables := d.actualTableLog <= 8
if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
const debug = false
// see: bitReaderShifted.peekBitsFast()
peekBits := uint8((64 - d.actualTableLog) & 63)
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
if use8BitTables {
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
} else {
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
}
if debug {
fmt.Print("DEBUG: ")
fmt.Printf("off=%d,", off)
for i := 0; i < 4; i++ {
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
i, br[i].bitsRead, br[i].value, br[i].off)
}
fmt.Println("")
}
if off != 0 {
break
}
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must be at least 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
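The jump-table loop above is compact; for readers unfamiliar with the huff0 4X framing, the first six bytes of the payload carry three little-endian uint16 lengths for streams 0–2, and stream 3 takes whatever remains. A standalone sketch of that split (illustrative only):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func splitStreams(src []byte) ([4][]byte, error) {
	var br [4][]byte
	if len(src) < 6+4 {
		return br, fmt.Errorf("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(binary.LittleEndian.Uint16(src[i*2:]))
		if start+length >= len(src) {
			return br, fmt.Errorf("truncated input (or invalid offset)")
		}
		br[i] = src[start : start+length]
		start += length
	}
	br[3] = src[start:]
	return br, nil
}

func main() {
	// 6-byte jump table (lengths 3, 2, 1) followed by the four stream payloads.
	src := append([]byte{3, 0, 2, 0, 1, 0}, []byte("aaabbcz")...)
	streams, err := splitStreams(src)
	if err != nil {
		panic(err)
	}
	for i, s := range streams {
		fmt.Printf("stream %d: %q\n", i, s)
	}
}
```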


@ -0,0 +1,506 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@ -0,0 +1,195 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@ -0,0 +1,193 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// This file contains a generic implementation of Decoder.Decompress4X.
package huff0
import (
"errors"
"fmt"
)
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}
// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}
{
const stream = 0
const stream2 = 1
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
{
const stream = 2
const stream2 = 3
br[stream].fillFast()
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}
off += 2
if off == 0 {
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must be at least 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
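One detail that makes the remainder loop above read easily: each 16-bit decode-table entry packs the consumed bit count in its low byte and the emitted symbol in its high byte, hence uint8(v) and uint8(v >> 8). A minimal sketch of that packing (names are ours):

```go
package main

import "fmt"

func pack(symbol byte, nBits uint8) uint16 {
	return uint16(symbol)<<8 | uint16(nBits)
}

func unpack(v uint16) (symbol byte, nBits uint8) {
	return uint8(v >> 8), uint8(v)
}

func main() {
	v := pack('A', 5) // symbol 'A', 5-bit code
	sym, n := unpack(v)
	fmt.Printf("entry=%#04x symbol=%c nBits=%d\n", v, sym, n)
}
```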


@ -153,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
This package: This package:
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
silesia.tar zskp 1 211947520 73101992 643 313.87 silesia.tar zskp 1 211947520 73821326 634 318.47
silesia.tar zskp 2 211947520 67504318 969 208.38 silesia.tar zskp 2 211947520 67655404 1508 133.96
silesia.tar zskp 3 211947520 64595893 2007 100.68 silesia.tar zskp 3 211947520 64746933 3000 67.37
silesia.tar zskp 4 211947520 60995370 8825 22.90 silesia.tar zskp 4 211947520 60073508 16926 11.94
cgo zstd: cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56 silesia.tar zstd 1 211947520 73605392 543 371.56
@ -165,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
silesia.tar zstd 9 211947520 60212393 5063 39.92 silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package: gzip, stdlib/this package:
silesia.tar gzstd 1 211947520 80007735 1654 122.21 silesia.tar gzstd 1 211947520 80007735 1498 134.87
silesia.tar gzkp 1 211947520 80136201 1152 175.45 silesia.tar gzkp 1 211947520 80088272 1009 200.31
GOB stream of binary data. Highly compressible. GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z https://files.klauspost.com/compress/gob-stream.7z
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
gob-stream zskp 1 1911399616 235022249 3088 590.30 gob-stream zskp 1 1911399616 233948096 3230 564.34
gob-stream zskp 2 1911399616 205669791 3786 481.34 gob-stream zskp 2 1911399616 203997694 4997 364.73
gob-stream zskp 3 1911399616 175034659 9636 189.17 gob-stream zskp 3 1911399616 173526523 13435 135.68
gob-stream zskp 4 1911399616 165609838 50369 36.19 gob-stream zskp 4 1911399616 162195235 47559 38.33
gob-stream zstd 1 1911399616 249810424 2637 691.26 gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31 gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56 gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70 gob-stream zstd 9 1911399616 177620386 16175 112.70
gob-stream gzstd 1 1911399616 357382641 10251 177.82 gob-stream gzstd 1 1911399616 357382013 9046 201.49
gob-stream gzkp 1 1911399616 359753026 5438 335.20 gob-stream gzkp 1 1911399616 359136669 4885 373.08
The test data for the Large Text Compression Benchmark is the first The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html http://mattmahoney.net/dc/textdata.html
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
enwik9 zskp 1 1000000000 343848582 3609 264.18 enwik9 zskp 1 1000000000 343833605 3687 258.64
enwik9 zskp 2 1000000000 317276632 5746 165.97 enwik9 zskp 2 1000000000 317001237 7672 124.29
enwik9 zskp 3 1000000000 292243069 12162 78.41 enwik9 zskp 3 1000000000 291915823 15923 59.89
enwik9 zskp 4 1000000000 262183768 82837 11.51 enwik9 zskp 4 1000000000 261710291 77697 12.27
enwik9 zstd 1 1000000000 358072021 3110 306.65 enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35 enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68 enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40 enwik9 zstd 9 1000000000 278348700 28549 33.40
enwik9 gzstd 1 1000000000 382578136 9604 99.30 enwik9 gzstd 1 1000000000 382578136 8608 110.78
enwik9 gzkp 1 1000000000 383825945 6544 145.73 enwik9 gzkp 1 1000000000 382781160 5628 169.45
Highly compressible JSON file. Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst https://files.klauspost.com/compress/github-june-2days-2019.json.zst
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
VM Image, Linux mint with a few installed applications: VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z https://files.klauspost.com/compress/rawstudio-mint14.7z
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
CSV data: CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
file out level insize outsize millis mb/s file out level insize outsize millis mb/s
nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66 nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
``` ```
## Decompressor ## Decompressor


@ -167,6 +167,11 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
} }
return ErrCompressedSizeTooBig return ErrCompressedSizeTooBig
} }
// Empty compressed blocks must be at least 2 bytes:
// one for Literals_Block_Type and one for Sequences_Section_Header.
if cSize < 2 {
return ErrBlockTooSmall
}
case blockTypeRaw: case blockTypeRaw:
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
if debugDecoder { if debugDecoder {
@ -491,6 +496,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
} }
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
if debugDecoder {
printf("prepareSequences: %d byte(s) input\n", len(in))
}
// Decode Sequences // Decode Sequences
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
if len(in) < 1 { if len(in) < 1 {
@ -499,8 +507,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
var nSeqs int var nSeqs int
seqHeader := in[0] seqHeader := in[0]
switch { switch {
case seqHeader == 0:
in = in[1:]
case seqHeader < 128: case seqHeader < 128:
nSeqs = int(seqHeader) nSeqs = int(seqHeader)
in = in[1:] in = in[1:]
@ -517,6 +523,13 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:] in = in[3:]
} }
if nSeqs == 0 && len(in) != 0 {
// When there are no sequences, there should not be any more data...
if debugDecoder {
printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
}
return ErrUnexpectedBlockSize
}
var seqs = &hist.decoders var seqs = &hist.decoders
seqs.nSeqs = nSeqs seqs.nSeqs = nSeqs
@ -635,6 +648,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
hist.decoders.seqSize = len(hist.decoders.literals) hist.decoders.seqSize = len(hist.decoders.literals)
return nil return nil
} }
hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets hist.decoders.prevOffset = hist.recentOffsets
err := hist.decoders.decode(b.sequence) err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset hist.recentOffsets = hist.decoders.prevOffset
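The sequence-count decoding changed above mirrors RFC 8878's Number_of_Sequences encoding: a single byte below 128 is the count itself, 128–254 opens a two-byte form, and 255 opens a three-byte form (the removed seqHeader == 0 case is now just the <128 path plus the new no-extra-data check). A standalone sketch of the decoding under those rules (simplified, our own helper):

```go
package main

import (
	"errors"
	"fmt"
)

func readNSeqs(in []byte) (nSeqs int, rest []byte, err error) {
	if len(in) < 1 {
		return 0, nil, errors.New("sequence header missing")
	}
	switch b := in[0]; {
	case b < 128:
		return int(b), in[1:], nil
	case b < 255:
		if len(in) < 2 {
			return 0, nil, errors.New("truncated 2-byte header")
		}
		return (int(b)-128)<<8 | int(in[1]), in[2:], nil
	default: // b == 255
		if len(in) < 3 {
			return 0, nil, errors.New("truncated 3-byte header")
		}
		return 0x7f00 + int(in[1]) + int(in[2])<<8, in[3:], nil
	}
}

func main() {
	n, _, _ := readNSeqs([]byte{130, 5})
	fmt.Println(n) // (130-128)<<8 | 5 = 517
}
```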


@ -348,10 +348,10 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.history.setDict(&dict) frame.history.setDict(&dict)
} }
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded return dst, ErrDecoderSizeExceeded
} }
if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { if frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front. // Never preallocate more than 1 GB up front.
if cap(dst)-len(dst) < int(frame.FrameContentSize) { if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
@ -514,7 +514,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Check frame size (before CRC) // Check frame size (before CRC)
d.syncStream.decodedFrame += uint64(len(d.current.b)) d.syncStream.decodedFrame += uint64(len(d.current.b))
if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize { if d.syncStream.decodedFrame > d.frame.FrameContentSize {
if debugDecoder { if debugDecoder {
printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
} }
@ -523,7 +523,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
} }
// Check FCS // Check FCS
if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize { if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
if debugDecoder { if debugDecoder {
printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
} }
@ -700,6 +700,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
} }
hist.decoders = block.async.newHist.decoders hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets hist.recentOffsets = block.async.newHist.recentOffsets
hist.windowSize = block.async.newHist.windowSize
if block.async.newHist.dict != nil { if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict) hist.setDict(block.async.newHist.dict)
} }
@ -811,11 +812,11 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
} }
if !hasErr { if !hasErr {
decodedFrame += uint64(len(do.b)) decodedFrame += uint64(len(do.b))
if fcs > 0 && decodedFrame > fcs { if decodedFrame > fcs {
println("fcs exceeded", block.Last, fcs, decodedFrame) println("fcs exceeded", block.Last, fcs, decodedFrame)
do.err = ErrFrameSizeExceeded do.err = ErrFrameSizeExceeded
hasErr = true hasErr = true
} else if block.Last && fcs > 0 && decodedFrame != fcs { } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
do.err = ErrFrameSizeMismatch do.err = ErrFrameSizeMismatch
hasErr = true hasErr = true
} else { } else {
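Context for the relaxed checks above: FrameContentSize previously used 0 for "unknown", which conflated it with a legitimately empty frame; it is now initialised to an fcsUnknown sentinel instead, so size comparisons hold without a fcs > 0 guard. A minimal sketch of the idea (sentinel value assumed to be the max uint64):

```go
package main

import (
	"fmt"
	"math"
)

const fcsUnknown = math.MaxUint64

func checkFrame(decoded, fcs uint64, last bool) error {
	if decoded > fcs { // always false when fcs == fcsUnknown
		return fmt.Errorf("frame size exceeded: %d > %d", decoded, fcs)
	}
	if last && fcs != fcsUnknown && decoded != fcs {
		return fmt.Errorf("frame size mismatch: %d != %d", decoded, fcs)
	}
	return nil
}

func main() {
	fmt.Println(checkFrame(42, fcsUnknown, true)) // <nil>: unknown size never mismatches
	fmt.Println(checkFrame(42, 40, false))        // exceeded
}
```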


@ -197,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
default: default:
fcsSize = 1 << v fcsSize = 1 << v
} }
d.FrameContentSize = 0 d.FrameContentSize = fcsUnknown
if fcsSize > 0 { if fcsSize > 0 {
b, err := br.readSmall(fcsSize) b, err := br.readSmall(fcsSize)
if err != nil { if err != nil {
@ -343,12 +343,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
err = ErrDecoderSizeExceeded err = ErrDecoderSizeExceeded
break break
} }
if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
err = ErrFrameSizeExceeded
break
}
if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
err = ErrFrameSizeExceeded err = ErrFrameSizeExceeded
break break
@ -356,13 +351,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if dec.Last { if dec.Last {
break break
} }
if debugDecoder && d.FrameContentSize > 0 { if debugDecoder {
println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
} }
} }
dst = d.history.b dst = d.history.b
if err == nil { if err == nil {
if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch err = ErrFrameSizeMismatch
} else if d.HasCheckSum { } else if d.HasCheckSum {
var n int var n int


@ -1,5 +1,5 @@
//go:build gofuzz //go:build ignorecrc
// +build gofuzz // +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved. // Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file. // License information can be found in the LICENSE file.


@ -1,5 +1,5 @@
//go:build !gofuzz //go:build !ignorecrc
// +build !gofuzz // +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved. // Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file. // License information can be found in the LICENSE file.
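The two one-line diffs above rename the build-tag pair gating these fuzz helpers: exactly one of the two files compiles into any build, selected by whether -tags ignorecrc is set. A hypothetical sketch of the same pattern (only the tagged side shown; its !ignorecrc twin would define the constant as false):

```go
//go:build ignorecrc
// +build ignorecrc

package main

import "fmt"

// ignoreCRC is a hypothetical flag; the real files gate CRC verification.
const ignoreCRC = true

func main() {
	fmt.Println("CRC checks skipped:", ignoreCRC)
}
```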


@ -107,7 +107,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0 s.seqSize = 0
litRemain := len(s.literals) litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs { for i := range seqs {
var ll, mo, ml int var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) { if br.off > 4+((maxOffsetBits+16+16)>>3) {
@ -192,7 +195,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
} }
s.seqSize += ll + ml s.seqSize += ll + ml
if s.seqSize > maxBlockSize { if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", s.seqSize) return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
} }
litRemain -= ll litRemain -= ll
if litRemain < 0 { if litRemain < 0 {
@ -230,7 +233,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
} }
s.seqSize += litRemain s.seqSize += litRemain
if s.seqSize > maxBlockSize { if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", s.seqSize) return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
} }
err := br.close() err := br.close()
if err != nil { if err != nil {
@ -347,6 +350,10 @@ func (s *sequenceDecs) decodeSync(history *history) error {
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:] hist := history.b[history.ignoreBuffer:]
out := s.out out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := seqs - 1; i >= 0; i-- { for i := seqs - 1; i >= 0; i-- {
if br.overread() { if br.overread() {
@ -426,7 +433,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
} }
size := ll + ml + len(out) size := ll + ml + len(out)
if size-startSize > maxBlockSize { if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size", size) return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
} }
if size > cap(out) { if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions // Not enough size, which can happen under high volume block streaming conditions
@ -535,6 +542,11 @@ func (s *sequenceDecs) decodeSync(history *history) error {
} }
} }
// Check if space for literals
if len(s.literals)+len(s.out)-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
}
// Add final literals // Add final literals
s.out = append(out, s.literals...) s.out = append(out, s.literals...)
return br.close() return br.close()
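The sequence-decoder hunks above bound output by the frame's window size rather than always by the spec's 128 KiB block maximum, and the error message now reports which limit was hit. A hedged sketch of the clamp (names illustrative):

~~~ go
package main

import "fmt"

// Per the zstd spec, a compressed block decodes to at most 128 KiB;
// the diff additionally caps this at the frame's window size.
const maxCompressedBlockSize = 128 << 10

func effectiveMaxBlockSize(windowSize int) int {
	maxBlockSize := maxCompressedBlockSize
	if windowSize < maxBlockSize {
		maxBlockSize = windowSize
	}
	return maxBlockSize
}

func main() {
	fmt.Println(effectiveMaxBlockSize(1 << 20)) // 131072: spec limit wins
	fmt.Println(effectiveMaxBlockSize(4096))    // 4096: small window wins
}
~~~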

View File

@ -20,7 +20,7 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool var zipReaderPool sync.Pool
// newZipReader cannot be used since we would leak goroutines... // newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser { func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder) dec, ok := zipReaderPool.Get().(*Decoder)
if ok { if ok {
@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
r.mu.Lock() r.mu.Lock()
defer r.mu.Unlock() defer r.mu.Unlock()
if r.dec == nil { if r.dec == nil {
return 0, errors.New("Read after Close") return 0, errors.New("read after close or EOF")
} }
dec, err := r.dec.Read(p) dec, err := r.dec.Read(p)
if err == io.EOF {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.dec = nil
}
return dec, err return dec, err
} }
@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries. // ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example. // See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser { func ZipDecompressor() func(r io.Reader) io.ReadCloser {
return func(r io.Reader) io.ReadCloser { return newZipReader
d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
if err != nil {
panic(err)
}
return d.IOReadCloser()
}
} }
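With the pooled reader above, a decoder is recycled into `zipReaderPool` once a read hits EOF, and `ZipDecompressor` can hand out `newZipReader` directly instead of panicking on construction errors. A hedged usage sketch — `ZipMethodWinZip` (93), `ZipCompressor`, and `ZipDecompressor` are the package's real API; the archive contents are illustrative:

~~~ go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Write an archive entry compressed with zstd under method ID 93.
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello zstd"))
	zw.Close()

	// Read it back; reading to EOF recycles the pooled decoder.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		panic(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	rc, err := zr.File[0].Open()
	if err != nil {
		panic(err)
	}
	data, _ := io.ReadAll(rc)
	rc.Close()
	fmt.Println(string(data)) // hello zstd
}
~~~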

View File

@ -39,6 +39,9 @@ const zstdMinMatch = 3
// Reset the buffer offset when reaching this. // Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize const bufferReset = math.MaxInt32 - MaxWindowSize
// fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64
var ( var (
// ErrReservedBlockType is returned when a reserved block type is found. // ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input. // Typically this indicates wrong or corrupted input.
@ -52,6 +55,10 @@ var (
// Typically returned on invalid input. // Typically returned on invalid input.
ErrBlockTooSmall = errors.New("block too small") ErrBlockTooSmall = errors.New("block too small")
// ErrUnexpectedBlockSize is returned when a block has unexpected size.
// Typically returned on invalid input.
ErrUnexpectedBlockSize = errors.New("unexpected block size")
// ErrMagicMismatch is returned when a "magic" number isn't what is expected. // ErrMagicMismatch is returned when a "magic" number isn't what is expected.
// Typically this indicates wrong or corrupted input. // Typically this indicates wrong or corrupted input.
ErrMagicMismatch = errors.New("invalid input: magic number mismatch") ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
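`ErrUnexpectedBlockSize` joins the package's other sentinel error values, so callers match on the value itself. A hedged sketch of that pattern using the existing `ErrMagicMismatch` sentinel:

~~~ go
package main

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// A decoder without an attached stream is the usual DecodeAll setup.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	_, err = dec.DecodeAll([]byte("definitely not zstd"), nil)
	if errors.Is(err, zstd.ErrMagicMismatch) {
		fmt.Println("corrupt or non-zstd input:", err)
	}
}
~~~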

View File

@ -1,14 +0,0 @@
language: go
sudo: required
dist: trusty
go:
- 1.9
- tip
script:
- go test -v ./...
before_script:
- sudo apt-get update
- sudo apt-get -y install libsofthsm

View File

@ -1,6 +1,6 @@
# PKCS#11 [![Build Status](https://travis-ci.org/miekg/pkcs11.png?branch=master)](https://travis-ci.org/miekg/pkcs11) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/miekg/pkcs11) # PKCS#11
This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom were This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom where
it makes sense. It has been tested with SoftHSM. it makes sense. It has been tested with SoftHSM.
## SoftHSM ## SoftHSM
@ -13,10 +13,10 @@ it makes sense. It has been tested with SoftHSM.
softhsm --init-token --slot 0 --label test --pin 1234 softhsm --init-token --slot 0 --label test --pin 1234
~~~ ~~~
* Then use `libsofthsm.so` as the pkcs11 module: * Then use `libsofthsm2.so` as the pkcs11 module:
~~~ go ~~~ go
p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so") p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
~~~ ~~~
## Examples ## Examples
@ -24,7 +24,7 @@ it makes sense. It has been tested with SoftHSM.
A skeleton program would look somewhat like this (yes, pkcs#11 is verbose): A skeleton program would look somewhat like this (yes, pkcs#11 is verbose):
~~~ go ~~~ go
p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so") p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
err := p.Initialize() err := p.Initialize()
if err != nil { if err != nil {
panic(err) panic(err)

View File

@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:generate go run const_generate.go
// Package pkcs11 is a wrapper around the PKCS#11 cryptographic library. // Package pkcs11 is a wrapper around the PKCS#11 cryptographic library.
package pkcs11 package pkcs11
@ -14,7 +16,7 @@ package pkcs11
#cgo windows CFLAGS: -DPACKED_STRUCTURES #cgo windows CFLAGS: -DPACKED_STRUCTURES
#cgo linux LDFLAGS: -ldl #cgo linux LDFLAGS: -ldl
#cgo darwin LDFLAGS: -ldl #cgo darwin LDFLAGS: -ldl
#cgo openbsd LDFLAGS: -ldl #cgo openbsd LDFLAGS:
#cgo freebsd LDFLAGS: -ldl #cgo freebsd LDFLAGS: -ldl
#include <stdlib.h> #include <stdlib.h>
@ -770,9 +772,10 @@ static inline CK_VOID_PTR getAttributePval(CK_ATTRIBUTE_PTR a)
*/ */
import "C" import "C"
import "strings" import (
"strings"
import "unsafe" "unsafe"
)
// Ctx contains the current pkcs11 context. // Ctx contains the current pkcs11 context.
type Ctx struct { type Ctx struct {

View File

@ -1,3 +1,4 @@
//go:build release
// +build release // +build release
package pkcs11 package pkcs11
@ -5,7 +6,7 @@ package pkcs11
import "fmt" import "fmt"
// Release is current version of the pkcs11 library. // Release is current version of the pkcs11 library.
var Release = R{1, 0, 3} var Release = R{1, 1, 1}
// R holds the version of this library. // R holds the version of this library.
type R struct { type R struct {

View File

@ -182,8 +182,20 @@ func NewAttribute(typ uint, x interface{}) *Attribute {
} }
case int: case int:
a.Value = uintToBytes(uint64(v)) a.Value = uintToBytes(uint64(v))
case int16:
a.Value = uintToBytes(uint64(v))
case int32:
a.Value = uintToBytes(uint64(v))
case int64:
a.Value = uintToBytes(uint64(v))
case uint: case uint:
a.Value = uintToBytes(uint64(v)) a.Value = uintToBytes(uint64(v))
case uint16:
a.Value = uintToBytes(uint64(v))
case uint32:
a.Value = uintToBytes(uint64(v))
case uint64:
a.Value = uintToBytes(uint64(v))
case string: case string:
a.Value = []byte(v) a.Value = []byte(v)
case []byte: case []byte:
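The widened type switch above means fixed-width integers can be passed straight to `NewAttribute` instead of being converted to `uint` first. A hedged sketch of what this enables — `NewAttribute` and the constants are the package's real API, while the attribute template itself is illustrative:

~~~ go
package main

import "github.com/miekg/pkcs11"

func main() {
	_ = []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY),   // uint, as before
		pkcs11.NewAttribute(pkcs11.CKA_VALUE_LEN, int32(32)),           // now accepted
		pkcs11.NewAttribute(pkcs11.CKA_MODULUS_BITS, uint64(2048)),     // now accepted
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
	}
}
~~~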

View File

@ -2,48 +2,18 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Code generated by "go run const_generate.go"; DO NOT EDIT.
package pkcs11 package pkcs11
const ( const (
CKU_SO uint = 0 CK_TRUE = 1
CKU_USER uint = 1 CK_FALSE = 0
CKU_CONTEXT_SPECIFIC uint = 2 CK_UNAVAILABLE_INFORMATION = ^uint(0)
) CK_EFFECTIVELY_INFINITE = 0
CK_INVALID_HANDLE = 0
const ( CKN_SURRENDER = 0
CKO_DATA uint = 0x00000000 CKN_OTP_CHANGED = 1
CKO_CERTIFICATE uint = 0x00000001
CKO_PUBLIC_KEY uint = 0x00000002
CKO_PRIVATE_KEY uint = 0x00000003
CKO_SECRET_KEY uint = 0x00000004
CKO_HW_FEATURE uint = 0x00000005
CKO_DOMAIN_PARAMETERS uint = 0x00000006
CKO_MECHANISM uint = 0x00000007
CKO_OTP_KEY uint = 0x00000008
CKO_VENDOR_DEFINED uint = 0x80000000
)
const (
CKG_MGF1_SHA1 uint = 0x00000001
CKG_MGF1_SHA224 uint = 0x00000005
CKG_MGF1_SHA256 uint = 0x00000002
CKG_MGF1_SHA384 uint = 0x00000003
CKG_MGF1_SHA512 uint = 0x00000004
CKG_MGF1_SHA3_224 uint = 0x00000006
CKG_MGF1_SHA3_256 uint = 0x00000007
CKG_MGF1_SHA3_384 uint = 0x00000008
CKG_MGF1_SHA3_512 uint = 0x00000009
)
const (
CKZ_DATA_SPECIFIED uint = 0x00000001
)
// Generated with: awk '/#define CK[AFKMRC]/{ print $2 " = " $3 }' pkcs11t.h | sed -e 's/UL$//g' -e 's/UL)$/)/g'
// All the flag (CKF_), attribute (CKA_), error code (CKR_), key type (CKK_), certificate type (CKC_) and
// mechanism (CKM_) constants as defined in PKCS#11.
const (
CKF_TOKEN_PRESENT = 0x00000001 CKF_TOKEN_PRESENT = 0x00000001
CKF_REMOVABLE_DEVICE = 0x00000002 CKF_REMOVABLE_DEVICE = 0x00000002
CKF_HW_SLOT = 0x00000004 CKF_HW_SLOT = 0x00000004
@ -66,12 +36,34 @@ const (
CKF_SO_PIN_LOCKED = 0x00400000 CKF_SO_PIN_LOCKED = 0x00400000
CKF_SO_PIN_TO_BE_CHANGED = 0x00800000 CKF_SO_PIN_TO_BE_CHANGED = 0x00800000
CKF_ERROR_STATE = 0x01000000 CKF_ERROR_STATE = 0x01000000
CKU_SO = 0
CKU_USER = 1
CKU_CONTEXT_SPECIFIC = 2
CKS_RO_PUBLIC_SESSION = 0
CKS_RO_USER_FUNCTIONS = 1
CKS_RW_PUBLIC_SESSION = 2
CKS_RW_USER_FUNCTIONS = 3
CKS_RW_SO_FUNCTIONS = 4
CKF_RW_SESSION = 0x00000002 CKF_RW_SESSION = 0x00000002
CKF_SERIAL_SESSION = 0x00000004 CKF_SERIAL_SESSION = 0x00000004
CKO_DATA = 0x00000000
CKO_CERTIFICATE = 0x00000001
CKO_PUBLIC_KEY = 0x00000002
CKO_PRIVATE_KEY = 0x00000003
CKO_SECRET_KEY = 0x00000004
CKO_HW_FEATURE = 0x00000005
CKO_DOMAIN_PARAMETERS = 0x00000006
CKO_MECHANISM = 0x00000007
CKO_OTP_KEY = 0x00000008
CKO_VENDOR_DEFINED = 0x80000000
CKH_MONOTONIC_COUNTER = 0x00000001
CKH_CLOCK = 0x00000002
CKH_USER_INTERFACE = 0x00000003
CKH_VENDOR_DEFINED = 0x80000000
CKK_RSA = 0x00000000 CKK_RSA = 0x00000000
CKK_DSA = 0x00000001 CKK_DSA = 0x00000001
CKK_DH = 0x00000002 CKK_DH = 0x00000002
CKK_ECDSA = 0x00000003 CKK_ECDSA = 0x00000003 // Deprecated
CKK_EC = 0x00000003 CKK_EC = 0x00000003
CKK_X9_42_DH = 0x00000004 CKK_X9_42_DH = 0x00000004
CKK_KEA = 0x00000005 CKK_KEA = 0x00000005
@ -83,7 +75,7 @@ const (
CKK_DES3 = 0x00000015 CKK_DES3 = 0x00000015
CKK_CAST = 0x00000016 CKK_CAST = 0x00000016
CKK_CAST3 = 0x00000017 CKK_CAST3 = 0x00000017
CKK_CAST5 = 0x00000018 CKK_CAST5 = 0x00000018 // Deprecated
CKK_CAST128 = 0x00000018 CKK_CAST128 = 0x00000018
CKK_RC5 = 0x00000019 CKK_RC5 = 0x00000019
CKK_IDEA = 0x0000001A CKK_IDEA = 0x0000001A
@ -99,14 +91,14 @@ const (
CKK_ACTI = 0x00000024 CKK_ACTI = 0x00000024
CKK_CAMELLIA = 0x00000025 CKK_CAMELLIA = 0x00000025
CKK_ARIA = 0x00000026 CKK_ARIA = 0x00000026
CKK_SHA512_224_HMAC = 0x00000027 CKK_MD5_HMAC = 0x00000027
CKK_SHA512_256_HMAC = 0x00000028
CKK_SHA512_T_HMAC = 0x00000029
CKK_SHA_1_HMAC = 0x00000028 CKK_SHA_1_HMAC = 0x00000028
CKK_SHA224_HMAC = 0x0000002E CKK_RIPEMD128_HMAC = 0x00000029
CKK_RIPEMD160_HMAC = 0x0000002A
CKK_SHA256_HMAC = 0x0000002B CKK_SHA256_HMAC = 0x0000002B
CKK_SHA384_HMAC = 0x0000002C CKK_SHA384_HMAC = 0x0000002C
CKK_SHA512_HMAC = 0x0000002D CKK_SHA512_HMAC = 0x0000002D
CKK_SHA224_HMAC = 0x0000002E
CKK_SEED = 0x0000002F CKK_SEED = 0x0000002F
CKK_GOSTR3410 = 0x00000030 CKK_GOSTR3410 = 0x00000030
CKK_GOSTR3411 = 0x00000031 CKK_GOSTR3411 = 0x00000031
@ -116,11 +108,26 @@ const (
CKK_SHA3_384_HMAC = 0x00000035 CKK_SHA3_384_HMAC = 0x00000035
CKK_SHA3_512_HMAC = 0x00000036 CKK_SHA3_512_HMAC = 0x00000036
CKK_VENDOR_DEFINED = 0x80000000 CKK_VENDOR_DEFINED = 0x80000000
CK_CERTIFICATE_CATEGORY_UNSPECIFIED = 0
CK_CERTIFICATE_CATEGORY_TOKEN_USER = 1
CK_CERTIFICATE_CATEGORY_AUTHORITY = 2
CK_CERTIFICATE_CATEGORY_OTHER_ENTITY = 3
CK_SECURITY_DOMAIN_UNSPECIFIED = 0
CK_SECURITY_DOMAIN_MANUFACTURER = 1
CK_SECURITY_DOMAIN_OPERATOR = 2
CK_SECURITY_DOMAIN_THIRD_PARTY = 3
CKC_X_509 = 0x00000000 CKC_X_509 = 0x00000000
CKC_X_509_ATTR_CERT = 0x00000001 CKC_X_509_ATTR_CERT = 0x00000001
CKC_WTLS = 0x00000002 CKC_WTLS = 0x00000002
CKC_VENDOR_DEFINED = 0x80000000 CKC_VENDOR_DEFINED = 0x80000000
CKF_ARRAY_ATTRIBUTE = 0x40000000 CKF_ARRAY_ATTRIBUTE = 0x40000000
CK_OTP_FORMAT_DECIMAL = 0
CK_OTP_FORMAT_HEXADECIMAL = 1
CK_OTP_FORMAT_ALPHANUMERIC = 2
CK_OTP_FORMAT_BINARY = 3
CK_OTP_PARAM_IGNORED = 0
CK_OTP_PARAM_OPTIONAL = 1
CK_OTP_PARAM_MANDATORY = 2
CKA_CLASS = 0x00000000 CKA_CLASS = 0x00000000
CKA_TOKEN = 0x00000001 CKA_TOKEN = 0x00000001
CKA_PRIVATE = 0x00000002 CKA_PRIVATE = 0x00000002
@ -183,15 +190,16 @@ const (
CKA_MODIFIABLE = 0x00000170 CKA_MODIFIABLE = 0x00000170
CKA_COPYABLE = 0x00000171 CKA_COPYABLE = 0x00000171
CKA_DESTROYABLE = 0x00000172 CKA_DESTROYABLE = 0x00000172
CKA_ECDSA_PARAMS = 0x00000180 CKA_ECDSA_PARAMS = 0x00000180 // Deprecated
CKA_EC_PARAMS = 0x00000180 CKA_EC_PARAMS = 0x00000180
CKA_EC_POINT = 0x00000181 CKA_EC_POINT = 0x00000181
CKA_SECONDARY_AUTH = 0x00000200 CKA_SECONDARY_AUTH = 0x00000200 // Deprecated
CKA_AUTH_PIN_FLAGS = 0x00000201 CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated
CKA_ALWAYS_AUTHENTICATE = 0x00000202 CKA_ALWAYS_AUTHENTICATE = 0x00000202
CKA_WRAP_WITH_TRUSTED = 0x00000210 CKA_WRAP_WITH_TRUSTED = 0x00000210
CKA_WRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000211 CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211)
CKA_UNWRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000212 CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212)
CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213)
CKA_OTP_FORMAT = 0x00000220 CKA_OTP_FORMAT = 0x00000220
CKA_OTP_LENGTH = 0x00000221 CKA_OTP_LENGTH = 0x00000221
CKA_OTP_TIME_INTERVAL = 0x00000222 CKA_OTP_TIME_INTERVAL = 0x00000222
@ -226,7 +234,7 @@ const (
CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501 CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501
CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502 CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502
CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503 CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503
CKA_ALLOWED_MECHANISMS = CKF_ARRAY_ATTRIBUTE | 0x00000600 CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600)
CKA_VENDOR_DEFINED = 0x80000000 CKA_VENDOR_DEFINED = 0x80000000
CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000 CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000
CKM_RSA_PKCS = 0x00000001 CKM_RSA_PKCS = 0x00000001
@ -246,11 +254,10 @@ const (
CKM_DSA_KEY_PAIR_GEN = 0x00000010 CKM_DSA_KEY_PAIR_GEN = 0x00000010
CKM_DSA = 0x00000011 CKM_DSA = 0x00000011
CKM_DSA_SHA1 = 0x00000012 CKM_DSA_SHA1 = 0x00000012
CKM_DSA_FIPS_G_GEN = 0x00000013 CKM_DSA_SHA224 = 0x00000013
CKM_DSA_SHA224 = 0x00000014 CKM_DSA_SHA256 = 0x00000014
CKM_DSA_SHA256 = 0x00000015 CKM_DSA_SHA384 = 0x00000015
CKM_DSA_SHA384 = 0x00000016 CKM_DSA_SHA512 = 0x00000016
CKM_DSA_SHA512 = 0x00000017
CKM_DSA_SHA3_224 = 0x00000018 CKM_DSA_SHA3_224 = 0x00000018
CKM_DSA_SHA3_256 = 0x00000019 CKM_DSA_SHA3_256 = 0x00000019
CKM_DSA_SHA3_384 = 0x0000001A CKM_DSA_SHA3_384 = 0x0000001A
@ -387,13 +394,13 @@ const (
CKM_CAST128_KEY_GEN = 0x00000320 CKM_CAST128_KEY_GEN = 0x00000320
CKM_CAST5_ECB = 0x00000321 CKM_CAST5_ECB = 0x00000321
CKM_CAST128_ECB = 0x00000321 CKM_CAST128_ECB = 0x00000321
CKM_CAST5_CBC = 0x00000322 CKM_CAST5_CBC = 0x00000322 // Deprecated
CKM_CAST128_CBC = 0x00000322 CKM_CAST128_CBC = 0x00000322
CKM_CAST5_MAC = 0x00000323 CKM_CAST5_MAC = 0x00000323 // Deprecated
CKM_CAST128_MAC = 0x00000323 CKM_CAST128_MAC = 0x00000323
CKM_CAST5_MAC_GENERAL = 0x00000324 CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated
CKM_CAST128_MAC_GENERAL = 0x00000324 CKM_CAST128_MAC_GENERAL = 0x00000324
CKM_CAST5_CBC_PAD = 0x00000325 CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated
CKM_CAST128_CBC_PAD = 0x00000325 CKM_CAST128_CBC_PAD = 0x00000325
CKM_RC5_KEY_GEN = 0x00000330 CKM_RC5_KEY_GEN = 0x00000330
CKM_RC5_ECB = 0x00000331 CKM_RC5_ECB = 0x00000331
@ -441,9 +448,9 @@ const (
CKM_PBE_MD5_DES_CBC = 0x000003A1 CKM_PBE_MD5_DES_CBC = 0x000003A1
CKM_PBE_MD5_CAST_CBC = 0x000003A2 CKM_PBE_MD5_CAST_CBC = 0x000003A2
CKM_PBE_MD5_CAST3_CBC = 0x000003A3 CKM_PBE_MD5_CAST3_CBC = 0x000003A3
CKM_PBE_MD5_CAST5_CBC = 0x000003A4 CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated
CKM_PBE_MD5_CAST128_CBC = 0x000003A4 CKM_PBE_MD5_CAST128_CBC = 0x000003A4
CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated
CKM_PBE_SHA1_CAST128_CBC = 0x000003A5 CKM_PBE_SHA1_CAST128_CBC = 0x000003A5
CKM_PBE_SHA1_RC4_128 = 0x000003A6 CKM_PBE_SHA1_RC4_128 = 0x000003A6
CKM_PBE_SHA1_RC4_40 = 0x000003A7 CKM_PBE_SHA1_RC4_40 = 0x000003A7
@ -522,7 +529,7 @@ const (
CKM_BATON_COUNTER = 0x00001034 CKM_BATON_COUNTER = 0x00001034
CKM_BATON_SHUFFLE = 0x00001035 CKM_BATON_SHUFFLE = 0x00001035
CKM_BATON_WRAP = 0x00001036 CKM_BATON_WRAP = 0x00001036
CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated
CKM_EC_KEY_PAIR_GEN = 0x00001040 CKM_EC_KEY_PAIR_GEN = 0x00001040
CKM_ECDSA = 0x00001041 CKM_ECDSA = 0x00001041
CKM_ECDSA_SHA1 = 0x00001042 CKM_ECDSA_SHA1 = 0x00001042
@ -551,9 +558,9 @@ const (
CKM_AES_CTR = 0x00001086 CKM_AES_CTR = 0x00001086
CKM_AES_GCM = 0x00001087 CKM_AES_GCM = 0x00001087
CKM_AES_CCM = 0x00001088 CKM_AES_CCM = 0x00001088
CKM_AES_CMAC_GENERAL = 0x00001089 CKM_AES_CTS = 0x00001089
CKM_AES_CMAC = 0x0000108A CKM_AES_CMAC = 0x0000108A
CKM_AES_CTS = 0x0000108B CKM_AES_CMAC_GENERAL = 0x0000108B
CKM_AES_XCBC_MAC = 0x0000108C CKM_AES_XCBC_MAC = 0x0000108C
CKM_AES_XCBC_MAC_96 = 0x0000108D CKM_AES_XCBC_MAC_96 = 0x0000108D
CKM_AES_GMAC = 0x0000108E CKM_AES_GMAC = 0x0000108E
@ -704,33 +711,56 @@ const (
CKR_MUTEX_NOT_LOCKED = 0x000001A1 CKR_MUTEX_NOT_LOCKED = 0x000001A1
CKR_NEW_PIN_MODE = 0x000001B0 CKR_NEW_PIN_MODE = 0x000001B0
CKR_NEXT_OTP = 0x000001B1 CKR_NEXT_OTP = 0x000001B1
CKR_EXCEEDED_MAX_ITERATIONS = 0x000001C0 CKR_EXCEEDED_MAX_ITERATIONS = 0x000001B5
CKR_FIPS_SELF_TEST_FAILED = 0x000001C1 CKR_FIPS_SELF_TEST_FAILED = 0x000001B6
CKR_LIBRARY_LOAD_FAILED = 0x000001C2 CKR_LIBRARY_LOAD_FAILED = 0x000001B7
CKR_PIN_TOO_WEAK = 0x000001C3 CKR_PIN_TOO_WEAK = 0x000001B8
CKR_PUBLIC_KEY_INVALID = 0x000001C4 CKR_PUBLIC_KEY_INVALID = 0x000001B9
CKR_FUNCTION_REJECTED = 0x00000200 CKR_FUNCTION_REJECTED = 0x00000200
CKR_VENDOR_DEFINED = 0x80000000 CKR_VENDOR_DEFINED = 0x80000000
CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001 CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001
CKF_OS_LOCKING_OK = 0x00000002 CKF_OS_LOCKING_OK = 0x00000002
CKF_DONT_BLOCK = 1 CKF_DONT_BLOCK = 1
CKG_MGF1_SHA1 = 0x00000001
CKG_MGF1_SHA256 = 0x00000002
CKG_MGF1_SHA384 = 0x00000003
CKG_MGF1_SHA512 = 0x00000004
CKG_MGF1_SHA224 = 0x00000005
CKZ_DATA_SPECIFIED = 0x00000001
CKD_NULL = 0x00000001
CKD_SHA1_KDF = 0x00000002
CKD_SHA1_KDF_ASN1 = 0x00000003
CKD_SHA1_KDF_CONCATENATE = 0x00000004
CKD_SHA224_KDF = 0x00000005
CKD_SHA256_KDF = 0x00000006
CKD_SHA384_KDF = 0x00000007
CKD_SHA512_KDF = 0x00000008
CKD_CPDIVERSIFY_KDF = 0x00000009
CKD_SHA3_224_KDF = 0x0000000A
CKD_SHA3_256_KDF = 0x0000000B
CKD_SHA3_384_KDF = 0x0000000C
CKD_SHA3_512_KDF = 0x0000000D
CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001
CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002
CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003
CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004
CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005
CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006
CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007
CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008
CKZ_SALT_SPECIFIED = 0x00000001
CK_OTP_VALUE = 0
CK_OTP_PIN = 1
CK_OTP_CHALLENGE = 2
CK_OTP_TIME = 3
CK_OTP_COUNTER = 4
CK_OTP_FLAGS = 5
CK_OTP_OUTPUT_LENGTH = 6
CK_OTP_OUTPUT_FORMAT = 7
CKF_NEXT_OTP = 0x00000001 CKF_NEXT_OTP = 0x00000001
CKF_EXCLUDE_TIME = 0x00000002 CKF_EXCLUDE_TIME = 0x00000002
CKF_EXCLUDE_COUNTER = 0x00000004 CKF_EXCLUDE_COUNTER = 0x00000004
CKF_EXCLUDE_CHALLENGE = 0x00000008 CKF_EXCLUDE_CHALLENGE = 0x00000008
CKF_EXCLUDE_PIN = 0x00000010 CKF_EXCLUDE_PIN = 0x00000010
CKF_USER_FRIENDLY_OTP = 0x00000020 CKF_USER_FRIENDLY_OTP = 0x00000020
CKD_NULL = 0x00000001
CKD_SHA1_KDF = 0x00000002
)
// Special return values defined in PKCS#11 v2.40 section 3.2.
const (
// CK_EFFECTIVELY_INFINITE may be returned in the CK_TOKEN_INFO fields ulMaxSessionCount and ulMaxRwSessionCount.
// It indicates there is no practical limit on the number of sessions.
CK_EFFECTIVELY_INFINITE = 0
// CK_UNAVAILABLE_INFORMATION may be returned for several fields within CK_TOKEN_INFO. It indicates
// the token is unable or unwilling to provide the requested information.
CK_UNAVAILABLE_INFORMATION = ^uint(0)
) )
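The regenerated constants fold `CK_EFFECTIVELY_INFINITE` and `CK_UNAVAILABLE_INFORMATION` into the main block; both are special values PKCS#11 tokens may report in `CK_TOKEN_INFO` session-count fields. A hedged sketch of checking for them, assuming a SoftHSM module path as in the README:

~~~ go
package main

import (
	"fmt"

	"github.com/miekg/pkcs11"
)

func main() {
	p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so") // assumed module path
	if err := p.Initialize(); err != nil {
		panic(err)
	}
	defer p.Destroy()
	defer p.Finalize()

	slots, err := p.GetSlotList(true)
	if err != nil || len(slots) == 0 {
		panic("no token slots available")
	}
	info, err := p.GetTokenInfo(slots[0])
	if err != nil {
		panic(err)
	}
	switch info.MaxSessionCount {
	case pkcs11.CK_EFFECTIVELY_INFINITE:
		fmt.Println("no practical session limit")
	case pkcs11.CK_UNAVAILABLE_INFORMATION:
		fmt.Println("token does not report a session limit")
	default:
		fmt.Println("max sessions:", info.MaxSessionCount)
	}
}
~~~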

View File

@ -6,11 +6,11 @@ package api
import ( import (
context "context" context "context"
fmt "fmt" fmt "fmt"
github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
raftselector "github.com/docker/swarmkit/manager/raftselector"
_ "github.com/docker/swarmkit/protobuf/plugin"
_ "github.com/gogo/protobuf/gogoproto" _ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto" proto "github.com/gogo/protobuf/proto"
github_com_moby_swarmkit_v2_api_deepcopy "github.com/moby/swarmkit/v2/api/deepcopy"
raftselector "github.com/moby/swarmkit/v2/manager/raftselector"
_ "github.com/moby/swarmkit/v2/protobuf/plugin"
grpc "google.golang.org/grpc" grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes" codes "google.golang.org/grpc/codes"
metadata "google.golang.org/grpc/metadata" metadata "google.golang.org/grpc/metadata"
@ -469,11 +469,11 @@ func (m *NodeCertificateStatusResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Status != nil { if o.Status != nil {
m.Status = &IssuanceStatus{} m.Status = &IssuanceStatus{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Status, o.Status) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Status, o.Status)
} }
if o.Certificate != nil { if o.Certificate != nil {
m.Certificate = &Certificate{} m.Certificate = &Certificate{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Certificate, o.Certificate) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Certificate, o.Certificate)
} }
} }
@ -567,7 +567,7 @@ func (m *GetUnlockKeyResponse) CopyFrom(src interface{}) {
m.UnlockKey = make([]byte, len(o.UnlockKey)) m.UnlockKey = make([]byte, len(o.UnlockKey))
copy(m.UnlockKey, o.UnlockKey) copy(m.UnlockKey, o.UnlockKey)
} }
github_com_docker_swarmkit_api_deepcopy.Copy(&m.Version, &o.Version) github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Version, &o.Version)
} }
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.

View File

@ -6,13 +6,13 @@ package api
import ( import (
context "context" context "context"
fmt "fmt" fmt "fmt"
github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
raftselector "github.com/docker/swarmkit/manager/raftselector"
_ "github.com/docker/swarmkit/protobuf/plugin"
_ "github.com/gogo/protobuf/gogoproto" _ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto" proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
types "github.com/gogo/protobuf/types" types "github.com/gogo/protobuf/types"
github_com_moby_swarmkit_v2_api_deepcopy "github.com/moby/swarmkit/v2/api/deepcopy"
raftselector "github.com/moby/swarmkit/v2/manager/raftselector"
_ "github.com/moby/swarmkit/v2/protobuf/plugin"
grpc "google.golang.org/grpc" grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes" codes "google.golang.org/grpc/codes"
metadata "google.golang.org/grpc/metadata" metadata "google.golang.org/grpc/metadata"
@ -4436,7 +4436,7 @@ func (m *GetNodeResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Node != nil { if o.Node != nil {
m.Node = &Node{} m.Node = &Node{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Node, o.Node)
} }
} }
@ -4455,7 +4455,7 @@ func (m *ListNodesRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListNodesRequest_Filters{} m.Filters = &ListNodesRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -4530,7 +4530,7 @@ func (m *ListNodesResponse) CopyFrom(src interface{}) {
m.Nodes = make([]*Node, len(o.Nodes)) m.Nodes = make([]*Node, len(o.Nodes))
for i := range m.Nodes { for i := range m.Nodes {
m.Nodes[i] = &Node{} m.Nodes[i] = &Node{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i])
} }
} }
@ -4551,11 +4551,11 @@ func (m *UpdateNodeRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.NodeVersion != nil { if o.NodeVersion != nil {
m.NodeVersion = &Version{} m.NodeVersion = &Version{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.NodeVersion, o.NodeVersion) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.NodeVersion, o.NodeVersion)
} }
if o.Spec != nil { if o.Spec != nil {
m.Spec = &NodeSpec{} m.Spec = &NodeSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -4574,7 +4574,7 @@ func (m *UpdateNodeResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Node != nil { if o.Node != nil {
m.Node = &Node{} m.Node = &Node{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Node, o.Node)
} }
} }
@ -4633,7 +4633,7 @@ func (m *GetTaskResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Task != nil { if o.Task != nil {
m.Task = &Task{} m.Task = &Task{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Task, o.Task) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Task, o.Task)
} }
} }
@ -4677,7 +4677,7 @@ func (m *ListTasksRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListTasksRequest_Filters{} m.Filters = &ListTasksRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -4755,7 +4755,7 @@ func (m *ListTasksResponse) CopyFrom(src interface{}) {
m.Tasks = make([]*Task, len(o.Tasks)) m.Tasks = make([]*Task, len(o.Tasks))
for i := range m.Tasks { for i := range m.Tasks {
m.Tasks[i] = &Task{} m.Tasks[i] = &Task{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i])
} }
} }
@ -4776,7 +4776,7 @@ func (m *CreateServiceRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Spec != nil { if o.Spec != nil {
m.Spec = &ServiceSpec{} m.Spec = &ServiceSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -4795,7 +4795,7 @@ func (m *CreateServiceResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Service != nil { if o.Service != nil {
m.Service = &Service{} m.Service = &Service{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Service, o.Service)
} }
} }
@ -4829,7 +4829,7 @@ func (m *GetServiceResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Service != nil { if o.Service != nil {
m.Service = &Service{} m.Service = &Service{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Service, o.Service)
} }
} }
@ -4848,11 +4848,11 @@ func (m *UpdateServiceRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.ServiceVersion != nil { if o.ServiceVersion != nil {
m.ServiceVersion = &Version{} m.ServiceVersion = &Version{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.ServiceVersion, o.ServiceVersion) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ServiceVersion, o.ServiceVersion)
} }
if o.Spec != nil { if o.Spec != nil {
m.Spec = &ServiceSpec{} m.Spec = &ServiceSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -4871,7 +4871,7 @@ func (m *UpdateServiceResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Service != nil { if o.Service != nil {
m.Service = &Service{} m.Service = &Service{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Service, o.Service)
} }
} }
@ -4915,7 +4915,7 @@ func (m *ListServicesRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListServicesRequest_Filters{} m.Filters = &ListServicesRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -4978,7 +4978,7 @@ func (m *ListServicesResponse) CopyFrom(src interface{}) {
m.Services = make([]*Service, len(o.Services)) m.Services = make([]*Service, len(o.Services))
for i := range m.Services { for i := range m.Services {
m.Services[i] = &Service{} m.Services[i] = &Service{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Services[i], o.Services[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Services[i], o.Services[i])
} }
} }
@ -5021,7 +5021,7 @@ func (m *ListServiceStatusesResponse) CopyFrom(src interface{}) {
m.Statuses = make([]*ListServiceStatusesResponse_ServiceStatus, len(o.Statuses)) m.Statuses = make([]*ListServiceStatusesResponse_ServiceStatus, len(o.Statuses))
for i := range m.Statuses { for i := range m.Statuses {
m.Statuses[i] = &ListServiceStatusesResponse_ServiceStatus{} m.Statuses[i] = &ListServiceStatusesResponse_ServiceStatus{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Statuses[i], o.Statuses[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Statuses[i], o.Statuses[i])
} }
} }
@ -5057,7 +5057,7 @@ func (m *CreateNetworkRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Spec != nil { if o.Spec != nil {
m.Spec = &NetworkSpec{} m.Spec = &NetworkSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -5076,7 +5076,7 @@ func (m *CreateNetworkResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Network != nil { if o.Network != nil {
m.Network = &Network{} m.Network = &Network{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Network, o.Network)
} }
} }
@ -5110,7 +5110,7 @@ func (m *GetNetworkResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Network != nil { if o.Network != nil {
m.Network = &Network{} m.Network = &Network{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Network, o.Network)
} }
} }
@ -5154,7 +5154,7 @@ func (m *ListNetworksRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListNetworksRequest_Filters{} m.Filters = &ListNetworksRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -5212,7 +5212,7 @@ func (m *ListNetworksResponse) CopyFrom(src interface{}) {
m.Networks = make([]*Network, len(o.Networks)) m.Networks = make([]*Network, len(o.Networks))
for i := range m.Networks { for i := range m.Networks {
m.Networks[i] = &Network{} m.Networks[i] = &Network{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Networks[i], o.Networks[i])
} }
} }
@ -5248,7 +5248,7 @@ func (m *GetClusterResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Cluster != nil { if o.Cluster != nil {
m.Cluster = &Cluster{} m.Cluster = &Cluster{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Cluster, o.Cluster) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Cluster, o.Cluster)
} }
} }
@ -5267,7 +5267,7 @@ func (m *ListClustersRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListClustersRequest_Filters{} m.Filters = &ListClustersRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -5325,7 +5325,7 @@ func (m *ListClustersResponse) CopyFrom(src interface{}) {
m.Clusters = make([]*Cluster, len(o.Clusters)) m.Clusters = make([]*Cluster, len(o.Clusters))
for i := range m.Clusters { for i := range m.Clusters {
m.Clusters[i] = &Cluster{} m.Clusters[i] = &Cluster{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i])
} }
} }
@ -5361,13 +5361,13 @@ func (m *UpdateClusterRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.ClusterVersion != nil { if o.ClusterVersion != nil {
m.ClusterVersion = &Version{} m.ClusterVersion = &Version{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.ClusterVersion, o.ClusterVersion) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ClusterVersion, o.ClusterVersion)
} }
if o.Spec != nil { if o.Spec != nil {
m.Spec = &ClusterSpec{} m.Spec = &ClusterSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
github_com_docker_swarmkit_api_deepcopy.Copy(&m.Rotation, &o.Rotation) github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Rotation, &o.Rotation)
} }
func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse { func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse {
@ -5385,7 +5385,7 @@ func (m *UpdateClusterResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Cluster != nil { if o.Cluster != nil {
m.Cluster = &Cluster{} m.Cluster = &Cluster{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Cluster, o.Cluster) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Cluster, o.Cluster)
} }
} }
@ -5419,7 +5419,7 @@ func (m *GetSecretResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Secret != nil { if o.Secret != nil {
m.Secret = &Secret{} m.Secret = &Secret{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Secret, o.Secret)
} }
} }
@ -5438,11 +5438,11 @@ func (m *UpdateSecretRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.SecretVersion != nil { if o.SecretVersion != nil {
m.SecretVersion = &Version{} m.SecretVersion = &Version{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.SecretVersion, o.SecretVersion) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.SecretVersion, o.SecretVersion)
} }
if o.Spec != nil { if o.Spec != nil {
m.Spec = &SecretSpec{} m.Spec = &SecretSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -5461,7 +5461,7 @@ func (m *UpdateSecretResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Secret != nil { if o.Secret != nil {
m.Secret = &Secret{} m.Secret = &Secret{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Secret, o.Secret)
} }
} }
@ -5480,7 +5480,7 @@ func (m *ListSecretsRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListSecretsRequest_Filters{} m.Filters = &ListSecretsRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -5538,7 +5538,7 @@ func (m *ListSecretsResponse) CopyFrom(src interface{}) {
m.Secrets = make([]*Secret, len(o.Secrets)) m.Secrets = make([]*Secret, len(o.Secrets))
for i := range m.Secrets { for i := range m.Secrets {
m.Secrets[i] = &Secret{} m.Secrets[i] = &Secret{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i])
} }
} }
@ -5559,7 +5559,7 @@ func (m *CreateSecretRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Spec != nil { if o.Spec != nil {
m.Spec = &SecretSpec{} m.Spec = &SecretSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -5578,7 +5578,7 @@ func (m *CreateSecretResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Secret != nil { if o.Secret != nil {
m.Secret = &Secret{} m.Secret = &Secret{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Secret, o.Secret)
} }
} }
@ -5637,7 +5637,7 @@ func (m *GetConfigResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Config != nil { if o.Config != nil {
m.Config = &Config{} m.Config = &Config{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Config, o.Config)
} }
} }
@ -5656,11 +5656,11 @@ func (m *UpdateConfigRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.ConfigVersion != nil { if o.ConfigVersion != nil {
m.ConfigVersion = &Version{} m.ConfigVersion = &Version{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.ConfigVersion, o.ConfigVersion) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ConfigVersion, o.ConfigVersion)
} }
if o.Spec != nil { if o.Spec != nil {
m.Spec = &ConfigSpec{} m.Spec = &ConfigSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -5679,7 +5679,7 @@ func (m *UpdateConfigResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Config != nil { if o.Config != nil {
m.Config = &Config{} m.Config = &Config{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Config, o.Config)
} }
} }
@ -5698,7 +5698,7 @@ func (m *ListConfigsRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListConfigsRequest_Filters{} m.Filters = &ListConfigsRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -5756,7 +5756,7 @@ func (m *ListConfigsResponse) CopyFrom(src interface{}) {
m.Configs = make([]*Config, len(o.Configs)) m.Configs = make([]*Config, len(o.Configs))
for i := range m.Configs { for i := range m.Configs {
m.Configs[i] = &Config{} m.Configs[i] = &Config{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Configs[i], o.Configs[i])
} }
} }
@ -5777,7 +5777,7 @@ func (m *CreateConfigRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Spec != nil { if o.Spec != nil {
m.Spec = &ConfigSpec{} m.Spec = &ConfigSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -5796,7 +5796,7 @@ func (m *CreateConfigResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Config != nil { if o.Config != nil {
m.Config = &Config{} m.Config = &Config{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Config, o.Config)
} }
} }
@ -5840,7 +5840,7 @@ func (m *CreateExtensionRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Annotations != nil { if o.Annotations != nil {
m.Annotations = &Annotations{} m.Annotations = &Annotations{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Annotations, o.Annotations) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Annotations, o.Annotations)
} }
} }
@ -5859,7 +5859,7 @@ func (m *CreateExtensionResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Extension != nil { if o.Extension != nil {
m.Extension = &Extension{} m.Extension = &Extension{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Extension, o.Extension) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Extension, o.Extension)
} }
} }
@ -5918,7 +5918,7 @@ func (m *GetExtensionResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Extension != nil { if o.Extension != nil {
m.Extension = &Extension{} m.Extension = &Extension{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Extension, o.Extension) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Extension, o.Extension)
} }
} }
@ -5937,11 +5937,11 @@ func (m *CreateResourceRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Annotations != nil { if o.Annotations != nil {
m.Annotations = &Annotations{} m.Annotations = &Annotations{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Annotations, o.Annotations) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Annotations, o.Annotations)
} }
if o.Payload != nil { if o.Payload != nil {
m.Payload = &types.Any{} m.Payload = &types.Any{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Payload, o.Payload)
} }
} }
@ -5960,7 +5960,7 @@ func (m *CreateResourceResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Resource != nil { if o.Resource != nil {
m.Resource = &Resource{} m.Resource = &Resource{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Resource, o.Resource) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Resource, o.Resource)
} }
} }
@ -6004,15 +6004,15 @@ func (m *UpdateResourceRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.ResourceVersion != nil { if o.ResourceVersion != nil {
m.ResourceVersion = &Version{} m.ResourceVersion = &Version{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.ResourceVersion, o.ResourceVersion) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.ResourceVersion, o.ResourceVersion)
} }
if o.Annotations != nil { if o.Annotations != nil {
m.Annotations = &Annotations{} m.Annotations = &Annotations{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Annotations, o.Annotations) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Annotations, o.Annotations)
} }
if o.Payload != nil { if o.Payload != nil {
m.Payload = &types.Any{} m.Payload = &types.Any{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Payload, o.Payload)
} }
} }
@ -6031,7 +6031,7 @@ func (m *UpdateResourceResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Resource != nil { if o.Resource != nil {
m.Resource = &Resource{} m.Resource = &Resource{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Resource, o.Resource) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Resource, o.Resource)
} }
} }
@ -6065,7 +6065,7 @@ func (m *GetResourceResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Resource != nil { if o.Resource != nil {
m.Resource = &Resource{} m.Resource = &Resource{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Resource, o.Resource) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Resource, o.Resource)
} }
} }
@ -6084,7 +6084,7 @@ func (m *ListResourcesRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListResourcesRequest_Filters{} m.Filters = &ListResourcesRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -6142,7 +6142,7 @@ func (m *ListResourcesResponse) CopyFrom(src interface{}) {
m.Resources = make([]*Resource, len(o.Resources)) m.Resources = make([]*Resource, len(o.Resources))
for i := range m.Resources { for i := range m.Resources {
m.Resources[i] = &Resource{} m.Resources[i] = &Resource{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources[i], o.Resources[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Resources[i], o.Resources[i])
} }
} }
@ -6163,7 +6163,7 @@ func (m *CreateVolumeRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Spec != nil { if o.Spec != nil {
m.Spec = &VolumeSpec{} m.Spec = &VolumeSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -6182,7 +6182,7 @@ func (m *CreateVolumeResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Volume != nil { if o.Volume != nil {
m.Volume = &Volume{} m.Volume = &Volume{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Volume, o.Volume) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volume, o.Volume)
} }
} }
@ -6216,7 +6216,7 @@ func (m *GetVolumeResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Volume != nil { if o.Volume != nil {
m.Volume = &Volume{} m.Volume = &Volume{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Volume, o.Volume) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volume, o.Volume)
} }
} }
@ -6235,11 +6235,11 @@ func (m *UpdateVolumeRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.VolumeVersion != nil { if o.VolumeVersion != nil {
m.VolumeVersion = &Version{} m.VolumeVersion = &Version{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.VolumeVersion, o.VolumeVersion) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.VolumeVersion, o.VolumeVersion)
} }
if o.Spec != nil { if o.Spec != nil {
m.Spec = &VolumeSpec{} m.Spec = &VolumeSpec{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Spec, o.Spec)
} }
} }
@ -6258,7 +6258,7 @@ func (m *UpdateVolumeResponse) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Volume != nil { if o.Volume != nil {
m.Volume = &Volume{} m.Volume = &Volume{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Volume, o.Volume) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volume, o.Volume)
} }
} }
@ -6277,7 +6277,7 @@ func (m *ListVolumesRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Filters != nil { if o.Filters != nil {
m.Filters = &ListVolumesRequest_Filters{} m.Filters = &ListVolumesRequest_Filters{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Filters, o.Filters)
} }
} }
@ -6345,7 +6345,7 @@ func (m *ListVolumesResponse) CopyFrom(src interface{}) {
m.Volumes = make([]*Volume, len(o.Volumes)) m.Volumes = make([]*Volume, len(o.Volumes))
for i := range m.Volumes { for i := range m.Volumes {
m.Volumes[i] = &Volume{} m.Volumes[i] = &Volume{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i])
} }
} }

View File

@ -3,8 +3,8 @@ package defaults
import ( import (
"time" "time"
"github.com/docker/swarmkit/api" "github.com/moby/swarmkit/v2/api"
"github.com/docker/swarmkit/api/deepcopy" "github.com/moby/swarmkit/v2/api/deepcopy"
gogotypes "github.com/gogo/protobuf/types" gogotypes "github.com/gogo/protobuf/types"
) )
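The swarmkit files above (and the generated ones that follow) are mechanically rewritten from `github.com/docker/swarmkit` to the v2 module path `github.com/moby/swarmkit/v2`, including the long `github_com_..._deepcopy` import aliases in generated code. A hedged consumer-side sketch of the new imports; the helper function is illustrative:

~~~ go
package consumer

import (
	"github.com/moby/swarmkit/v2/api"
	"github.com/moby/swarmkit/v2/api/deepcopy"
)

// cloneSpec deep-copies a generated API message via the relocated
// deepcopy package, mirroring the rewritten Copy calls in the diff.
func cloneSpec(src *api.ServiceSpec) *api.ServiceSpec {
	dst := &api.ServiceSpec{}
	deepcopy.Copy(dst, src)
	return dst
}
~~~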

View File

@ -6,13 +6,13 @@ package api
import ( import (
context "context" context "context"
fmt "fmt" fmt "fmt"
github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
raftselector "github.com/docker/swarmkit/manager/raftselector"
_ "github.com/docker/swarmkit/protobuf/plugin"
_ "github.com/gogo/protobuf/gogoproto" _ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto" proto "github.com/gogo/protobuf/proto"
_ "github.com/gogo/protobuf/types" _ "github.com/gogo/protobuf/types"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
github_com_moby_swarmkit_v2_api_deepcopy "github.com/moby/swarmkit/v2/api/deepcopy"
raftselector "github.com/moby/swarmkit/v2/manager/raftselector"
_ "github.com/moby/swarmkit/v2/protobuf/plugin"
grpc "google.golang.org/grpc" grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes" codes "google.golang.org/grpc/codes"
metadata "google.golang.org/grpc/metadata" metadata "google.golang.org/grpc/metadata"
@ -1013,7 +1013,7 @@ func (m *SessionRequest) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Description != nil { if o.Description != nil {
m.Description = &NodeDescription{} m.Description = &NodeDescription{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Description, o.Description) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Description, o.Description)
} }
} }
@ -1032,13 +1032,13 @@ func (m *SessionMessage) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Node != nil { if o.Node != nil {
m.Node = &Node{} m.Node = &Node{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Node, o.Node)
} }
if o.Managers != nil { if o.Managers != nil {
m.Managers = make([]*WeightedPeer, len(o.Managers)) m.Managers = make([]*WeightedPeer, len(o.Managers))
for i := range m.Managers { for i := range m.Managers {
m.Managers[i] = &WeightedPeer{} m.Managers[i] = &WeightedPeer{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Managers[i], o.Managers[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Managers[i], o.Managers[i])
} }
} }
@ -1046,7 +1046,7 @@ func (m *SessionMessage) CopyFrom(src interface{}) {
m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys))
for i := range m.NetworkBootstrapKeys { for i := range m.NetworkBootstrapKeys {
m.NetworkBootstrapKeys[i] = &EncryptionKey{} m.NetworkBootstrapKeys[i] = &EncryptionKey{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i])
} }
} }
@ -1084,7 +1084,7 @@ func (m *HeartbeatResponse) CopyFrom(src interface{}) {
o := src.(*HeartbeatResponse) o := src.(*HeartbeatResponse)
*m = *o *m = *o
github_com_docker_swarmkit_api_deepcopy.Copy(&m.Period, &o.Period) github_com_moby_swarmkit_v2_api_deepcopy.Copy(&m.Period, &o.Period)
} }
func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest { func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest {
@ -1104,7 +1104,7 @@ func (m *UpdateTaskStatusRequest) CopyFrom(src interface{}) {
m.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, len(o.Updates)) m.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, len(o.Updates))
for i := range m.Updates { for i := range m.Updates {
m.Updates[i] = &UpdateTaskStatusRequest_TaskStatusUpdate{} m.Updates[i] = &UpdateTaskStatusRequest_TaskStatusUpdate{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Updates[i], o.Updates[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Updates[i], o.Updates[i])
} }
} }
@ -1125,7 +1125,7 @@ func (m *UpdateTaskStatusRequest_TaskStatusUpdate) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Status != nil { if o.Status != nil {
m.Status = &TaskStatus{} m.Status = &TaskStatus{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Status, o.Status) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Status, o.Status)
} }
} }
@ -1156,7 +1156,7 @@ func (m *UpdateVolumeStatusRequest) CopyFrom(src interface{}) {
m.Updates = make([]*UpdateVolumeStatusRequest_VolumeStatusUpdate, len(o.Updates)) m.Updates = make([]*UpdateVolumeStatusRequest_VolumeStatusUpdate, len(o.Updates))
for i := range m.Updates { for i := range m.Updates {
m.Updates[i] = &UpdateVolumeStatusRequest_VolumeStatusUpdate{} m.Updates[i] = &UpdateVolumeStatusRequest_VolumeStatusUpdate{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Updates[i], o.Updates[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Updates[i], o.Updates[i])
} }
} }
@ -1219,7 +1219,7 @@ func (m *TasksMessage) CopyFrom(src interface{}) {
m.Tasks = make([]*Task, len(o.Tasks)) m.Tasks = make([]*Task, len(o.Tasks))
for i := range m.Tasks { for i := range m.Tasks {
m.Tasks[i] = &Task{} m.Tasks[i] = &Task{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i])
} }
} }
@ -1259,25 +1259,25 @@ func (m *Assignment) CopyFrom(src interface{}) {
v := Assignment_Task{ v := Assignment_Task{
Task: &Task{}, Task: &Task{},
} }
github_com_docker_swarmkit_api_deepcopy.Copy(v.Task, o.GetTask()) github_com_moby_swarmkit_v2_api_deepcopy.Copy(v.Task, o.GetTask())
m.Item = &v m.Item = &v
case *Assignment_Secret: case *Assignment_Secret:
v := Assignment_Secret{ v := Assignment_Secret{
Secret: &Secret{}, Secret: &Secret{},
} }
github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret()) github_com_moby_swarmkit_v2_api_deepcopy.Copy(v.Secret, o.GetSecret())
m.Item = &v m.Item = &v
case *Assignment_Config: case *Assignment_Config:
v := Assignment_Config{ v := Assignment_Config{
Config: &Config{}, Config: &Config{},
} }
github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig()) github_com_moby_swarmkit_v2_api_deepcopy.Copy(v.Config, o.GetConfig())
m.Item = &v m.Item = &v
case *Assignment_Volume: case *Assignment_Volume:
v := Assignment_Volume{ v := Assignment_Volume{
Volume: &VolumeAssignment{}, Volume: &VolumeAssignment{},
} }
github_com_docker_swarmkit_api_deepcopy.Copy(v.Volume, o.GetVolume()) github_com_moby_swarmkit_v2_api_deepcopy.Copy(v.Volume, o.GetVolume())
m.Item = &v m.Item = &v
} }
} }
@ -1299,7 +1299,7 @@ func (m *AssignmentChange) CopyFrom(src interface{}) {
*m = *o *m = *o
if o.Assignment != nil { if o.Assignment != nil {
m.Assignment = &Assignment{} m.Assignment = &Assignment{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Assignment, o.Assignment) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Assignment, o.Assignment)
} }
} }
@ -1320,7 +1320,7 @@ func (m *AssignmentsMessage) CopyFrom(src interface{}) {
m.Changes = make([]*AssignmentChange, len(o.Changes)) m.Changes = make([]*AssignmentChange, len(o.Changes))
for i := range m.Changes { for i := range m.Changes {
m.Changes[i] = &AssignmentChange{} m.Changes[i] = &AssignmentChange{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Changes[i], o.Changes[i]) github_com_moby_swarmkit_v2_api_deepcopy.Copy(m.Changes[i], o.Changes[i])
} }
} }

View File

@ -1,7 +1,7 @@
package genericresource package genericresource
import ( import (
"github.com/docker/swarmkit/api" "github.com/moby/swarmkit/v2/api"
) )
// NewSet creates a set object // NewSet creates a set object

View File

@ -6,7 +6,7 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/docker/swarmkit/api" "github.com/moby/swarmkit/v2/api"
) )
func newParseError(format string, args ...interface{}) error { func newParseError(format string, args ...interface{}) error {

View File

@ -3,7 +3,7 @@ package genericresource
import ( import (
"fmt" "fmt"
"github.com/docker/swarmkit/api" "github.com/moby/swarmkit/v2/api"
) )
// Claim assigns GenericResources to a task by taking them from the // Claim assigns GenericResources to a task by taking them from the

View File

@ -4,7 +4,7 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/docker/swarmkit/api" "github.com/moby/swarmkit/v2/api"
) )
func discreteToString(d *api.GenericResource_DiscreteResourceSpec) string { func discreteToString(d *api.GenericResource_DiscreteResourceSpec) string {

View File

@ -3,7 +3,7 @@ package genericresource
import ( import (
"fmt" "fmt"
"github.com/docker/swarmkit/api" "github.com/moby/swarmkit/v2/api"
) )
// ValidateTask validates that the task only uses integers // ValidateTask validates that the task only uses integers

Some files were not shown because too many files have changed in this diff.