Merge pull request #3606 from thaJeztah/carry_csi_volumes

Proposed Cluster (CSI) Volume Command (rebase)
Sebastiaan van Stijn 2022-05-17 18:43:16 +02:00 committed by GitHub
commit 7f073ab823
11 changed files with 704 additions and 21 deletions

cli/command/formatter/volume.go

@@ -1,6 +1,7 @@
 package formatter
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
 
@@ -12,10 +13,13 @@ const (
 	defaultVolumeQuietFormat = "{{.Name}}"
 	defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}"
 
+	idHeader         = "ID"
 	volumeNameHeader = "VOLUME NAME"
 	mountpointHeader = "MOUNTPOINT"
 	linksHeader      = "LINKS"
-	// Status header ?
+	groupHeader        = "GROUP"
+	availabilityHeader = "AVAILABILITY"
+	statusHeader       = "STATUS"
 )
 
 // NewVolumeFormat returns a format for use with a volume Context
@@ -56,13 +60,17 @@ type volumeContext struct {
 func newVolumeContext() *volumeContext {
 	volumeCtx := volumeContext{}
 	volumeCtx.Header = SubHeaderContext{
+		"ID":           idHeader,
 		"Name":         volumeNameHeader,
+		"Group":        groupHeader,
 		"Driver":       DriverHeader,
 		"Scope":        ScopeHeader,
+		"Availability": availabilityHeader,
 		"Mountpoint":   mountpointHeader,
 		"Labels":       LabelsHeader,
 		"Links":        linksHeader,
 		"Size":         SizeHeader,
+		"Status":       statusHeader,
 	}
 	return &volumeCtx
 }
@@ -119,3 +127,39 @@ func (c *volumeContext) Size() string {
 	}
 	return units.HumanSize(float64(c.v.UsageData.Size))
 }
+
+func (c *volumeContext) Group() string {
+	if c.v.ClusterVolume == nil {
+		return "N/A"
+	}
+
+	return c.v.ClusterVolume.Spec.Group
+}
+
+func (c *volumeContext) Availability() string {
+	if c.v.ClusterVolume == nil {
+		return "N/A"
+	}
+
+	return string(c.v.ClusterVolume.Spec.Availability)
+}
+
+func (c *volumeContext) Status() string {
+	if c.v.ClusterVolume == nil {
+		return "N/A"
+	}
+
+	if c.v.ClusterVolume.Info == nil || c.v.ClusterVolume.Info.VolumeID == "" {
+		return "pending creation"
+	}
+
+	l := len(c.v.ClusterVolume.PublishStatus)
+	switch l {
+	case 0:
+		return "created"
+	case 1:
+		return "in use (1 node)"
+	default:
+		return fmt.Sprintf("in use (%d nodes)", l)
+	}
+}
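
Note: all three accessors above fall back to "N/A" when ClusterVolume is nil, which keeps output for ordinary local volumes unchanged. A minimal sketch of how Status maps cluster state to the strings shown in the table (sample values are hypothetical; the types are the docker API types used above):

    // No Info yet: the CSI plugin has not created the volume.
    v := volume.Volume{ClusterVolume: &volume.ClusterVolume{}} // Status() -> "pending creation"

    // Info with a VolumeID: created, but not published to any node.
    v.ClusterVolume.Info = &volume.Info{VolumeID: "csi-vol-1"} // Status() -> "created"

    // One PublishStatus entry per node the volume is published to.
    v.ClusterVolume.PublishStatus = []*volume.PublishStatus{{NodeID: "node1"}} // Status() -> "in use (1 node)"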

cli/command/formatter/volume_test.go

@@ -148,8 +148,8 @@ func TestVolumeContextWriteJSON(t *testing.T) {
 		{Driver: "bar", Name: "foobar_bar"},
 	}
 	expectedJSONs := []map[string]interface{}{
-		{"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"},
-		{"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"},
+		{"Availability": "N/A", "Driver": "foo", "Group": "N/A", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A", "Status": "N/A"},
+		{"Availability": "N/A", "Driver": "bar", "Group": "N/A", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A", "Status": "N/A"},
 	}
 	out := bytes.NewBufferString("")
 	err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes)

cli/command/volume/cmd.go

@@ -21,6 +21,7 @@ func NewVolumeCommand(dockerCli command.Cli) *cobra.Command {
 		newListCommand(dockerCli),
 		newRemoveCommand(dockerCli),
 		NewPruneCommand(dockerCli),
+		newUpdateCommand(dockerCli),
 	)
 	return cmd
 }

cli/command/volume/create.go

@@ -3,6 +3,7 @@ package volume
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/docker/cli/cli"
 	"github.com/docker/cli/cli/command"
@@ -11,6 +12,7 @@ import (
 	"github.com/docker/docker/api/types/volume"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
 )
 
 type createOptions struct {
@@ -18,12 +20,28 @@
 	name       string
 	driver     string
 	driverOpts opts.MapOpts
 	labels     opts.ListOpts
+
+	// options for cluster volumes only
+	cluster           bool
+	group             string
+	scope             string
+	sharing           string
+	availability      string
+	secrets           opts.MapOpts
+	requiredBytes     opts.MemBytes
+	limitBytes        opts.MemBytes
+	accessType        string
+	requisiteTopology opts.ListOpts
+	preferredTopology opts.ListOpts
 }
 
 func newCreateCommand(dockerCli command.Cli) *cobra.Command {
 	options := createOptions{
 		driverOpts:        *opts.NewMapOpts(nil, nil),
 		labels:            opts.NewListOpts(opts.ValidateLabel),
+		secrets:           *opts.NewMapOpts(nil, nil),
+		requisiteTopology: opts.NewListOpts(nil),
+		preferredTopology: opts.NewListOpts(nil),
 	}
 
 	cmd := &cobra.Command{
@@ -37,6 +55,7 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
 				}
 				options.name = args[0]
 			}
+			options.cluster = hasClusterVolumeOptionSet(cmd.Flags())
 			return runCreate(dockerCli, options)
 		},
 		ValidArgsFunction: completion.NoComplete,
@@ -48,16 +67,130 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
 	flags.VarP(&options.driverOpts, "opt", "o", "Set driver specific options")
 	flags.Var(&options.labels, "label", "Set metadata for a volume")
 
+	// flags for cluster volumes only
+	flags.StringVar(&options.group, "group", "", "Cluster Volume group (cluster volumes)")
+	flags.SetAnnotation("group", "version", []string{"1.42"})
+	flags.SetAnnotation("group", "swarm", []string{"manager"})
+	flags.StringVar(&options.scope, "scope", "single", `Cluster Volume access scope ("single"|"multi")`)
+	flags.SetAnnotation("scope", "version", []string{"1.42"})
+	flags.SetAnnotation("scope", "swarm", []string{"manager"})
+	flags.StringVar(&options.sharing, "sharing", "none", `Cluster Volume access sharing ("none"|"readonly"|"onewriter"|"all")`)
+	flags.SetAnnotation("sharing", "version", []string{"1.42"})
+	flags.SetAnnotation("sharing", "swarm", []string{"manager"})
+	flags.StringVar(&options.availability, "availability", "active", `Cluster Volume availability ("active"|"pause"|"drain")`)
+	flags.SetAnnotation("availability", "version", []string{"1.42"})
+	flags.SetAnnotation("availability", "swarm", []string{"manager"})
+	flags.StringVar(&options.accessType, "type", "block", `Cluster Volume access type ("mount"|"block")`)
+	flags.SetAnnotation("type", "version", []string{"1.42"})
+	flags.SetAnnotation("type", "swarm", []string{"manager"})
+	flags.Var(&options.secrets, "secret", "Cluster Volume secrets")
+	flags.SetAnnotation("secret", "version", []string{"1.42"})
+	flags.SetAnnotation("secret", "swarm", []string{"manager"})
+	flags.Var(&options.limitBytes, "limit-bytes", "Maximum size of the Cluster Volume in bytes")
+	flags.SetAnnotation("limit-bytes", "version", []string{"1.42"})
+	flags.SetAnnotation("limit-bytes", "swarm", []string{"manager"})
+	flags.Var(&options.requiredBytes, "required-bytes", "Minimum size of the Cluster Volume in bytes")
+	flags.SetAnnotation("required-bytes", "version", []string{"1.42"})
+	flags.SetAnnotation("required-bytes", "swarm", []string{"manager"})
+	flags.Var(&options.requisiteTopology, "topology-required", "A topology that the Cluster Volume must be accessible from")
+	flags.SetAnnotation("topology-required", "version", []string{"1.42"})
+	flags.SetAnnotation("topology-required", "swarm", []string{"manager"})
+	flags.Var(&options.preferredTopology, "topology-preferred", "A topology that the Cluster Volume would be preferred in")
+	flags.SetAnnotation("topology-preferred", "version", []string{"1.42"})
+	flags.SetAnnotation("topology-preferred", "swarm", []string{"manager"})
+
 	return cmd
 }
+
+// hasClusterVolumeOptionSet returns true if any of the cluster-specific
+// options are set.
+func hasClusterVolumeOptionSet(flags *pflag.FlagSet) bool {
+	return flags.Changed("group") || flags.Changed("scope") ||
+		flags.Changed("sharing") || flags.Changed("availability") ||
+		flags.Changed("type") || flags.Changed("secret") ||
+		flags.Changed("limit-bytes") || flags.Changed("required-bytes")
+}
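
Note: pflag's Changed reports only flags the user explicitly set, so the defaults above ("single", "none", "active", "block") do not by themselves switch a plain docker volume create into cluster mode. A standalone sketch of that behavior (hypothetical flag set; real github.com/spf13/pflag API):

    flags := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    flags.String("scope", "single", "")
    fmt.Println(flags.Changed("scope")) // false: still the default
    flags.Set("scope", "multi")
    fmt.Println(flags.Changed("scope")) // true: explicitly set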
 
 func runCreate(dockerCli command.Cli, options createOptions) error {
-	vol, err := dockerCli.Client().VolumeCreate(context.Background(), volume.CreateOptions{
+	volOpts := volume.CreateOptions{
 		Driver:     options.driver,
 		DriverOpts: options.driverOpts.GetAll(),
 		Name:       options.name,
 		Labels:     opts.ConvertKVStringsToMap(options.labels.GetAll()),
-	})
+	}
+
+	if options.cluster {
+		volOpts.ClusterVolumeSpec = &volume.ClusterVolumeSpec{
+			Group: options.group,
+			AccessMode: &volume.AccessMode{
+				Scope:   volume.Scope(options.scope),
+				Sharing: volume.SharingMode(options.sharing),
+			},
+			Availability: volume.Availability(options.availability),
+		}
+
+		if options.accessType == "mount" {
+			volOpts.ClusterVolumeSpec.AccessMode.MountVolume = &volume.TypeMount{}
+		} else if options.accessType == "block" {
+			volOpts.ClusterVolumeSpec.AccessMode.BlockVolume = &volume.TypeBlock{}
+		}
+
+		vcr := &volume.CapacityRange{}
+		if r := options.requiredBytes.Value(); r >= 0 {
+			vcr.RequiredBytes = r
+		}
+
+		if l := options.limitBytes.Value(); l >= 0 {
+			vcr.LimitBytes = l
+		}
+		volOpts.ClusterVolumeSpec.CapacityRange = vcr
+
+		for key, secret := range options.secrets.GetAll() {
+			volOpts.ClusterVolumeSpec.Secrets = append(
+				volOpts.ClusterVolumeSpec.Secrets,
+				volume.Secret{
+					Key:    key,
+					Secret: secret,
+				},
+			)
+		}
+
+		// TODO(dperny): ignore if no topology specified
+		topology := &volume.TopologyRequirement{}
+		for _, top := range options.requisiteTopology.GetAll() {
+			// each topology takes the form segment=value,segment=value
+			// comma-separated list of equal separated maps
+			segments := map[string]string{}
+			for _, segment := range strings.Split(top, ",") {
+				parts := strings.SplitN(segment, "=", 2)
+				// TODO(dperny): validate topology syntax
+				segments[parts[0]] = parts[1]
+			}
+
+			topology.Requisite = append(
+				topology.Requisite,
+				volume.Topology{Segments: segments},
+			)
+		}
+
+		for _, top := range options.preferredTopology.GetAll() {
+			// each topology takes the form segment=value,segment=value
+			// comma-separated list of equal separated maps
+			segments := map[string]string{}
+			for _, segment := range strings.Split(top, ",") {
+				parts := strings.SplitN(segment, "=", 2)
+				// TODO(dperny): validate topology syntax
+				segments[parts[0]] = parts[1]
+			}
+
+			topology.Preferred = append(
+				topology.Preferred,
+				volume.Topology{Segments: segments},
+			)
+		}
+
+		volOpts.ClusterVolumeSpec.AccessibilityRequirements = topology
+	}
+
+	vol, err := dockerCli.Client().VolumeCreate(context.Background(), volOpts)
 	if err != nil {
 		return err
 	}
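
Note: as the TODO comments in the topology loops say, the segment syntax is not validated yet: a segment without "=" makes strings.SplitN return one element, and the parts[1] index panics. One possible validating helper (hypothetical, not part of this PR; errors.Errorf is from github.com/pkg/errors, already imported here):

    // parseTopology parses "region=R1,zone=Z1" into a segment map,
    // rejecting malformed segments instead of panicking.
    func parseTopology(top string) (map[string]string, error) {
    	segments := map[string]string{}
    	for _, segment := range strings.Split(top, ",") {
    		parts := strings.SplitN(segment, "=", 2)
    		if len(parts) != 2 || parts[0] == "" {
    			return nil, errors.Errorf("invalid topology segment %q: expected key=value", segment)
    		}
    		segments[parts[0]] = parts[1]
    	}
    	return segments, nil
    }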

cli/command/volume/create_test.go

@@ -123,3 +123,100 @@ func TestVolumeCreateWithFlags(t *testing.T) {
 	assert.NilError(t, cmd.Execute())
 	assert.Check(t, is.Equal(name, strings.TrimSpace(cli.OutBuffer().String())))
 }
+
+func TestVolumeCreateCluster(t *testing.T) {
+	cli := test.NewFakeCli(&fakeClient{
+		volumeCreateFunc: func(body volume.CreateOptions) (volume.Volume, error) {
+			if body.Driver == "csi" && body.ClusterVolumeSpec == nil {
+				return volume.Volume{}, errors.New("expected ClusterVolumeSpec, but none present")
+			}
+			if body.Driver == "notcsi" && body.ClusterVolumeSpec != nil {
+				return volume.Volume{}, errors.New("expected no ClusterVolumeSpec, but present")
+			}
+			return volume.Volume{}, nil
+		},
+	})
+
+	cmd := newCreateCommand(cli)
+	cmd.Flags().Set("type", "block")
+	cmd.Flags().Set("group", "gronp")
+	cmd.Flags().Set("driver", "csi")
+	cmd.SetArgs([]string{"name"})
+	assert.NilError(t, cmd.Execute())
+
+	cmd = newCreateCommand(cli)
+	cmd.Flags().Set("driver", "notcsi")
+	cmd.SetArgs([]string{"name"})
+	assert.NilError(t, cmd.Execute())
+}
+
+func TestVolumeCreateClusterOpts(t *testing.T) {
+	expectedBody := volume.CreateOptions{
+		Name:       "name",
+		Driver:     "csi",
+		DriverOpts: map[string]string{},
+		Labels:     map[string]string{},
+		ClusterVolumeSpec: &volume.ClusterVolumeSpec{
+			Group: "gronp",
+			AccessMode: &volume.AccessMode{
+				Scope:   volume.ScopeMultiNode,
+				Sharing: volume.SharingOneWriter,
+				// TODO(dperny): support mount options
+				MountVolume: &volume.TypeMount{},
+			},
+			// TODO(dperny): topology requirements
+			CapacityRange: &volume.CapacityRange{
+				RequiredBytes: 1234,
+				LimitBytes:    567890,
+			},
+			Secrets: []volume.Secret{
+				{Key: "key1", Secret: "secret1"},
+				{Key: "key2", Secret: "secret2"},
+			},
+			Availability: volume.AvailabilityActive,
+			AccessibilityRequirements: &volume.TopologyRequirement{
+				Requisite: []volume.Topology{
+					{Segments: map[string]string{"region": "R1", "zone": "Z1"}},
+					{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
+					{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
+				},
+				Preferred: []volume.Topology{
+					{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
+					{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
+				},
+			},
+		},
+	}
+
+	cli := test.NewFakeCli(&fakeClient{
+		volumeCreateFunc: func(body volume.CreateOptions) (volume.Volume, error) {
+			assert.DeepEqual(t, body, expectedBody)
+			return volume.Volume{}, nil
+		},
+	})
+
+	cmd := newCreateCommand(cli)
+	cmd.SetArgs([]string{"name"})
+	cmd.Flags().Set("driver", "csi")
+	cmd.Flags().Set("group", "gronp")
+	cmd.Flags().Set("scope", "multi")
+	cmd.Flags().Set("sharing", "onewriter")
+	cmd.Flags().Set("type", "mount")
+	cmd.Flags().Set("sharing", "onewriter")
+	cmd.Flags().Set("required-bytes", "1234")
+	cmd.Flags().Set("limit-bytes", "567890")
+	cmd.Flags().Set("secret", "key1=secret1")
+	cmd.Flags().Set("secret", "key2=secret2")
+	cmd.Flags().Set("topology-required", "region=R1,zone=Z1")
+	cmd.Flags().Set("topology-required", "region=R1,zone=Z2")
+	cmd.Flags().Set("topology-required", "region=R1,zone=Z3")
+	cmd.Flags().Set("topology-preferred", "region=R1,zone=Z2")
+	cmd.Flags().Set("topology-preferred", "region=R1,zone=Z3")
+	assert.NilError(t, cmd.Execute())
+}

cli/command/volume/inspect_test.go

@@ -7,6 +7,7 @@ import (
 	"github.com/docker/cli/internal/test"
 	. "github.com/docker/cli/internal/test/builders" // Import builders to get the builder function as package function
+	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/volume"
 	"github.com/pkg/errors"
 	"gotest.tools/v3/assert"
@@ -138,3 +139,108 @@ func TestVolumeInspectWithFormat(t *testing.T) {
 		golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-inspect-with-format.%s.golden", tc.name))
 	}
 }
+
+func TestVolumeInspectCluster(t *testing.T) {
+	volumeInspectFunc := func(volumeID string) (volume.Volume, error) {
+		return volume.Volume{
+			Name:   "clustervolume",
+			Driver: "clusterdriver1",
+			Scope:  "global",
+			ClusterVolume: &volume.ClusterVolume{
+				ID: "fooid",
+				Meta: swarm.Meta{
+					Version: swarm.Version{
+						Index: uint64(123),
+					},
+				},
+				Spec: volume.ClusterVolumeSpec{
+					Group: "group0",
+					AccessMode: &volume.AccessMode{
+						Scope:       volume.ScopeMultiNode,
+						Sharing:     volume.SharingAll,
+						BlockVolume: &volume.TypeBlock{},
+					},
+					AccessibilityRequirements: &volume.TopologyRequirement{
+						Requisite: []volume.Topology{
+							{
+								Segments: map[string]string{
+									"region": "R1",
+									"zone":   "Z1",
+								},
+							}, {
+								Segments: map[string]string{
+									"region": "R1",
+									"zone":   "Z2",
+								},
+							},
+						},
+						Preferred: []volume.Topology{
+							{
+								Segments: map[string]string{
+									"region": "R1",
+									"zone":   "Z1",
+								},
+							},
+						},
+					},
+					CapacityRange: &volume.CapacityRange{
+						RequiredBytes: 1000,
+						LimitBytes:    1000000,
+					},
+					Secrets: []volume.Secret{
+						{
+							Key:    "secretkey1",
+							Secret: "mysecret1",
+						}, {
+							Key:    "secretkey2",
+							Secret: "mysecret2",
+						},
+					},
+					Availability: volume.AvailabilityActive,
+				},
+				Info: &volume.Info{
+					CapacityBytes: 10000,
+					VolumeContext: map[string]string{
+						"the": "context",
+						"has": "entries",
+					},
+					VolumeID: "clusterdriver1volume1id",
+					AccessibleTopology: []volume.Topology{
+						{
+							Segments: map[string]string{
+								"region": "R1",
+								"zone":   "Z1",
+							},
+						},
+					},
+				},
+				PublishStatus: []*volume.PublishStatus{
+					{
+						NodeID: "node1",
+						State:  volume.StatePublished,
+						PublishContext: map[string]string{
+							"some": "data",
+							"yup":  "data",
+						},
+					}, {
+						NodeID: "node2",
+						State:  volume.StatePendingNodeUnpublish,
+						PublishContext: map[string]string{
+							"some":    "more",
+							"publish": "context",
+						},
+					},
+				},
+			},
+		}, nil
+	}
+
+	cli := test.NewFakeCli(&fakeClient{
+		volumeInspectFunc: volumeInspectFunc,
+	})
+	cmd := newInspectCommand(cli)
+	cmd.SetArgs([]string{"clustervolume"})
+	assert.NilError(t, cmd.Execute())
+	golden.Assert(t, cli.OutBuffer().String(), "volume-inspect-cluster.golden")
+}

cli/command/volume/list.go

@@ -14,9 +14,14 @@ import (
 	"github.com/spf13/cobra"
 )
 
+const (
+	clusterTableFormat = "table {{.Name}}\t{{.Group}}\t{{.Driver}}\t{{.Availability}}\t{{.Status}}"
+)
+
 type listOptions struct {
 	quiet   bool
 	format  string
+	cluster bool
 	filter  opts.FilterOpt
 }
@@ -38,6 +43,9 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
 	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display volume names")
 	flags.StringVar(&options.format, "format", "", flagsHelper.FormatHelp)
 	flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 'dangling=true')")
+	flags.BoolVar(&options.cluster, "cluster", false, "Display only cluster volumes, and use cluster volume list formatting")
+	flags.SetAnnotation("cluster", "version", []string{"1.42"})
+	flags.SetAnnotation("cluster", "swarm", []string{"manager"})
 
 	return cmd
 }
@@ -50,12 +58,30 @@ func runList(dockerCli command.Cli, options listOptions) error {
 	}
 
 	format := options.format
-	if len(format) == 0 {
+	if len(format) == 0 && !options.cluster {
 		if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !options.quiet {
 			format = dockerCli.ConfigFile().VolumesFormat
 		} else {
 			format = formatter.TableFormatKey
 		}
+	} else if options.cluster {
+		// TODO(dperny): write server-side filter for cluster volumes. For this
+		// proof of concept, we'll just filter out non-cluster volumes here
+
+		// trick for filtering in place
+		n := 0
+		for _, volume := range volumes.Volumes {
+			if volume.ClusterVolume != nil {
+				volumes.Volumes[n] = volume
+				n++
+			}
+		}
+		volumes.Volumes = volumes.Volumes[:n]
+
+		if !options.quiet {
+			format = clusterTableFormat
+		} else {
+			format = formatter.TableFormatKey
+		}
 	}
 
 	sort.Slice(volumes.Volumes, func(i, j int) bool {
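
Note: the "trick for filtering in place" above is the usual Go idiom for dropping elements without allocating a second slice: advance a write index over the kept elements, then truncate. The same idea as a reusable helper (a sketch only; generic functions require Go 1.18+):

    func filterInPlace[T any](s []T, keep func(T) bool) []T {
    	n := 0
    	for _, v := range s {
    		if keep(v) { // the write index n never outruns the read index
    			s[n] = v
    			n++
    		}
    	}
    	return s[:n]
    }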

cli/command/volume/list_test.go

@@ -125,3 +125,108 @@ func TestVolumeListSortOrder(t *testing.T) {
 	assert.NilError(t, cmd.Execute())
 	golden.Assert(t, cli.OutBuffer().String(), "volume-list-sort.golden")
 }
+
+func TestClusterVolumeList(t *testing.T) {
+	cli := test.NewFakeCli(&fakeClient{
+		volumeListFunc: func(filter filters.Args) (volume.ListResponse, error) {
+			return volume.ListResponse{
+				Volumes: []*volume.Volume{
+					{
+						Name:   "volume1",
+						Scope:  "global",
+						Driver: "driver1",
+						ClusterVolume: &volume.ClusterVolume{
+							Spec: volume.ClusterVolumeSpec{
+								Group: "group1",
+								AccessMode: &volume.AccessMode{
+									Scope:       volume.ScopeSingleNode,
+									Sharing:     volume.SharingOneWriter,
+									MountVolume: &volume.TypeMount{},
+								},
+								Availability: volume.AvailabilityActive,
+							},
+						},
+					}, {
+						Name:   "volume2",
+						Scope:  "global",
+						Driver: "driver1",
+						ClusterVolume: &volume.ClusterVolume{
+							Spec: volume.ClusterVolumeSpec{
+								Group: "group1",
+								AccessMode: &volume.AccessMode{
+									Scope:       volume.ScopeSingleNode,
+									Sharing:     volume.SharingOneWriter,
+									MountVolume: &volume.TypeMount{},
+								},
+								Availability: volume.AvailabilityPause,
+							},
+							Info: &volume.Info{
+								CapacityBytes: 100000000,
+								VolumeID:      "driver1vol2",
+							},
+						},
+					}, {
+						Name:   "volume3",
+						Scope:  "global",
+						Driver: "driver2",
+						ClusterVolume: &volume.ClusterVolume{
+							Spec: volume.ClusterVolumeSpec{
+								Group: "group2",
+								AccessMode: &volume.AccessMode{
+									Scope:       volume.ScopeMultiNode,
+									Sharing:     volume.SharingAll,
+									MountVolume: &volume.TypeMount{},
+								},
+								Availability: volume.AvailabilityActive,
+							},
+							PublishStatus: []*volume.PublishStatus{
+								{
+									NodeID: "nodeid1",
+									State:  volume.StatePublished,
+								},
+							},
+							Info: &volume.Info{
+								CapacityBytes: 100000000,
+								VolumeID:      "driver1vol3",
+							},
+						},
+					}, {
+						Name:   "volume4",
+						Scope:  "global",
+						Driver: "driver2",
+						ClusterVolume: &volume.ClusterVolume{
+							Spec: volume.ClusterVolumeSpec{
+								Group: "group2",
+								AccessMode: &volume.AccessMode{
+									Scope:       volume.ScopeMultiNode,
+									Sharing:     volume.SharingAll,
+									MountVolume: &volume.TypeMount{},
+								},
+								Availability: volume.AvailabilityActive,
+							},
+							PublishStatus: []*volume.PublishStatus{
+								{
+									NodeID: "nodeid1",
+									State:  volume.StatePublished,
+								}, {
+									NodeID: "nodeid2",
+									State:  volume.StatePublished,
+								},
+							},
+							Info: &volume.Info{
+								CapacityBytes: 100000000,
+								VolumeID:      "driver1vol4",
+							},
+						},
+					},
+					Volume(VolumeName("volume-local-1")),
+				},
+			}, nil
+		},
+	})
+
+	cmd := newListCommand(cli)
+	cmd.Flags().Set("cluster", "true")
+	assert.NilError(t, cmd.Execute())
+	golden.Assert(t, cli.OutBuffer().String(), "volume-cluster-volume-list.golden")
+}

cli/command/volume/testdata/volume-cluster-volume-list.golden

@@ -0,0 +1,5 @@
VOLUME NAME   GROUP     DRIVER    AVAILABILITY   STATUS
volume1       group1    driver1   active         pending creation
volume2       group1    driver1   pause          created
volume3       group2    driver2   active         in use (1 node)
volume4       group2    driver2   active         in use (2 nodes)

cli/command/volume/testdata/volume-inspect-cluster.golden

@@ -0,0 +1,99 @@
[
{
"ClusterVolume": {
"ID": "fooid",
"Version": {
"Index": 123
},
"CreatedAt": "0001-01-01T00:00:00Z",
"UpdatedAt": "0001-01-01T00:00:00Z",
"Spec": {
"Group": "group0",
"AccessMode": {
"Scope": "multi",
"Sharing": "all",
"BlockVolume": {}
},
"AccessibilityRequirements": {
"Requisite": [
{
"Segments": {
"region": "R1",
"zone": "Z1"
}
},
{
"Segments": {
"region": "R1",
"zone": "Z2"
}
}
],
"Preferred": [
{
"Segments": {
"region": "R1",
"zone": "Z1"
}
}
]
},
"CapacityRange": {
"RequiredBytes": 1000,
"LimitBytes": 1000000
},
"Secrets": [
{
"Key": "secretkey1",
"Secret": "mysecret1"
},
{
"Key": "secretkey2",
"Secret": "mysecret2"
}
],
"Availability": "active"
},
"PublishStatus": [
{
"NodeID": "node1",
"State": "published",
"PublishContext": {
"some": "data",
"yup": "data"
}
},
{
"NodeID": "node2",
"State": "pending-node-unpublish",
"PublishContext": {
"publish": "context",
"some": "more"
}
}
],
"Info": {
"CapacityBytes": 10000,
"VolumeContext": {
"has": "entries",
"the": "context"
},
"VolumeID": "clusterdriver1volume1id",
"AccessibleTopology": [
{
"Segments": {
"region": "R1",
"zone": "Z1"
}
}
]
}
},
"Driver": "clusterdriver1",
"Labels": null,
"Mountpoint": "",
"Name": "clustervolume",
"Options": null,
"Scope": "global"
}
]

cli/command/volume/update.go

@@ -0,0 +1,67 @@
package volume

import (
	"context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/completion"
	"github.com/docker/docker/api/types/volume"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
	var availability string

	cmd := &cobra.Command{
		Use:   "update [OPTIONS] VOLUME",
		Short: "Update a volume (cluster volumes only)",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runUpdate(dockerCli, args[0], availability, cmd.Flags())
		},
		Annotations: map[string]string{
			"version": "1.42",
			"swarm":   "manager",
		},
		ValidArgsFunction: completion.VolumeNames(dockerCli),
	}

	flags := cmd.Flags()
	flags.StringVar(&availability, "availability", "active", `Cluster Volume availability ("active"|"pause"|"drain")`)
	flags.SetAnnotation("availability", "version", []string{"1.42"})
	flags.SetAnnotation("availability", "swarm", []string{"manager"})

	return cmd
}

func runUpdate(dockerCli command.Cli, volumeID, availability string, flags *pflag.FlagSet) error {
	// TODO(dperny): For this earliest version, the only thing that can be
	// updated is Availability, which is necessary because to delete a cluster
	// volume, the availability must first be set to "drain"
	apiClient := dockerCli.Client()
	ctx := context.Background()

	vol, _, err := apiClient.VolumeInspectWithRaw(ctx, volumeID)
	if err != nil {
		return err
	}

	if vol.ClusterVolume == nil {
		return errors.New("can only update cluster volumes")
	}

	if flags.Changed("availability") {
		vol.ClusterVolume.Spec.Availability = volume.Availability(availability)
	}

	return apiClient.VolumeUpdate(
		ctx, vol.ClusterVolume.ID, vol.ClusterVolume.Version,
		volume.UpdateOptions{
			Spec: &vol.ClusterVolume.Spec,
		},
	)
}
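
Note: per the TODO in runUpdate, a cluster volume can only be removed after its availability is set to "drain". A sketch of that two-step flow against the Go API client (assumes apiClient is a connected client exposing the VolumeUpdate method added alongside this PR, and that ctx and volName are in scope):

    // Drain the cluster volume, then remove it.
    vol, _, err := apiClient.VolumeInspectWithRaw(ctx, volName)
    if err != nil {
    	return err
    }
    if vol.ClusterVolume == nil {
    	return errors.New("not a cluster volume")
    }
    vol.ClusterVolume.Spec.Availability = volume.AvailabilityDrain
    if err := apiClient.VolumeUpdate(ctx, vol.ClusterVolume.ID, vol.ClusterVolume.Version,
    	volume.UpdateOptions{Spec: &vol.ClusterVolume.Spec}); err != nil {
    	return err
    }
    return apiClient.VolumeRemove(ctx, volName, false)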