Add cluster volume support

- Write test for cluster volumes
- Add inspect test, add update command
- Add cluster volume opts to create
- Add requisite and preferred topology flags
- volume: move cluster bool in opts

Signed-off-by: Drew Erny <derny@mirantis.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Drew Erny 2021-02-23 08:23:53 -06:00 committed by Sebastiaan van Stijn
parent d0df532a25
commit 3455580ebc
11 changed files with 676 additions and 21 deletions

cli/command/formatter/volume.go

@@ -1,6 +1,7 @@
package formatter
import (
"fmt"
"strconv"
"strings"
@@ -12,10 +13,13 @@ const (
defaultVolumeQuietFormat = "{{.Name}}"
defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}"
idHeader = "ID"
volumeNameHeader = "VOLUME NAME"
mountpointHeader = "MOUNTPOINT"
linksHeader = "LINKS"
groupHeader = "GROUP"
availabilityHeader = "AVAILABILITY"
statusHeader = "STATUS"
)
// NewVolumeFormat returns a format for use with a volume Context
@@ -56,13 +60,17 @@ type volumeContext struct {
func newVolumeContext() *volumeContext {
volumeCtx := volumeContext{}
volumeCtx.Header = SubHeaderContext{
"ID": idHeader,
"Name": volumeNameHeader,
"Group": groupHeader,
"Driver": DriverHeader,
"Scope": ScopeHeader,
"Availability": availabilityHeader,
"Mountpoint": mountpointHeader,
"Labels": LabelsHeader,
"Links": linksHeader,
"Size": SizeHeader,
"Status": statusHeader,
}
return &volumeCtx
}
@@ -119,3 +127,39 @@ func (c *volumeContext) Size() string {
}
return units.HumanSize(float64(c.v.UsageData.Size))
}
func (c *volumeContext) Group() string {
if c.v.ClusterVolume == nil {
return "N/A"
}
return c.v.ClusterVolume.Spec.Group
}
func (c *volumeContext) Availability() string {
if c.v.ClusterVolume == nil {
return "N/A"
}
return string(c.v.ClusterVolume.Spec.Availability)
}
func (c *volumeContext) Status() string {
if c.v.ClusterVolume == nil {
return "N/A"
}
if c.v.ClusterVolume.Info == nil || c.v.ClusterVolume.Info.VolumeID == "" {
return "pending creation"
}
l := len(c.v.ClusterVolume.PublishStatus)
switch l {
case 0:
return "created"
case 1:
return "in use (1 node)"
default:
return fmt.Sprintf("in use (%d nodes)", l)
}
}
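The three accessors above all fall back to "N/A" when ClusterVolume is nil, so the new placeholders are safe in any user-supplied format. A minimal sketch of driving them directly (not part of this diff; the format string and sample volume are illustrative):

package main

import (
	"os"

	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types/volume"
)

func main() {
	// A plain local volume: Group, Availability, and Status render as "N/A".
	vols := []*volume.Volume{{Name: "vol1", Driver: "local"}}
	_ = formatter.VolumeWrite(formatter.Context{
		Format: "table {{.Name}}\t{{.Group}}\t{{.Availability}}\t{{.Status}}",
		Output: os.Stdout,
	}, vols)
}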

cli/command/formatter/volume_test.go

@@ -148,8 +148,8 @@ func TestVolumeContextWriteJSON(t *testing.T) {
{Driver: "bar", Name: "foobar_bar"},
}
expectedJSONs := []map[string]interface{}{
{"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"},
{"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"},
{"Availability": "N/A", "Driver": "foo", "Group": "N/A", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A", "Status": "N/A"},
{"Availability": "N/A", "Driver": "bar", "Group": "N/A", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A", "Status": "N/A"},
}
out := bytes.NewBufferString("")
err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes)

cli/command/volume/cmd.go

@@ -21,6 +21,7 @@ func NewVolumeCommand(dockerCli command.Cli) *cobra.Command {
newListCommand(dockerCli),
newRemoveCommand(dockerCli),
NewPruneCommand(dockerCli),
newUpdateCommand(dockerCli),
)
return cmd
}

cli/command/volume/create.go

@@ -3,6 +3,7 @@ package volume
import (
"context"
"fmt"
"strings"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
@@ -11,6 +12,7 @@ import (
"github.com/docker/docker/api/types/volume"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
type createOptions struct {
@@ -18,12 +20,28 @@ type createOptions struct {
driver string
driverOpts opts.MapOpts
labels opts.ListOpts
// options for cluster volumes only
cluster bool
group string
scope string
sharing string
availability string
secrets opts.MapOpts
requiredBytes opts.MemBytes
limitBytes opts.MemBytes
accessType string
requisiteTopology opts.ListOpts
preferredTopology opts.ListOpts
}
func newCreateCommand(dockerCli command.Cli) *cobra.Command {
options := createOptions{
driverOpts: *opts.NewMapOpts(nil, nil),
labels: opts.NewListOpts(opts.ValidateLabel),
secrets: *opts.NewMapOpts(nil, nil),
requisiteTopology: opts.NewListOpts(nil),
preferredTopology: opts.NewListOpts(nil),
}
cmd := &cobra.Command{
@@ -37,6 +55,7 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
}
options.name = args[0]
}
options.cluster = hasClusterVolumeOptionSet(cmd.Flags())
return runCreate(dockerCli, options)
},
ValidArgsFunction: completion.NoComplete,
@@ -48,16 +67,110 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
flags.VarP(&options.driverOpts, "opt", "o", "Set driver specific options")
flags.Var(&options.labels, "label", "Set metadata for a volume")
// flags for cluster volumes only
flags.StringVar(&options.group, "group", "", "Cluster Volume group (cluster volumes)")
flags.StringVar(&options.scope, "scope", "single", `Cluster Volume access scope ("single"|"multi")`)
flags.StringVar(&options.sharing, "sharing", "none", `Cluster Volume access sharing ("none"|"readonly"|"onewriter"|"all")`)
flags.StringVar(&options.availability, "availability", "active", `Cluster Volume availability ("active"|"pause"|"drain")`)
flags.StringVar(&options.accessType, "type", "block", `Cluster Volume access type ("mount"|"block")`)
flags.Var(&options.secrets, "secret", "Cluster Volume secrets")
flags.Var(&options.limitBytes, "limit-bytes", "Minimum size of the Cluster Volume in bytes")
flags.Var(&options.requiredBytes, "required-bytes", "Maximum size of the Cluster Volume in bytes")
flags.Var(&options.requisiteTopology, "topology-required", "A topology that the Cluster Volume must be accessible from")
flags.Var(&options.preferredTopology, "topology-preferred", "A topology that the Cluster Volume would be preferred in")
return cmd
}
// hasClusterVolumeOptionSet returns true if any of the cluster-specific
// options are set.
func hasClusterVolumeOptionSet(flags *pflag.FlagSet) bool {
return flags.Changed("group") || flags.Changed("scope") ||
flags.Changed("sharing") || flags.Changed("availability") ||
flags.Changed("type") || flags.Changed("secrets") ||
flags.Changed("limit-bytes") || flags.Changed("required-bytes")
}
func runCreate(dockerCli command.Cli, options createOptions) error {
volOpts := volume.CreateOptions{
Driver: options.driver,
DriverOpts: options.driverOpts.GetAll(),
Name: options.name,
Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()),
}
if options.cluster {
volOpts.ClusterVolumeSpec = &volume.ClusterVolumeSpec{
Group: options.group,
AccessMode: &volume.AccessMode{
Scope: volume.Scope(options.scope),
Sharing: volume.SharingMode(options.sharing),
},
Availability: volume.Availability(options.availability),
}
if options.accessType == "mount" {
volOpts.ClusterVolumeSpec.AccessMode.MountVolume = &volume.TypeMount{}
} else if options.accessType == "block" {
volOpts.ClusterVolumeSpec.AccessMode.BlockVolume = &volume.TypeBlock{}
}
vcr := &volume.CapacityRange{}
if r := options.requiredBytes.Value(); r >= 0 {
vcr.RequiredBytes = r
}
if l := options.limitBytes.Value(); l >= 0 {
vcr.LimitBytes = l
}
volOpts.ClusterVolumeSpec.CapacityRange = vcr
for key, secret := range options.secrets.GetAll() {
volOpts.ClusterVolumeSpec.Secrets = append(
volOpts.ClusterVolumeSpec.Secrets,
volume.Secret{
Key: key,
Secret: secret,
},
)
}
// TODO(dperny): ignore if no topology specified
topology := &volume.TopologyRequirement{}
for _, top := range options.requisiteTopology.GetAll() {
// each topology takes the form segment=value,segment=value: a
// comma-separated list of key=value pairs
segments := map[string]string{}
for _, segment := range strings.Split(top, ",") {
parts := strings.SplitN(segment, "=", 2)
// TODO(dperny): validate topology syntax
segments[parts[0]] = parts[1]
}
topology.Requisite = append(
topology.Requisite,
volume.Topology{Segments: segments},
)
}
for _, top := range options.preferredTopology.GetAll() {
// each topology takes the form segment=value,segment=value: a
// comma-separated list of key=value pairs
segments := map[string]string{}
for _, segment := range strings.Split(top, ",") {
parts := strings.SplitN(segment, "=", 2)
// TODO(dperny): validate topology syntax
segments[parts[0]] = parts[1]
}
topology.Preferred = append(
topology.Preferred,
volume.Topology{Segments: segments},
)
}
volOpts.ClusterVolumeSpec.AccessibilityRequirements = topology
}
vol, err := dockerCli.Client().VolumeCreate(context.Background(), volOpts)
if err != nil {
return err
}
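The two topology loops above differ only in the slice they append to, and both carry a TODO about validating syntax. One possible shape for that follow-up (a sketch only; parseTopology is a hypothetical name, reusing the file's existing strings, errors, and volume imports):

// parseTopology parses "key=value,key=value" into a Topology, rejecting
// malformed segments instead of indexing past a missing "=".
func parseTopology(top string) (volume.Topology, error) {
	segments := map[string]string{}
	for _, segment := range strings.Split(top, ",") {
		parts := strings.SplitN(segment, "=", 2)
		if len(parts) != 2 || parts[0] == "" {
			return volume.Topology{}, errors.Errorf("invalid topology segment %q: expected key=value", segment)
		}
		segments[parts[0]] = parts[1]
	}
	return volume.Topology{Segments: segments}, nil
}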

cli/command/volume/create_test.go

@@ -123,3 +123,100 @@ func TestVolumeCreateWithFlags(t *testing.T) {
assert.NilError(t, cmd.Execute())
assert.Check(t, is.Equal(name, strings.TrimSpace(cli.OutBuffer().String())))
}
func TestVolumeCreateCluster(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
volumeCreateFunc: func(body volume.CreateOptions) (volume.Volume, error) {
if body.Driver == "csi" && body.ClusterVolumeSpec == nil {
return volume.Volume{}, errors.New("expected ClusterVolumeSpec, but none present")
}
if body.Driver == "notcsi" && body.ClusterVolumeSpec != nil {
return volume.Volume{}, errors.New("expected no ClusterVolumeSpec, but present")
}
return volume.Volume{}, nil
},
})
cmd := newCreateCommand(cli)
cmd.Flags().Set("type", "block")
cmd.Flags().Set("group", "gronp")
cmd.Flags().Set("driver", "csi")
cmd.SetArgs([]string{"name"})
assert.NilError(t, cmd.Execute())
cmd = newCreateCommand(cli)
cmd.Flags().Set("driver", "notcsi")
cmd.SetArgs([]string{"name"})
assert.NilError(t, cmd.Execute())
}
func TestVolumeCreateClusterOpts(t *testing.T) {
expectedBody := volume.CreateOptions{
Name: "name",
Driver: "csi",
DriverOpts: map[string]string{},
Labels: map[string]string{},
ClusterVolumeSpec: &volume.ClusterVolumeSpec{
Group: "gronp",
AccessMode: &volume.AccessMode{
Scope: volume.ScopeMultiNode,
Sharing: volume.SharingOneWriter,
// TODO(dperny): support mount options
MountVolume: &volume.TypeMount{},
},
CapacityRange: &volume.CapacityRange{
RequiredBytes: 1234,
LimitBytes: 567890,
},
Secrets: []volume.Secret{
{Key: "key1", Secret: "secret1"},
{Key: "key2", Secret: "secret2"},
},
Availability: volume.AvailabilityActive,
AccessibilityRequirements: &volume.TopologyRequirement{
Requisite: []volume.Topology{
{Segments: map[string]string{"region": "R1", "zone": "Z1"}},
{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
},
Preferred: []volume.Topology{
{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
},
},
},
}
cli := test.NewFakeCli(&fakeClient{
volumeCreateFunc: func(body volume.CreateOptions) (volume.Volume, error) {
assert.DeepEqual(t, body, expectedBody)
return volume.Volume{}, nil
},
})
cmd := newCreateCommand(cli)
cmd.SetArgs([]string{"name"})
cmd.Flags().Set("driver", "csi")
cmd.Flags().Set("group", "gronp")
cmd.Flags().Set("scope", "multi")
cmd.Flags().Set("sharing", "onewriter")
cmd.Flags().Set("type", "mount")
cmd.Flags().Set("sharing", "onewriter")
cmd.Flags().Set("required-bytes", "1234")
cmd.Flags().Set("limit-bytes", "567890")
cmd.Flags().Set("secret", "key1=secret1")
cmd.Flags().Set("secret", "key2=secret2")
cmd.Flags().Set("topology-required", "region=R1,zone=Z1")
cmd.Flags().Set("topology-required", "region=R1,zone=Z2")
cmd.Flags().Set("topology-required", "region=R1,zone=Z3")
cmd.Flags().Set("topology-preferred", "region=R1,zone=Z2")
cmd.Flags().Set("topology-preferred", "region=R1,zone=Z3")
assert.NilError(t, cmd.Execute())
}

cli/command/volume/inspect_test.go

@@ -7,6 +7,7 @@ import (
"github.com/docker/cli/internal/test"
. "github.com/docker/cli/internal/test/builders" // Import builders to get the builder function as package function
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
"github.com/pkg/errors"
"gotest.tools/v3/assert"
@@ -138,3 +139,108 @@ func TestVolumeInspectWithFormat(t *testing.T) {
golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-inspect-with-format.%s.golden", tc.name))
}
}
func TestVolumeInspectCluster(t *testing.T) {
volumeInspectFunc := func(volumeID string) (volume.Volume, error) {
return volume.Volume{
Name: "clustervolume",
Driver: "clusterdriver1",
Scope: "global",
ClusterVolume: &volume.ClusterVolume{
ID: "fooid",
Meta: swarm.Meta{
Version: swarm.Version{
Index: uint64(123),
},
},
Spec: volume.ClusterVolumeSpec{
Group: "group0",
AccessMode: &volume.AccessMode{
Scope: volume.ScopeMultiNode,
Sharing: volume.SharingAll,
BlockVolume: &volume.TypeBlock{},
},
AccessibilityRequirements: &volume.TopologyRequirement{
Requisite: []volume.Topology{
{
Segments: map[string]string{
"region": "R1",
"zone": "Z1",
},
}, {
Segments: map[string]string{
"region": "R1",
"zone": "Z2",
},
},
},
Preferred: []volume.Topology{
{
Segments: map[string]string{
"region": "R1",
"zone": "Z1",
},
},
},
},
CapacityRange: &volume.CapacityRange{
RequiredBytes: 1000,
LimitBytes: 1000000,
},
Secrets: []volume.Secret{
{
Key: "secretkey1",
Secret: "mysecret1",
}, {
Key: "secretkey2",
Secret: "mysecret2",
},
},
Availability: volume.AvailabilityActive,
},
Info: &volume.Info{
CapacityBytes: 10000,
VolumeContext: map[string]string{
"the": "context",
"has": "entries",
},
VolumeID: "clusterdriver1volume1id",
AccessibleTopology: []volume.Topology{
{
Segments: map[string]string{
"region": "R1",
"zone": "Z1",
},
},
},
},
PublishStatus: []*volume.PublishStatus{
{
NodeID: "node1",
State: volume.StatePublished,
PublishContext: map[string]string{
"some": "data",
"yup": "data",
},
}, {
NodeID: "node2",
State: volume.StatePendingNodeUnpublish,
PublishContext: map[string]string{
"some": "more",
"publish": "context",
},
},
},
},
}, nil
}
cli := test.NewFakeCli(&fakeClient{
volumeInspectFunc: volumeInspectFunc,
})
cmd := newInspectCommand(cli)
cmd.SetArgs([]string{"clustervolume"})
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "volume-inspect-cluster.golden")
}

cli/command/volume/list.go

@@ -14,9 +14,14 @@ import (
"github.com/spf13/cobra"
)
const (
clusterTableFormat = "table {{.Name}}\t{{.Group}}\t{{.Driver}}\t{{.Availability}}\t{{.Status}}"
)
type listOptions struct {
quiet bool
format string
cluster bool
filter opts.FilterOpt
}
@@ -38,6 +43,7 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display volume names")
flags.StringVar(&options.format, "format", "", flagsHelper.FormatHelp)
flags.VarP(&options.filter, "filter", "f", "Provide filter values (e.g. 'dangling=true')")
flags.BoolVar(&options.cluster, "cluster", false, "Display only cluster volumes, and use cluster volume list formatting")
return cmd
}
@@ -50,12 +56,30 @@ func runList(dockerCli command.Cli, options listOptions) error {
}
format := options.format
if len(format) == 0 && !options.cluster {
if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !options.quiet {
format = dockerCli.ConfigFile().VolumesFormat
} else {
format = formatter.TableFormatKey
}
} else if options.cluster {
// TODO(dperny): write server-side filter for cluster volumes. For this
// proof of concept, we'll just filter out non-cluster volumes here
// trick for filtering in place
n := 0
for _, vol := range volumes.Volumes {
if vol.ClusterVolume != nil {
volumes.Volumes[n] = vol
n++
}
}
volumes.Volumes = volumes.Volumes[:n]
if !options.quiet {
format = clusterTableFormat
} else {
format = formatter.TableFormatKey
}
}
sort.Slice(volumes.Volumes, func(i, j int) bool {

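The "trick for filtering in place" above is the standard Go idiom for dropping slice elements without allocating a second slice: copy the keepers forward, then truncate. The same pattern in isolation:

package main

import "fmt"

func main() {
	// Keep only even values, reusing the slice's backing array,
	// exactly as the ClusterVolume filter does above.
	nums := []int{1, 2, 3, 4, 5, 6}
	n := 0
	for _, v := range nums {
		if v%2 == 0 {
			nums[n] = v
			n++
		}
	}
	nums = nums[:n]
	fmt.Println(nums) // [2 4 6]
}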
cli/command/volume/list_test.go

@@ -125,3 +125,108 @@ func TestVolumeListSortOrder(t *testing.T) {
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "volume-list-sort.golden")
}
func TestClusterVolumeList(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
volumeListFunc: func(filter filters.Args) (volume.ListResponse, error) {
return volume.ListResponse{
Volumes: []*volume.Volume{
{
Name: "volume1",
Scope: "global",
Driver: "driver1",
ClusterVolume: &volume.ClusterVolume{
Spec: volume.ClusterVolumeSpec{
Group: "group1",
AccessMode: &volume.AccessMode{
Scope: volume.ScopeSingleNode,
Sharing: volume.SharingOneWriter,
MountVolume: &volume.TypeMount{},
},
Availability: volume.AvailabilityActive,
},
},
}, {
Name: "volume2",
Scope: "global",
Driver: "driver1",
ClusterVolume: &volume.ClusterVolume{
Spec: volume.ClusterVolumeSpec{
Group: "group1",
AccessMode: &volume.AccessMode{
Scope: volume.ScopeSingleNode,
Sharing: volume.SharingOneWriter,
MountVolume: &volume.TypeMount{},
},
Availability: volume.AvailabilityPause,
},
Info: &volume.Info{
CapacityBytes: 100000000,
VolumeID: "driver1vol2",
},
},
}, {
Name: "volume3",
Scope: "global",
Driver: "driver2",
ClusterVolume: &volume.ClusterVolume{
Spec: volume.ClusterVolumeSpec{
Group: "group2",
AccessMode: &volume.AccessMode{
Scope: volume.ScopeMultiNode,
Sharing: volume.SharingAll,
MountVolume: &volume.TypeMount{},
},
Availability: volume.AvailabilityActive,
},
PublishStatus: []*volume.PublishStatus{
{
NodeID: "nodeid1",
State: volume.StatePublished,
},
},
Info: &volume.Info{
CapacityBytes: 100000000,
VolumeID: "driver1vol3",
},
},
}, {
Name: "volume4",
Scope: "global",
Driver: "driver2",
ClusterVolume: &volume.ClusterVolume{
Spec: volume.ClusterVolumeSpec{
Group: "group2",
AccessMode: &volume.AccessMode{
Scope: volume.ScopeMultiNode,
Sharing: volume.SharingAll,
MountVolume: &volume.TypeMount{},
},
Availability: volume.AvailabilityActive,
},
PublishStatus: []*volume.PublishStatus{
{
NodeID: "nodeid1",
State: volume.StatePublished,
}, {
NodeID: "nodeid2",
State: volume.StatePublished,
},
},
Info: &volume.Info{
CapacityBytes: 100000000,
VolumeID: "driver1vol4",
},
},
},
Volume(VolumeName("volume-local-1")),
},
}, nil
},
})
cmd := newListCommand(cli)
cmd.Flags().Set("cluster", "true")
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "volume-cluster-volume-list.golden")
}

cli/command/volume/testdata/volume-cluster-volume-list.golden

@@ -0,0 +1,5 @@
VOLUME NAME   GROUP     DRIVER    AVAILABILITY   STATUS
volume1       group1    driver1   active         pending creation
volume2       group1    driver1   pause          created
volume3       group2    driver2   active         in use (1 node)
volume4       group2    driver2   active         in use (2 nodes)

cli/command/volume/testdata/volume-inspect-cluster.golden

@@ -0,0 +1,99 @@
[
{
"ClusterVolume": {
"ID": "fooid",
"Version": {
"Index": 123
},
"CreatedAt": "0001-01-01T00:00:00Z",
"UpdatedAt": "0001-01-01T00:00:00Z",
"Spec": {
"Group": "group0",
"AccessMode": {
"Scope": "multi",
"Sharing": "all",
"BlockVolume": {}
},
"AccessibilityRequirements": {
"Requisite": [
{
"Segments": {
"region": "R1",
"zone": "Z1"
}
},
{
"Segments": {
"region": "R1",
"zone": "Z2"
}
}
],
"Preferred": [
{
"Segments": {
"region": "R1",
"zone": "Z1"
}
}
]
},
"CapacityRange": {
"RequiredBytes": 1000,
"LimitBytes": 1000000
},
"Secrets": [
{
"Key": "secretkey1",
"Secret": "mysecret1"
},
{
"Key": "secretkey2",
"Secret": "mysecret2"
}
],
"Availability": "active"
},
"PublishStatus": [
{
"NodeID": "node1",
"State": "published",
"PublishContext": {
"some": "data",
"yup": "data"
}
},
{
"NodeID": "node2",
"State": "pending-node-unpublish",
"PublishContext": {
"publish": "context",
"some": "more"
}
}
],
"Info": {
"CapacityBytes": 10000,
"VolumeContext": {
"has": "entries",
"the": "context"
},
"VolumeID": "clusterdriver1volume1id",
"AccessibleTopology": [
{
"Segments": {
"region": "R1",
"zone": "Z1"
}
}
]
}
},
"Driver": "clusterdriver1",
"Labels": null,
"Mountpoint": "",
"Name": "clustervolume",
"Options": null,
"Scope": "global"
}
]

cli/command/volume/update.go

@@ -0,0 +1,61 @@
package volume
import (
"context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/completion"
"github.com/docker/docker/api/types/volume"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
var availability string
cmd := &cobra.Command{
Use: "update [OPTIONS] [VOLUME]",
Short: "Update a volume (cluster volumes only)",
Args: cli.RequiresMaxArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runUpdate(dockerCli, args[0], availability, cmd.Flags())
},
ValidArgsFunction: completion.VolumeNames(dockerCli),
}
flags := cmd.Flags()
flags.StringVar(&availability, "availability", "active", `Cluster Volume availability ("active"|"pause"|"drain")`)
return cmd
}
func runUpdate(dockerCli command.Cli, volumeID, availability string, flags *pflag.FlagSet) error {
// TODO(dperny): For this earliest version, the only thing that can be
// updated is Availability, which is necessary because to delete a cluster
// volume, the availability must first be set to "drain"
apiClient := dockerCli.Client()
ctx := context.Background()
vol, _, err := apiClient.VolumeInspectWithRaw(ctx, volumeID)
if err != nil {
return err
}
if vol.ClusterVolume == nil {
return errors.New("Can only update cluster volumes")
}
if flags.Changed("availability") {
vol.ClusterVolume.Spec.Availability = volume.Availability(availability)
}
return apiClient.VolumeUpdate(
ctx, vol.ClusterVolume.ID, vol.ClusterVolume.Version,
volume.UpdateOptions{
Spec: &vol.ClusterVolume.Spec,
},
)
}
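As the comment in runUpdate notes, availability updates exist in this first version mainly so a cluster volume can be drained before deletion. A hedged sketch of that end-to-end flow against the Engine API client (drainAndRemove is a hypothetical helper; waiting for publish statuses to clear is omitted):

package clustervolume

import (
	"context"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
	"github.com/pkg/errors"
)

// drainAndRemove sets a cluster volume's availability to "drain", then
// removes it, mirroring the lifecycle the update command enables.
func drainAndRemove(ctx context.Context, apiClient client.APIClient, name string) error {
	vol, _, err := apiClient.VolumeInspectWithRaw(ctx, name)
	if err != nil {
		return err
	}
	if vol.ClusterVolume == nil {
		return errors.New("not a cluster volume")
	}
	vol.ClusterVolume.Spec.Availability = volume.AvailabilityDrain
	if err := apiClient.VolumeUpdate(ctx, vol.ClusterVolume.ID, vol.ClusterVolume.Version,
		volume.UpdateOptions{Spec: &vol.ClusterVolume.Spec}); err != nil {
		return err
	}
	return apiClient.VolumeRemove(ctx, name, false)
}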