DockerCLI/cli/command/volume/prune_test.go

package volume

import (
	"context"
	"fmt"
	"io"
	"runtime"
	"strings"
	"testing"

	"github.com/docker/cli/cli/streams"
	"github.com/docker/cli/internal/test"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/volume"
	"github.com/pkg/errors"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/golden"
	"gotest.tools/v3/skip"
)
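
// TestVolumePruneErrors verifies that unexpected positional arguments,
// conflicting flags, and errors returned by the prune call surface the
// expected error messages.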
func TestVolumePruneErrors(t *testing.T) {
	testCases := []struct {
		name            string
		args            []string
		flags           map[string]string
		volumePruneFunc func(args filters.Args) (volume.PruneReport, error)
		expectedError   string
	}{
		{
			name:          "accepts no arguments",
			args:          []string{"foo"},
			expectedError: "accepts no argument",
		},
		{
			name: "forced but other error",
			flags: map[string]string{
				"force": "true",
			},
			volumePruneFunc: func(args filters.Args) (volume.PruneReport, error) {
				return volume.PruneReport{}, errors.Errorf("error pruning volumes")
			},
			expectedError: "error pruning volumes",
		},
		{
			name: "conflicting options",
			flags: map[string]string{
				"all":    "true",
				"filter": "all=1",
			},
			expectedError: "conflicting options: cannot specify both --all and --filter all=1",
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			cmd := NewPruneCommand(
				test.NewFakeCli(&fakeClient{
					volumePruneFunc: tc.volumePruneFunc,
				}),
			)
			cmd.SetArgs(tc.args)
			for key, value := range tc.flags {
				cmd.Flags().Set(key, value)
			}
			cmd.SetOut(io.Discard)
			cmd.SetErr(io.Discard)
			assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
		})
	}
}
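
// TestVolumePruneSuccess runs the prune command with various flags and
// confirmation input, checks the filters passed to the fake client, and
// compares the output against golden files.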
func TestVolumePruneSuccess(t *testing.T) {
	testCases := []struct {
		name            string
		args            []string
		input           string
		volumePruneFunc func(args filters.Args) (volume.PruneReport, error)
	}{
		{
			name:  "all",
			args:  []string{"--all"},
			input: "y",
			volumePruneFunc: func(pruneFilter filters.Args) (volume.PruneReport, error) {
				assert.Check(t, is.DeepEqual([]string{"true"}, pruneFilter.Get("all")))
				return volume.PruneReport{}, nil
			},
		},
		{
			name: "all-forced",
			args: []string{"--all", "--force"},
			volumePruneFunc: func(pruneFilter filters.Args) (volume.PruneReport, error) {
				return volume.PruneReport{}, nil
			},
		},
		{
			name:  "label-filter",
			args:  []string{"--filter", "label=foobar"},
			input: "y",
			volumePruneFunc: func(pruneFilter filters.Args) (volume.PruneReport, error) {
				assert.Check(t, is.DeepEqual([]string{"foobar"}, pruneFilter.Get("label")))
				return volume.PruneReport{}, nil
			},
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			cli := test.NewFakeCli(&fakeClient{volumePruneFunc: tc.volumePruneFunc})
			cmd := NewPruneCommand(cli)
			if tc.input != "" {
				cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader(tc.input))))
			}
			cmd.SetOut(io.Discard)
			cmd.SetArgs(tc.args)
			err := cmd.Execute()
			assert.NilError(t, err)
			golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-prune-success.%s.golden", tc.name))
		})
	}
}
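
// TestVolumePruneForce verifies that --force skips the confirmation prompt,
// and that both an empty report and a report with deleted volumes render as
// expected in the golden files.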
func TestVolumePruneForce(t *testing.T) {
	testCases := []struct {
		name            string
		volumePruneFunc func(args filters.Args) (volume.PruneReport, error)
	}{
		{
			name: "empty",
		},
		{
			name:            "deletedVolumes",
			volumePruneFunc: simplePruneFunc,
		},
	}
	for _, tc := range testCases {
		cli := test.NewFakeCli(&fakeClient{
			volumePruneFunc: tc.volumePruneFunc,
		})
		cmd := NewPruneCommand(cli)
		cmd.Flags().Set("force", "true")
		assert.NilError(t, cmd.Execute())
		golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-prune.%s.golden", tc.name))
	}
}
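
// TestVolumePrunePromptYes confirms that answering "y" or "Y" at the prompt
// proceeds with pruning and prints the deleted volumes.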
func TestVolumePrunePromptYes(t *testing.T) {
	// FIXME(vdemeester) make it work..
	skip.If(t, runtime.GOOS == "windows", "TODO: fix test on windows")
	for _, input := range []string{"y", "Y"} {
		cli := test.NewFakeCli(&fakeClient{
			volumePruneFunc: simplePruneFunc,
		})
		cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader(input))))
		cmd := NewPruneCommand(cli)
		cmd.SetArgs([]string{})
		assert.NilError(t, cmd.Execute())
		golden.Assert(t, cli.OutBuffer().String(), "volume-prune-yes.golden")
	}
}
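
// TestVolumePrunePromptNo confirms that any answer other than "y"/"Y" cancels
// the prune and reports "volume prune has been cancelled".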
func TestVolumePrunePromptNo(t *testing.T) {
	// FIXME(vdemeester) make it work..
	skip.If(t, runtime.GOOS == "windows", "TODO: fix test on windows")
	for _, input := range []string{"n", "N", "no", "anything", "really"} {
		input := input
		t.Run(input, func(t *testing.T) {
			cli := test.NewFakeCli(&fakeClient{
				volumePruneFunc: simplePruneFunc,
			})
			cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader(input))))
			cmd := NewPruneCommand(cli)
			cmd.SetArgs([]string{})
			cmd.SetOut(io.Discard)
			cmd.SetErr(io.Discard)
			assert.ErrorContains(t, cmd.Execute(), "volume prune has been cancelled")
			golden.Assert(t, cli.OutBuffer().String(), "volume-prune-no.golden")
		})
	}
}
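
// simplePruneFunc is a canned prune result for tests that only need a
// non-empty report: three deleted volumes and 2000 bytes reclaimed.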
func simplePruneFunc(filters.Args) (volume.PruneReport, error) {
	return volume.PruneReport{
		VolumesDeleted: []string{
			"foo", "bar", "baz",
		},
		SpaceReclaimed: 2000,
	}, nil
}
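
// TestVolumePrunePromptTerminate checks that terminating the confirmation
// prompt aborts the command without ever calling the volume prune API.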
func TestVolumePrunePromptTerminate(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	cli := test.NewFakeCli(&fakeClient{
		volumePruneFunc: func(filter filters.Args) (volume.PruneReport, error) {
			return volume.PruneReport{}, errors.New("fakeClient volumePruneFunc should not be called")
		},
	})
	cmd := NewPruneCommand(cli)
	cmd.SetArgs([]string{})
	cmd.SetOut(io.Discard)
	cmd.SetErr(io.Discard)
	test.TerminatePrompt(ctx, t, cmd, cli)
	golden.Assert(t, cli.OutBuffer().String(), "volume-prune-terminate.golden")
}