package volume

import (
	"context"
	"fmt"
	"io"
	"runtime"
	"strings"
	"testing"

	"github.com/docker/cli/cli/streams"
	"github.com/docker/cli/internal/test"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/volume"
	"github.com/pkg/errors"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/golden"
	"gotest.tools/v3/skip"
)

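// TestVolumePruneErrors verifies that invalid arguments, conflicting flags,
// and client-side prune failures surface the expected error messages.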
func TestVolumePruneErrors(t *testing.T) {
	testCases := []struct {
		name            string
		args            []string
		flags           map[string]string
		volumePruneFunc func(args filters.Args) (volume.PruneReport, error)
		expectedError   string
	}{
		{
			name:          "accepts no arguments",
			args:          []string{"foo"},
			expectedError: "accepts no argument",
		},
		{
			name: "forced but other error",
			flags: map[string]string{
				"force": "true",
			},
			volumePruneFunc: func(args filters.Args) (volume.PruneReport, error) {
				return volume.PruneReport{}, errors.Errorf("error pruning volumes")
			},
			expectedError: "error pruning volumes",
		},
		{
			name: "conflicting options",
			flags: map[string]string{
				"all":    "true",
				"filter": "all=1",
			},
			expectedError: "conflicting options: cannot specify both --all and --filter all=1",
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			cmd := NewPruneCommand(
				test.NewFakeCli(&fakeClient{
					volumePruneFunc: tc.volumePruneFunc,
				}),
			)
			cmd.SetArgs(tc.args)
			for key, value := range tc.flags {
				cmd.Flags().Set(key, value)
			}
			cmd.SetOut(io.Discard)
			cmd.SetErr(io.Discard)
			assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
		})
	}
}

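// TestVolumePruneSuccess runs the prune command with valid flag combinations,
// checks the filters passed to the client, and compares the output against
// golden files.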
func TestVolumePruneSuccess(t *testing.T) {
	testCases := []struct {
		name            string
		args            []string
		input           string
		volumePruneFunc func(args filters.Args) (volume.PruneReport, error)
	}{
		{
			name:  "all",
			args:  []string{"--all"},
			input: "y",
			volumePruneFunc: func(pruneFilter filters.Args) (volume.PruneReport, error) {
				assert.Check(t, is.DeepEqual([]string{"true"}, pruneFilter.Get("all")))
				return volume.PruneReport{}, nil
			},
		},
		{
			name: "all-forced",
			args: []string{"--all", "--force"},
			volumePruneFunc: func(pruneFilter filters.Args) (volume.PruneReport, error) {
				return volume.PruneReport{}, nil
			},
		},
		{
			name:  "label-filter",
			args:  []string{"--filter", "label=foobar"},
			input: "y",
			volumePruneFunc: func(pruneFilter filters.Args) (volume.PruneReport, error) {
				assert.Check(t, is.DeepEqual([]string{"foobar"}, pruneFilter.Get("label")))
				return volume.PruneReport{}, nil
			},
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			cli := test.NewFakeCli(&fakeClient{volumePruneFunc: tc.volumePruneFunc})
			cmd := NewPruneCommand(cli)
			if tc.input != "" {
				cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader(tc.input))))
			}
			cmd.SetOut(io.Discard)
			cmd.SetArgs(tc.args)
			err := cmd.Execute()
			assert.NilError(t, err)
			golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-prune-success.%s.golden", tc.name))
		})
	}
}

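// TestVolumePruneForce checks that --force skips the confirmation prompt,
// both when nothing is pruned and when volumes are deleted.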
func TestVolumePruneForce(t *testing.T) {
	testCases := []struct {
		name            string
		volumePruneFunc func(args filters.Args) (volume.PruneReport, error)
	}{
		{
			name: "empty",
		},
		{
			name:            "deletedVolumes",
			volumePruneFunc: simplePruneFunc,
		},
	}
	for _, tc := range testCases {
		cli := test.NewFakeCli(&fakeClient{
			volumePruneFunc: tc.volumePruneFunc,
		})
		cmd := NewPruneCommand(cli)
		cmd.Flags().Set("force", "true")
		assert.NilError(t, cmd.Execute())
		golden.Assert(t, cli.OutBuffer().String(), fmt.Sprintf("volume-prune.%s.golden", tc.name))
	}
}

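// TestVolumePrunePromptYes confirms that answering "y" or "Y" at the
// confirmation prompt proceeds with pruning.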
func TestVolumePrunePromptYes(t *testing.T) {
	// FIXME(vdemeester) make it work..
	skip.If(t, runtime.GOOS == "windows", "TODO: fix test on windows")

	for _, input := range []string{"y", "Y"} {
		cli := test.NewFakeCli(&fakeClient{
			volumePruneFunc: simplePruneFunc,
		})

		cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader(input))))
		cmd := NewPruneCommand(cli)
		cmd.SetArgs([]string{})
		assert.NilError(t, cmd.Execute())
		golden.Assert(t, cli.OutBuffer().String(), "volume-prune-yes.golden")
	}
}

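// TestVolumePrunePromptNo confirms that any answer other than yes cancels
// the prune and produces no deletions.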
func TestVolumePrunePromptNo(t *testing.T) {
	// FIXME(vdemeester) make it work..
	skip.If(t, runtime.GOOS == "windows", "TODO: fix test on windows")

	for _, input := range []string{"n", "N", "no", "anything", "really"} {
		input := input
		t.Run(input, func(t *testing.T) {
			cli := test.NewFakeCli(&fakeClient{
				volumePruneFunc: simplePruneFunc,
			})
			cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader(input))))
			cmd := NewPruneCommand(cli)
			cmd.SetArgs([]string{})
			cmd.SetOut(io.Discard)
			cmd.SetErr(io.Discard)
			assert.ErrorContains(t, cmd.Execute(), "volume prune has been cancelled")
			golden.Assert(t, cli.OutBuffer().String(), "volume-prune-no.golden")
		})
	}
}

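// simplePruneFunc is a stub prune implementation that reports three deleted
// volumes and 2000 bytes of reclaimed space.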
func simplePruneFunc(filters.Args) (volume.PruneReport, error) {
	return volume.PruneReport{
		VolumesDeleted: []string{
			"foo", "bar", "baz",
		},
		SpaceReclaimed: 2000,
	}, nil
}

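// TestVolumePrunePromptTerminate verifies that terminating the command while
// the confirmation prompt is open cancels the prune without calling the client.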
func TestVolumePrunePromptTerminate(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	cli := test.NewFakeCli(&fakeClient{
		volumePruneFunc: func(filter filters.Args) (volume.PruneReport, error) {
			return volume.PruneReport{}, errors.New("fakeClient volumePruneFunc should not be called")
		},
	})

	cmd := NewPruneCommand(cli)
	cmd.SetArgs([]string{})
	cmd.SetOut(io.Discard)
	cmd.SetErr(io.Discard)
	test.TerminatePrompt(ctx, t, cmd, cli)
	golden.Assert(t, cli.OutBuffer().String(), "volume-prune-terminate.golden")
}