DockerCLI/cli/command/config/create_test.go

package config

import (
	"context"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"

	"github.com/docker/cli/internal/test"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/pkg/errors"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/golden"
)

const configDataFile = "config-create-with-name.golden"
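
// TestConfigCreateErrors verifies that the create command rejects an invalid
// number of arguments and propagates errors returned by the API client.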
func TestConfigCreateErrors(t *testing.T) {
	testCases := []struct {
		args             []string
		configCreateFunc func(context.Context, swarm.ConfigSpec) (types.ConfigCreateResponse, error)
		expectedError    string
	}{
		{
			args:          []string{"too_few"},
			expectedError: "requires exactly 2 arguments",
		},
		{
			args:          []string{"too", "many", "arguments"},
			expectedError: "requires exactly 2 arguments",
		},
		{
			args: []string{"name", filepath.Join("testdata", configDataFile)},
			configCreateFunc: func(_ context.Context, configSpec swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
				return types.ConfigCreateResponse{}, errors.Errorf("error creating config")
			},
			expectedError: "error creating config",
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.expectedError, func(t *testing.T) {
			cmd := newConfigCreateCommand(
				test.NewFakeCli(&fakeClient{
					configCreateFunc: tc.configCreateFunc,
				}),
			)
			cmd.SetArgs(tc.args)
			cmd.SetOut(io.Discard)
			cmd.SetErr(io.Discard)
			assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
		})
	}
}
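
// TestConfigCreateWithName verifies that the config name and the file
// contents are sent to the API, and that the returned ID is printed.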
func TestConfigCreateWithName(t *testing.T) {
	name := "foo"
	var actual []byte
	cli := test.NewFakeCli(&fakeClient{
		configCreateFunc: func(_ context.Context, spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
			if spec.Name != name {
				return types.ConfigCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name)
			}
			actual = spec.Data
			return types.ConfigCreateResponse{
				ID: "ID-" + spec.Name,
			}, nil
		},
	})
	cmd := newConfigCreateCommand(cli)
	cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)})
	assert.NilError(t, cmd.Execute())
	golden.Assert(t, string(actual), configDataFile)
	assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String())))
}
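
// TestConfigCreateWithLabels verifies that labels passed via repeated --label
// flags end up in the ConfigSpec sent to the API.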
func TestConfigCreateWithLabels(t *testing.T) {
	expectedLabels := map[string]string{
		"lbl1": "Label-foo",
		"lbl2": "Label-bar",
	}
	name := "foo"
	data, err := os.ReadFile(filepath.Join("testdata", configDataFile))
	assert.NilError(t, err)
	expected := swarm.ConfigSpec{
		Annotations: swarm.Annotations{
			Name:   name,
			Labels: expectedLabels,
		},
		Data: data,
	}
	cli := test.NewFakeCli(&fakeClient{
		configCreateFunc: func(_ context.Context, spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
			if !reflect.DeepEqual(spec, expected) {
				return types.ConfigCreateResponse{}, errors.Errorf("expected %+v, got %+v", expected, spec)
			}
			return types.ConfigCreateResponse{
				ID: "ID-" + spec.Name,
			}, nil
		},
	})
	cmd := newConfigCreateCommand(cli)
	cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)})
	cmd.Flags().Set("label", "lbl1=Label-foo")
	cmd.Flags().Set("label", "lbl2=Label-bar")
	assert.NilError(t, cmd.Execute())
	assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String())))
}
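
// TestConfigCreateWithTemplatingDriver verifies that the --template-driver
// flag is translated into the ConfigSpec's Templating driver.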
func TestConfigCreateWithTemplatingDriver(t *testing.T) {
	expectedDriver := &swarm.Driver{
		Name: "template-driver",
	}
	name := "foo"
	cli := test.NewFakeCli(&fakeClient{
		configCreateFunc: func(_ context.Context, spec swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
			if spec.Name != name {
				return types.ConfigCreateResponse{}, errors.Errorf("expected name %q, got %q", name, spec.Name)
			}
			if spec.Templating.Name != expectedDriver.Name {
return types.ConfigCreateResponse{}, errors.Errorf("expected driver %v, got %v", expectedDriver, spec.Labels)
			}
			return types.ConfigCreateResponse{
				ID: "ID-" + spec.Name,
			}, nil
		},
	})
	cmd := newConfigCreateCommand(cli)
	cmd.SetArgs([]string{name, filepath.Join("testdata", configDataFile)})
	cmd.Flags().Set("template-driver", expectedDriver.Name)
	assert.NilError(t, cmd.Execute())
	assert.Check(t, is.Equal("ID-"+name, strings.TrimSpace(cli.OutBuffer().String())))
}