package image

import (
	"bytes"
	"context"
	"encoding/csv"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"path/filepath"
	"strings"

	"github.com/containerd/console"
	"github.com/containerd/containerd/platforms"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/image/build"
	"github.com/docker/cli/opts"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/urlutil"
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/auth/authprovider"
	"github.com/moby/buildkit/session/filesync"
	"github.com/moby/buildkit/session/secrets/secretsprovider"
	"github.com/moby/buildkit/session/sshforward/sshprovider"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/moby/buildkit/util/progress/progressui"
	"github.com/pkg/errors"
	fsutiltypes "github.com/tonistiigi/fsutil/types"
	"golang.org/x/sync/errgroup"
)

const uploadRequestRemote = "upload-request"

var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
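
// runBuildBuildKit runs a build against the daemon's BuildKit backend. It
// prepares the build context and Dockerfile sources, opens a session that
// serves file sync, registry auth, secrets, and SSH agent forwarding, issues
// the build request, and streams progress back to the terminal.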
//nolint: gocyclo
func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
	ctx := appcontext.Context()

	s, err := trySession(dockerCli, options.context, false)
	if err != nil {
		return err
	}
	if s == nil {
		return errors.Errorf("buildkit not supported by daemon")
	}

	if options.imageIDFile != "" {
		// Avoid leaving a stale file if we eventually fail
		if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
			return errors.Wrap(err, "removing image ID file")
		}
	}

	var (
		remote           string
		body             io.Reader
		dockerfileName   = options.dockerfileName
		dockerfileReader io.ReadCloser
		dockerfileDir    string
		contextDir       string
	)

	stdoutUsed := false

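	// Decide where the build context and Dockerfile come from: an archive or
	// Dockerfile piped on stdin, a local directory, or a remote Git/HTTP URL.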
	switch {
	case options.contextFromStdin():
		if options.dockerfileFromStdin() {
			return errStdinConflict
		}
		rc, isArchive, err := build.DetectArchiveReader(os.Stdin)
		if err != nil {
			return err
		}
		if isArchive {
			body = rc
			remote = uploadRequestRemote
		} else {
			if options.dockerfileName != "" {
				return errDockerfileConflict
			}
			dockerfileReader = rc
			remote = clientSessionRemote
			// TODO: make fssync handle empty contextdir
			contextDir, _ = ioutil.TempDir("", "empty-dir")
			defer os.RemoveAll(contextDir)
		}
	case isLocalDir(options.context):
		contextDir = options.context
		if options.dockerfileFromStdin() {
			dockerfileReader = os.Stdin
		} else if options.dockerfileName != "" {
			dockerfileName = filepath.Base(options.dockerfileName)
			dockerfileDir = filepath.Dir(options.dockerfileName)
		} else {
			dockerfileDir = options.context
		}
		remote = clientSessionRemote
	case urlutil.IsGitURL(options.context):
		remote = options.context
	case urlutil.IsURL(options.context):
		remote = options.context
	default:
		return errors.Errorf("unable to prepare context: path %q not found", options.context)
	}

	if dockerfileReader != nil {
		dockerfileName = build.DefaultDockerfileName
		dockerfileDir, err = build.WriteTempDockerfile(dockerfileReader)
		if err != nil {
			return err
		}
		defer os.RemoveAll(dockerfileDir)
	}

	outputs, err := parseOutputs(options.outputs)
	if err != nil {
		return errors.Wrapf(err, "failed to parse outputs")
	}

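	// Wire up the requested exporters. For the "local" and "tar" output types
	// the destination is written on the client side, so a file-sync target is
	// attached to the session instead of being passed to the daemon.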
	for _, out := range outputs {
		switch out.Type {
		case "local":
			// dest is handled on client side for local exporter
			outDir, ok := out.Attrs["dest"]
			if !ok {
				return errors.Errorf("dest is required for local output")
			}
			delete(out.Attrs, "dest")
			s.Allow(filesync.NewFSSyncTargetDir(outDir))
		case "tar":
			// dest is handled on client side for tar exporter
			outFile, ok := out.Attrs["dest"]
			if !ok {
				return errors.Errorf("dest is required for tar output")
			}
			var w io.WriteCloser
			if outFile == "-" {
				if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
					return errors.Errorf("refusing to write output to console")
				}
				w = os.Stdout
				stdoutUsed = true
			} else {
				f, err := os.Create(outFile)
				if err != nil {
					return errors.Wrapf(err, "failed to open %s", outFile)
				}
				w = f
			}
			s.Allow(filesync.NewFSSyncTarget(w))
		}
	}

	if dockerfileDir != "" {
		s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{
			{
				Name: "context",
				Dir:  contextDir,
				Map:  resetUIDAndGID,
			},
			{
				Name: "dockerfile",
				Dir:  dockerfileDir,
			},
		}))
	}

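	// Attach credential, secret, and SSH-agent providers to the session so
	// the daemon can request them on demand during the build.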
	s.Allow(authprovider.NewDockerAuthProvider(os.Stderr))
	if len(options.secrets) > 0 {
		sp, err := parseSecretSpecs(options.secrets)
		if err != nil {
			return errors.Wrapf(err, "could not parse secrets: %v", options.secrets)
		}
		s.Allow(sp)
	}
	if len(options.ssh) > 0 {
		sshp, err := parseSSHSpecs(options.ssh)
		if err != nil {
			return errors.Wrapf(err, "could not parse ssh: %v", options.ssh)
		}
		s.Allow(sshp)
	}

	eg, ctx := errgroup.WithContext(ctx)

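	// The session runs over a hijacked HTTP connection to the daemon's
	// /session endpoint; the build request below references it by session ID.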
	dialSession := func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
		return dockerCli.Client().DialHijack(ctx, "/session", proto, meta)
	}
	eg.Go(func() error {
		return s.Run(context.TODO(), dialSession)
	})

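	// When the context is an archive read from stdin, it is uploaded in a
	// separate request tagged with uploadRequestRemote so the main build
	// request can pick it up through the shared build ID.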
	buildID := stringid.GenerateRandomID()
	if body != nil {
		eg.Go(func() error {
			buildOptions := types.ImageBuildOptions{
				Version: types.BuilderBuildKit,
				BuildID: uploadRequestRemote + ":" + buildID,
			}

			response, err := dockerCli.Client().ImageBuild(context.Background(), body, buildOptions)
			if err != nil {
				return err
			}
			defer response.Body.Close()
			return nil
		})
	}

	if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && options.progress == "auto" {
		options.progress = v
	}

	if strings.EqualFold(options.platform, "local") {
		options.platform = platforms.DefaultString()
	}

	eg.Go(func() error {
		defer func() { // make sure the Status ends cleanly on build errors
			s.Close()
		}()

		buildOptions := imageBuildOptions(dockerCli, options)
		buildOptions.Version = types.BuilderBuildKit
		buildOptions.Dockerfile = dockerfileName
		//buildOptions.AuthConfigs = authConfigs // handled by session
		buildOptions.RemoteContext = remote
		buildOptions.SessionID = s.ID()
		buildOptions.BuildID = buildID
		buildOptions.Outputs = outputs
		return doBuild(ctx, eg, dockerCli, stdoutUsed, options, buildOptions)
	})

	return eg.Wait()
}
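
// doBuild issues the ImageBuild request prepared by runBuildBuildKit, cancels
// the server-side build if the context is done, renders BuildKit progress, and
// handles the resulting image ID (quiet output and --iidfile).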
//nolint: gocyclo
func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, stdoutUsed bool, options buildOptions, buildOptions types.ImageBuildOptions) (finalErr error) {
	response, err := dockerCli.Client().ImageBuild(context.Background(), nil, buildOptions)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	done := make(chan struct{})
	defer close(done)
	eg.Go(func() error {
		select {
		case <-ctx.Done():
			return dockerCli.Client().BuildCancel(context.TODO(), buildOptions.BuildID)
		case <-done:
		}
		return nil
	})

	t := newTracer()
	ssArr := []*client.SolveStatus{}

	if err := opts.ValidateProgressOutput(options.progress); err != nil {
		return err
	}

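	// displayStatus renders SolveStatus updates from displayCh to out, using
	// the interactive TTY display when a console is available and progress is
	// "auto" or "tty", and plain text otherwise.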
	displayStatus := func(out *os.File, displayCh chan *client.SolveStatus) {
		var c console.Console
		// TODO: Handle tty output in non-tty environment.
		if cons, err := console.ConsoleFromFile(out); err == nil && (options.progress == "auto" || options.progress == "tty") {
			c = cons
		}
		// not using the shared context so as not to disrupt the display, but let it finish reporting errors
		eg.Go(func() error {
			return progressui.DisplaySolveStatus(context.TODO(), "", c, out, displayCh)
		})
	}

	if options.quiet {
		eg.Go(func() error {
			// TODO: make sure t.displayCh closes
			for ss := range t.displayCh {
				ssArr = append(ssArr, ss)
			}
			<-done
			// TODO: verify that finalErr is indeed set when error occurs
			if finalErr != nil {
				displayCh := make(chan *client.SolveStatus)
				go func() {
					for _, ss := range ssArr {
						displayCh <- ss
					}
					close(displayCh)
				}()
				displayStatus(os.Stderr, displayCh)
			}
			return nil
		})
	} else {
		displayStatus(os.Stderr, t.displayCh)
	}
	defer close(t.displayCh)

	buf := bytes.NewBuffer(nil)

	imageID := ""
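	// writeAux captures the "moby.image.id" aux message emitted by the daemon
	// and forwards all other aux messages to the tracer for progress display.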
	writeAux := func(msg jsonmessage.JSONMessage) {
		if msg.ID == "moby.image.id" {
			var result types.BuildResult
			if err := json.Unmarshal(*msg.Aux, &result); err != nil {
				fmt.Fprintf(dockerCli.Err(), "failed to parse aux message: %v", err)
			}
			imageID = result.ID
			return
		}
		t.write(msg)
	}

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buf, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), writeAux)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
	}

	// Everything worked, so if -q was provided the output from the daemon
	// should be just the image ID, and we'll print that to stdout.
	//
	// TODO: we may want to use Aux messages with ID "moby.image.id" regardless of options.quiet (i.e. don't send HTTP param q=1)
	// instead of assuming that output is image ID if options.quiet.
	if options.quiet && !stdoutUsed {
		imageID = buf.String()
		fmt.Fprint(dockerCli.Out(), imageID)
	}

	if options.imageIDFile != "" {
		if imageID == "" {
			return errors.Errorf("cannot write %s because server did not provide an image ID", options.imageIDFile)
		}
		imageID = strings.TrimSpace(imageID)
		if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil {
			return errors.Wrap(err, "cannot write image ID file")
		}
	}
	return err
}
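
// resetUIDAndGID normalizes ownership of files sent in the build context so
// that the daemon sees everything as owned by root.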
func resetUIDAndGID(_ string, s *fsutiltypes.Stat) bool {
	s.Uid = 0
	s.Gid = 0
	return true
}

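// tracer converts the raw BuildKit status stream embedded in the daemon's
// JSON progress messages into client.SolveStatus values for display.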
type tracer struct {
	displayCh chan *client.SolveStatus
}

func newTracer() *tracer {
	return &tracer{
		displayCh: make(chan *client.SolveStatus),
	}
}

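// write decodes a "moby.buildkit.trace" aux message into a StatusResponse and
// republishes it on displayCh; messages it cannot decode are silently dropped.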
func (t *tracer) write(msg jsonmessage.JSONMessage) {
	var resp controlapi.StatusResponse

	if msg.ID != "moby.buildkit.trace" {
		return
	}

	var dt []byte
	// ignoring all messages that are not understood
	if err := json.Unmarshal(*msg.Aux, &dt); err != nil {
		return
	}
	if err := (&resp).Unmarshal(dt); err != nil {
		return
	}

	s := client.SolveStatus{}
	for _, v := range resp.Vertexes {
		s.Vertexes = append(s.Vertexes, &client.Vertex{
			Digest:    v.Digest,
			Inputs:    v.Inputs,
			Name:      v.Name,
			Started:   v.Started,
			Completed: v.Completed,
			Error:     v.Error,
			Cached:    v.Cached,
		})
	}
	for _, v := range resp.Statuses {
		s.Statuses = append(s.Statuses, &client.VertexStatus{
			ID:        v.ID,
			Vertex:    v.Vertex,
			Name:      v.Name,
			Total:     v.Total,
			Current:   v.Current,
			Timestamp: v.Timestamp,
			Started:   v.Started,
			Completed: v.Completed,
		})
	}
	for _, v := range resp.Logs {
		s.Logs = append(s.Logs, &client.VertexLog{
			Vertex:    v.Vertex,
			Stream:    int(v.Stream),
			Data:      v.Msg,
			Timestamp: v.Timestamp,
		})
	}

	t.displayCh <- &s
}

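// parseSecretSpecs turns the --secret flag values into a secrets provider that
// serves the referenced files to the build over the session.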
func parseSecretSpecs(sl []string) (session.Attachable, error) {
	fs := make([]secretsprovider.FileSource, 0, len(sl))
	for _, v := range sl {
		s, err := parseSecret(v)
		if err != nil {
			return nil, err
		}
		fs = append(fs, *s)
	}
	store, err := secretsprovider.NewFileStore(fs)
	if err != nil {
		return nil, err
	}
	return secretsprovider.NewSecretProvider(store), nil
}

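// parseSecret parses a single comma-separated --secret value such as
// "id=mysecret,src=/path/to/file"; the only supported type is "file".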
func parseSecret(value string) (*secretsprovider.FileSource, error) {
	csvReader := csv.NewReader(strings.NewReader(value))
	fields, err := csvReader.Read()
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse csv secret")
	}

	fs := secretsprovider.FileSource{}

	for _, field := range fields {
		parts := strings.SplitN(field, "=", 2)
		key := strings.ToLower(parts[0])

		if len(parts) != 2 {
			return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
		}

		value := parts[1]
		switch key {
		case "type":
			if value != "file" {
				return nil, errors.Errorf("unsupported secret type %q", value)
			}
		case "id":
			fs.ID = value
		case "source", "src":
			fs.FilePath = value
		default:
			return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field)
		}
	}
	return &fs, nil
}

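// parseSSHSpecs turns the --ssh flag values into an SSH agent provider that
// forwards the named agents or keys to the build over the session.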
func parseSSHSpecs(sl []string) (session.Attachable, error) {
	configs := make([]sshprovider.AgentConfig, 0, len(sl))
	for _, v := range sl {
		c, err := parseSSH(v)
		if err != nil {
			return nil, err
		}
		configs = append(configs, *c)
	}
	return sshprovider.NewSSHAgentProvider(configs)
}

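// parseSSH parses a single --ssh value of the form "id" or "id=path[,path...]",
// e.g. "default" or "mykey=/path/to/key".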
func parseSSH(value string) (*sshprovider.AgentConfig, error) {
	parts := strings.SplitN(value, "=", 2)
	cfg := sshprovider.AgentConfig{
		ID: parts[0],
	}
	if len(parts) > 1 {
		cfg.Paths = strings.Split(parts[1], ",")
	}
	return &cfg, nil
}