From 0f9764291505177a37fae50d2a6328b2c7d8ff5e Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 19 Apr 2018 10:07:27 -0700 Subject: [PATCH] build: basic buildkit progress support Signed-off-by: Tonis Tiigi --- cli/command/image/build.go | 5 +- cli/command/image/build_buildkit.go | 120 +- cli/command/image/trust.go | 4 +- vendor.conf | 5 +- vendor/github.com/containerd/console/LICENSE | 201 + .../github.com/containerd/console/README.md | 17 + .../github.com/containerd/console/console.go | 78 + .../containerd/console/console_linux.go | 271 + .../containerd/console/console_unix.go | 158 + .../containerd/console/console_windows.go | 216 + .../containerd/console/tc_darwin.go | 53 + .../containerd/console/tc_freebsd.go | 45 + .../github.com/containerd/console/tc_linux.go | 49 + .../containerd/console/tc_openbsd_cgo.go | 51 + .../containerd/console/tc_openbsd_nocgo.go | 47 + .../containerd/console/tc_solaris_cgo.go | 51 + .../containerd/console/tc_solaris_nocgo.go | 47 + .../github.com/containerd/console/tc_unix.go | 91 + .../docker/docker/api/types/client.go | 16 + .../docker/docker/api/types/types.go | 17 +- .../docker/docker/client/build_cancel.go | 21 + .../github.com/docker/docker/client/hijack.go | 105 +- .../docker/docker/client/image_build.go | 4 + .../docker/docker/client/interface.go | 1 + .../docker/docker/client/tlsconfig_clone.go | 11 - .../docker/client/tlsconfig_clone_go17.go | 33 - vendor/github.com/docker/docker/vendor.conf | 17 +- vendor/github.com/google/shlex/COPYING | 202 + vendor/github.com/google/shlex/README | 2 + vendor/github.com/google/shlex/shlex.go | 417 ++ .../api/services/control/control.pb.go | 4871 +++++++++++++++++ .../api/services/control/control.proto | 121 + .../buildkit/api/services/control/generate.go | 3 + .../github.com/moby/buildkit/client/client.go | 132 + .../moby/buildkit/client/client_unix.go | 19 + .../moby/buildkit/client/client_windows.go | 24 + .../moby/buildkit/client/diskusage.go | 73 + .../moby/buildkit/client/exporters.go | 8 + .../github.com/moby/buildkit/client/graph.go | 45 + .../moby/buildkit/client/llb/exec.go | 372 ++ .../moby/buildkit/client/llb/marshal.go | 60 + .../moby/buildkit/client/llb/meta.go | 152 + .../moby/buildkit/client/llb/resolver.go | 17 + .../moby/buildkit/client/llb/source.go | 344 ++ .../moby/buildkit/client/llb/state.go | 312 ++ .../github.com/moby/buildkit/client/prune.go | 50 + .../github.com/moby/buildkit/client/solve.go | 251 + .../moby/buildkit/client/workers.go | 49 + .../moby/buildkit/session/grpchijack/dial.go | 156 + .../buildkit/session/grpchijack/hijack.go | 14 + .../moby/buildkit/solver/pb/attr.go | 15 + .../moby/buildkit/solver/pb/const.go | 12 + .../moby/buildkit/solver/pb/generate.go | 3 + .../moby/buildkit/solver/pb/ops.pb.go | 4490 +++++++++++++++ .../moby/buildkit/solver/pb/ops.proto | 136 + .../util/appdefaults/appdefaults_unix.go | 55 + .../util/appdefaults/appdefaults_windows.go | 18 + .../util/progress/progressui/display.go | 310 ++ .../moby/buildkit/util/system/path_unix.go | 14 + .../moby/buildkit/util/system/path_windows.go | 37 + .../buildkit/util/system/seccomp_linux.go | 29 + .../buildkit/util/system/seccomp_nolinux.go | 7 + vendor/github.com/tonistiigi/units/LICENSE | 21 + vendor/github.com/tonistiigi/units/bytes.go | 117 + vendor/github.com/tonistiigi/units/readme.md | 29 + 65 files changed, 14551 insertions(+), 170 deletions(-) create mode 100644 vendor/github.com/containerd/console/LICENSE create mode 100644 vendor/github.com/containerd/console/README.md create mode 100644 
vendor/github.com/containerd/console/console.go create mode 100644 vendor/github.com/containerd/console/console_linux.go create mode 100644 vendor/github.com/containerd/console/console_unix.go create mode 100644 vendor/github.com/containerd/console/console_windows.go create mode 100644 vendor/github.com/containerd/console/tc_darwin.go create mode 100644 vendor/github.com/containerd/console/tc_freebsd.go create mode 100644 vendor/github.com/containerd/console/tc_linux.go create mode 100644 vendor/github.com/containerd/console/tc_openbsd_cgo.go create mode 100644 vendor/github.com/containerd/console/tc_openbsd_nocgo.go create mode 100644 vendor/github.com/containerd/console/tc_solaris_cgo.go create mode 100644 vendor/github.com/containerd/console/tc_solaris_nocgo.go create mode 100644 vendor/github.com/containerd/console/tc_unix.go create mode 100644 vendor/github.com/docker/docker/client/build_cancel.go delete mode 100644 vendor/github.com/docker/docker/client/tlsconfig_clone.go delete mode 100644 vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go create mode 100644 vendor/github.com/google/shlex/COPYING create mode 100644 vendor/github.com/google/shlex/README create mode 100644 vendor/github.com/google/shlex/shlex.go create mode 100644 vendor/github.com/moby/buildkit/api/services/control/control.pb.go create mode 100644 vendor/github.com/moby/buildkit/api/services/control/control.proto create mode 100644 vendor/github.com/moby/buildkit/api/services/control/generate.go create mode 100644 vendor/github.com/moby/buildkit/client/client.go create mode 100644 vendor/github.com/moby/buildkit/client/client_unix.go create mode 100644 vendor/github.com/moby/buildkit/client/client_windows.go create mode 100644 vendor/github.com/moby/buildkit/client/diskusage.go create mode 100644 vendor/github.com/moby/buildkit/client/exporters.go create mode 100644 vendor/github.com/moby/buildkit/client/graph.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/exec.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/marshal.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/meta.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/resolver.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/source.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/state.go create mode 100644 vendor/github.com/moby/buildkit/client/prune.go create mode 100644 vendor/github.com/moby/buildkit/client/solve.go create mode 100644 vendor/github.com/moby/buildkit/client/workers.go create mode 100644 vendor/github.com/moby/buildkit/session/grpchijack/dial.go create mode 100644 vendor/github.com/moby/buildkit/session/grpchijack/hijack.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/attr.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/const.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/generate.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/ops.pb.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/ops.proto create mode 100644 vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go create mode 100644 vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go create mode 100644 vendor/github.com/moby/buildkit/util/progress/progressui/display.go create mode 100644 vendor/github.com/moby/buildkit/util/system/path_unix.go create mode 100644 vendor/github.com/moby/buildkit/util/system/path_windows.go create mode 100644 
vendor/github.com/moby/buildkit/util/system/seccomp_linux.go create mode 100644 vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go create mode 100644 vendor/github.com/tonistiigi/units/LICENSE create mode 100644 vendor/github.com/tonistiigi/units/bytes.go create mode 100644 vendor/github.com/tonistiigi/units/readme.md diff --git a/cli/command/image/build.go b/cli/command/image/build.go index ad7dad4176..b8dd40894e 100644 --- a/cli/command/image/build.go +++ b/cli/command/image/build.go @@ -396,6 +396,7 @@ func runBuild(dockerCli command.Cli, options buildOptions) error { Target: options.target, RemoteContext: remote, Platform: options.platform, + Version: types.BuilderV1, } if s != nil { @@ -420,9 +421,9 @@ func runBuild(dockerCli command.Cli, options buildOptions) error { defer response.Body.Close() imageID := "" - aux := func(m jsonmessage.JSONMessage) { + aux := func(msg jsonmessage.JSONMessage) { var result types.BuildResult - if err := json.Unmarshal(*m.Aux, &result); err != nil { + if err := json.Unmarshal(*msg.Aux, &result); err != nil { fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err) } else { imageID = result.ID diff --git a/cli/command/image/build_buildkit.go b/cli/command/image/build_buildkit.go index 1dfac64336..0ac3f0d5c6 100644 --- a/cli/command/image/build_buildkit.go +++ b/cli/command/image/build_buildkit.go @@ -1,18 +1,27 @@ package image import ( + "context" + "encoding/json" "io" "os" "path/filepath" + "github.com/containerd/console" + "github.com/docker/cli/cli" "github.com/docker/cli/cli/command" "github.com/docker/cli/opts" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/urlutil" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client" "github.com/moby/buildkit/session/auth/authprovider" "github.com/moby/buildkit/session/filesync" "github.com/moby/buildkit/util/appcontext" + "github.com/moby/buildkit/util/progress/progressui" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" "golang.org/x/sync/errgroup" @@ -29,13 +38,13 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error { return errors.Errorf("buildkit not supported by daemon") } - remote := clientSessionRemote - local := false + var remote string + var body io.Reader switch { case options.contextFromStdin(): - return errors.Errorf("stdin not implemented") + body = os.Stdin case isLocalDir(options.context): - local = true + remote = clientSessionRemote case urlutil.IsGitURL(options.context): remote = options.context case urlutil.IsURL(options.context): @@ -51,7 +60,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error { // statusContext = opentracing.ContextWithSpan(statusContext, span) // } - if local { + if remote == clientSessionRemote { s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{ { Name: "context", @@ -70,7 +79,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error { eg, ctx := errgroup.WithContext(ctx) eg.Go(func() error { - return s.Run(ctx, dockerCli.Client().DialSession) + return s.Run(context.TODO(), dockerCli.Client().DialSession) }) eg.Go(func() error { @@ -78,6 +87,8 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error { s.Close() }() + buildID := stringid.GenerateRandomID() + configFile := dockerCli.ConfigFile() buildOptions := types.ImageBuildOptions{ Memory: 
options.memory.Value(),
@@ -109,16 +120,50 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
 		Target: options.target,
 		RemoteContext: remote,
 		Platform: options.platform,
-		SessionID: "buildkit:" + s.ID(),
+		SessionID: s.ID(),
+		Version: types.BuilderBuildKit,
+		BuildID: buildID,
 	}
-	response, err := dockerCli.Client().ImageBuild(ctx, nil, buildOptions)
+	response, err := dockerCli.Client().ImageBuild(context.Background(), body, buildOptions)
 	if err != nil {
 		return err
 	}
 	defer response.Body.Close()
-	if _, err := io.Copy(os.Stdout, response.Body); err != nil {
+	done := make(chan struct{})
+	defer close(done)
+	eg.Go(func() error {
+		select {
+		case <-ctx.Done():
+			return dockerCli.Client().BuildCancel(context.TODO(), buildID)
+		case <-done:
+		}
+		return nil
+	})
+
+	t := newTracer()
+	var auxCb func(jsonmessage.JSONMessage)
+	if c, err := console.ConsoleFromFile(os.Stderr); err == nil {
+		// not using the shared context so as not to disrupt the display, but let it finish reporting errors
+		auxCb = t.write
+		eg.Go(func() error {
+			return progressui.DisplaySolveStatus(context.TODO(), c, t.displayCh)
+		})
+		defer close(t.displayCh)
+	}
+	err = jsonmessage.DisplayJSONMessagesStream(response.Body, os.Stdout, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), auxCb)
+	if err != nil {
+		if jerr, ok := err.(*jsonmessage.JSONError); ok {
+			// If no error code is set, default to 1
+			if jerr.Code == 0 {
+				jerr.Code = 1
+			}
+			// if options.quiet {
+			// 	fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
+			// }
+			return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
+		}
 		return err
 	}
@@ -133,3 +178,60 @@ func resetUIDAndGID(s *fsutil.Stat) bool {
 	s.Gid = uint32(0)
 	return true
 }
+
+type tracer struct {
+	displayCh chan *client.SolveStatus
+}
+
+func newTracer() *tracer {
+	return &tracer{
+		displayCh: make(chan *client.SolveStatus),
+	}
+}
+
+func (t *tracer) write(msg jsonmessage.JSONMessage) {
+	var resp controlapi.StatusResponse
+
+	var dt []byte
+	if err := json.Unmarshal(*msg.Aux, &dt); err != nil {
+		return
+	}
+	if err := (&resp).Unmarshal(dt); err != nil {
+		return
+	}
+
+	s := client.SolveStatus{}
+	for _, v := range resp.Vertexes {
+		s.Vertexes = append(s.Vertexes, &client.Vertex{
+			Digest: v.Digest,
+			Inputs: v.Inputs,
+			Name: v.Name,
+			Started: v.Started,
+			Completed: v.Completed,
+			Error: v.Error,
+			Cached: v.Cached,
+		})
+	}
+	for _, v := range resp.Statuses {
+		s.Statuses = append(s.Statuses, &client.VertexStatus{
+			ID: v.ID,
+			Vertex: v.Vertex,
+			Name: v.Name,
+			Total: v.Total,
+			Current: v.Current,
+			Timestamp: v.Timestamp,
+			Started: v.Started,
+			Completed: v.Completed,
+		})
+	}
+	for _, v := range resp.Logs {
+		s.Logs = append(s.Logs, &client.VertexLog{
+			Vertex: v.Vertex,
+			Stream: int(v.Stream),
+			Data: v.Msg,
+			Timestamp: v.Timestamp,
+		})
+	}
+
+	t.displayCh <- &s
+}
diff --git a/cli/command/image/trust.go b/cli/command/image/trust.go
index 5d1bc04641..230420d862 100644
--- a/cli/command/image/trust.go
+++ b/cli/command/image/trust.go
@@ -49,7 +49,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
 	// Count the times of calling for handleTarget,
 	// if it is called more that once, that should be considered an error in a trusted push.
 	cnt := 0
-	handleTarget := func(m jsonmessage.JSONMessage) {
+	handleTarget := func(msg jsonmessage.JSONMessage) {
 		cnt++
 		if cnt > 1 {
 			// handleTarget should only be called once. This will be treated as an error.
@@ -57,7 +57,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository } var pushResult types.PushResult - err := json.Unmarshal(*m.Aux, &pushResult) + err := json.Unmarshal(*msg.Aux, &pushResult) if err == nil && pushResult.Tag != "" { if dgst, err := digest.Parse(pushResult.Digest); err == nil { h, err := hex.DecodeString(dgst.Hex()) diff --git a/vendor.conf b/vendor.conf index 55296c4c44..a925ba3b54 100755 --- a/vendor.conf +++ b/vendor.conf @@ -6,7 +6,7 @@ github.com/coreos/etcd v3.2.1 github.com/cpuguy83/go-md2man v1.0.8 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5 -github.com/docker/docker 162ba6016def672690ee4a1f3978368853a1e149 +github.com/docker/docker experimental-buildkit https://github.com/tonistiigi/docker github.com/docker/docker-credential-helpers 3c90bd29a46b943b2a9842987b58fb91a7c1819b # the docker/go package contains a customized version of canonical/json # and is used by Notary. The package is periodically rebased on current Go versions. @@ -91,3 +91,6 @@ k8s.io/client-go kubernetes-1.8.2 k8s.io/kubernetes v1.8.2 k8s.io/kube-openapi 61b46af70dfed79c6d24530cd23b41440a7f22a5 vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d +github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925 +github.com/tonistiigi/units 29de085e9400559bd68aea2e7bc21566e7b8281d +github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/containerd/console/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md new file mode 100644 index 0000000000..4c56d9d134 --- /dev/null +++ b/vendor/github.com/containerd/console/README.md @@ -0,0 +1,17 @@ +# console + +[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console) + +Golang package for dealing with consoles. Light on deps and a simple API. + +## Modifying the current process + +```go +current := console.Current() +defer current.Reset() + +if err := current.SetRaw(); err != nil { +} +ws, err := current.Size() +current.Resize(ws) +``` diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go new file mode 100644 index 0000000000..c187a9b412 --- /dev/null +++ b/vendor/github.com/containerd/console/console.go @@ -0,0 +1,78 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "errors" + "io" + "os" +) + +var ErrNotAConsole = errors.New("provided file is not a console") + +type Console interface { + io.Reader + io.Writer + io.Closer + + // Resize resizes the console to the provided window size + Resize(WinSize) error + // ResizeFrom resizes the calling console to the size of the + // provided console + ResizeFrom(Console) error + // SetRaw sets the console in raw mode + SetRaw() error + // DisableEcho disables echo on the console + DisableEcho() error + // Reset restores the console to its orignal state + Reset() error + // Size returns the window size of the console + Size() (WinSize, error) + // Fd returns the console's file descriptor + Fd() uintptr + // Name returns the console's file name + Name() string +} + +// WinSize specifies the window size of the console +type WinSize struct { + // Height of the console + Height uint16 + // Width of the console + Width uint16 + x uint16 + y uint16 +} + +// Current returns the current processes console +func Current() Console { + c, err := ConsoleFromFile(os.Stdin) + if err != nil { + // stdin should always be a console for the design + // of this function + panic(err) + } + return c +} + +// ConsoleFromFile returns a console using the provided file +func ConsoleFromFile(f *os.File) (Console, error) { + if err := checkConsole(f); err != nil { + return nil, err + } + return newMaster(f) +} diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go new file mode 100644 index 0000000000..312bce17d7 --- /dev/null +++ b/vendor/github.com/containerd/console/console_linux.go @@ -0,0 +1,271 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "io" + "os" + "sync" + + "golang.org/x/sys/unix" +) + +const ( + maxEvents = 128 +) + +// Epoller manages multiple epoll consoles using edge-triggered epoll api so we +// dont have to deal with repeated wake-up of EPOLLER or EPOLLHUP. 
+// For more details, see: +// - https://github.com/systemd/systemd/pull/4262 +// - https://github.com/moby/moby/issues/27202 +// +// Example usage of Epoller and EpollConsole can be as follow: +// +// epoller, _ := NewEpoller() +// epollConsole, _ := epoller.Add(console) +// go epoller.Wait() +// var ( +// b bytes.Buffer +// wg sync.WaitGroup +// ) +// wg.Add(1) +// go func() { +// io.Copy(&b, epollConsole) +// wg.Done() +// }() +// // perform I/O on the console +// epollConsole.Shutdown(epoller.CloseConsole) +// wg.Wait() +// epollConsole.Close() +type Epoller struct { + efd int + mu sync.Mutex + fdMapping map[int]*EpollConsole +} + +// NewEpoller returns an instance of epoller with a valid epoll fd. +func NewEpoller() (*Epoller, error) { + efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if err != nil { + return nil, err + } + return &Epoller{ + efd: efd, + fdMapping: make(map[int]*EpollConsole), + }, nil +} + +// Add creates a epoll console based on the provided console. The console will +// be registered with EPOLLET (i.e. using edge-triggered notification) and its +// file descriptor will be set to non-blocking mode. After this, user should use +// the return console to perform I/O. +func (e *Epoller) Add(console Console) (*EpollConsole, error) { + sysfd := int(console.Fd()) + // Set sysfd to non-blocking mode + if err := unix.SetNonblock(sysfd, true); err != nil { + return nil, err + } + + ev := unix.EpollEvent{ + Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET, + Fd: int32(sysfd), + } + if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil { + return nil, err + } + ef := &EpollConsole{ + Console: console, + sysfd: sysfd, + readc: sync.NewCond(&sync.Mutex{}), + writec: sync.NewCond(&sync.Mutex{}), + } + e.mu.Lock() + e.fdMapping[sysfd] = ef + e.mu.Unlock() + return ef, nil +} + +// Wait starts the loop to wait for its consoles' notifications and signal +// appropriate console that it can perform I/O. +func (e *Epoller) Wait() error { + events := make([]unix.EpollEvent, maxEvents) + for { + n, err := unix.EpollWait(e.efd, events, -1) + if err != nil { + // EINTR: The call was interrupted by a signal handler before either + // any of the requested events occurred or the timeout expired + if err == unix.EINTR { + continue + } + return err + } + for i := 0; i < n; i++ { + ev := &events[i] + // the console is ready to be read from + if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 { + if epfile := e.getConsole(int(ev.Fd)); epfile != nil { + epfile.signalRead() + } + } + // the console is ready to be written to + if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 { + if epfile := e.getConsole(int(ev.Fd)); epfile != nil { + epfile.signalWrite() + } + } + } + } +} + +// Close unregister the console's file descriptor from epoll interface +func (e *Epoller) CloseConsole(fd int) error { + e.mu.Lock() + defer e.mu.Unlock() + delete(e.fdMapping, fd) + return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{}) +} + +func (e *Epoller) getConsole(sysfd int) *EpollConsole { + e.mu.Lock() + f := e.fdMapping[sysfd] + e.mu.Unlock() + return f +} + +// Close the epoll fd +func (e *Epoller) Close() error { + return unix.Close(e.efd) +} + +// EpollConsole acts like a console but register its file descriptor with a +// epoll fd and uses epoll API to perform I/O. +type EpollConsole struct { + Console + readc *sync.Cond + writec *sync.Cond + sysfd int + closed bool +} + +// Read reads up to len(p) bytes into p. 
It returns the number of bytes read +// (0 <= n <= len(p)) and any error encountered. +// +// If the console's read returns EAGAIN or EIO, we assumes that its a +// temporary error because the other side went away and wait for the signal +// generated by epoll event to continue. +func (ec *EpollConsole) Read(p []byte) (n int, err error) { + var read int + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + for { + read, err = ec.Console.Read(p[n:]) + n += read + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappear, assume this is temporary and wait for the + // signal to continue again. Unless we didnt read anything and the + // console is already marked as closed then we should exit + if hangup && !(n == 0 && len(p) > 0 && ec.closed) { + ec.readc.Wait() + continue + } + } + break + } + // if we didnt read anything then return io.EOF to end gracefully + if n == 0 && len(p) > 0 && err == nil { + err = io.EOF + } + // signal for others that we finished the read + ec.readc.Signal() + return n, err +} + +// Writes len(p) bytes from p to the console. It returns the number of bytes +// written from p (0 <= n <= len(p)) and any error encountered that caused +// the write to stop early. +// +// If writes to the console returns EAGAIN or EIO, we assumes that its a +// temporary error because the other side went away and wait for the signal +// generated by epoll event to continue. +func (ec *EpollConsole) Write(p []byte) (n int, err error) { + var written int + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + for { + written, err = ec.Console.Write(p[n:]) + n += written + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappear, assume this is temporary and wait for the + // signal to continue again. + if hangup { + ec.writec.Wait() + continue + } + } + // unrecoverable error, break the loop and return the error + break + } + if n < len(p) && err == nil { + err = io.ErrShortWrite + } + // signal for others that we finished the write + ec.writec.Signal() + return n, err +} + +// Close closed the file descriptor and signal call waiters for this fd. +// It accepts a callback which will be called with the console's fd. The +// callback typically will be used to do further cleanup such as unregister the +// console's fd from the epoll interface. +// User should call Shutdown and wait for all I/O operation to be finished +// before closing the console. +func (ec *EpollConsole) Shutdown(close func(int) error) error { + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + + ec.readc.Broadcast() + ec.writec.Broadcast() + ec.closed = true + return close(ec.sysfd) +} + +// signalRead signals that the console is readable. +func (ec *EpollConsole) signalRead() { + ec.readc.Signal() +} + +// signalWrite signals that the console is writable. 
+func (ec *EpollConsole) signalWrite() { + ec.writec.Signal() +} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go new file mode 100644 index 0000000000..a4a8d1267b --- /dev/null +++ b/vendor/github.com/containerd/console/console_unix.go @@ -0,0 +1,158 @@ +// +build darwin freebsd linux openbsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// NewPty creates a new pty pair +// The master is returned as the first console and a string +// with the path to the pty slave is returned as the second +func NewPty() (Console, string, error) { + f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + slave, err := ptsname(f) + if err != nil { + return nil, "", err + } + if err := unlockpt(f); err != nil { + return nil, "", err + } + m, err := newMaster(f) + if err != nil { + return nil, "", err + } + return m, slave, nil +} + +type master struct { + f *os.File + original *unix.Termios +} + +func (m *master) Read(b []byte) (int, error) { + return m.f.Read(b) +} + +func (m *master) Write(b []byte) (int, error) { + return m.f.Write(b) +} + +func (m *master) Close() error { + return m.f.Close() +} + +func (m *master) Resize(ws WinSize) error { + return tcswinsz(m.f.Fd(), ws) +} + +func (m *master) ResizeFrom(c Console) error { + ws, err := c.Size() + if err != nil { + return err + } + return m.Resize(ws) +} + +func (m *master) Reset() error { + if m.original == nil { + return nil + } + return tcset(m.f.Fd(), m.original) +} + +func (m *master) getCurrent() (unix.Termios, error) { + var termios unix.Termios + if err := tcget(m.f.Fd(), &termios); err != nil { + return unix.Termios{}, err + } + return termios, nil +} + +func (m *master) SetRaw() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState = cfmakeraw(rawState) + rawState.Oflag = rawState.Oflag | unix.OPOST + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) DisableEcho() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState.Lflag = rawState.Lflag &^ unix.ECHO + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) Size() (WinSize, error) { + return tcgwinsz(m.f.Fd()) +} + +func (m *master) Fd() uintptr { + return m.f.Fd() +} + +func (m *master) Name() string { + return m.f.Name() +} + +// checkConsole checks if the provided file is a console +func checkConsole(f *os.File) error { + var termios unix.Termios + if tcget(f.Fd(), &termios) != nil { + return ErrNotAConsole + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + m := &master{ + f: f, + } + t, err := m.getCurrent() + if err != nil { + return nil, err + } + m.original = &t + return m, nil +} + +// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts normally. 
In particular, a not-very-well-known default of +// Linux unix98 ptys is that they have +onlcr by default. While this isn't a +// problem for terminal emulators, because we relay data from the terminal we +// also relay that funky line discipline. +func ClearONLCR(fd uintptr) error { + return setONLCR(fd, false) +} + +// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts as intended for a terminal emulator. +func SetONLCR(fd uintptr) error { + return setONLCR(fd, true) +} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go new file mode 100644 index 0000000000..7aa726f995 --- /dev/null +++ b/vendor/github.com/containerd/console/console_windows.go @@ -0,0 +1,216 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +var ( + vtInputSupported bool + ErrNotImplemented = errors.New("not implemented") +) + +func (m *master) initStdios() { + m.in = windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { + // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + windows.SetConsoleMode(m.in, m.inMode) + } else { + fmt.Printf("failed to get console mode for stdin: %v\n", err) + } + + m.out = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { + if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.out, m.outMode) + } + } else { + fmt.Printf("failed to get console mode for stdout: %v\n", err) + } + + m.err = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { + if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.err, m.errMode) + } + } else { + fmt.Printf("failed to get console mode for stderr: %v\n", err) + } +} + +type master struct { + in windows.Handle + inMode uint32 + + out windows.Handle + outMode uint32 + + err windows.Handle + errMode uint32 +} + +func (m *master) SetRaw() error { + if err := makeInputRaw(m.in, m.inMode); err != nil { + return err + } + + // Set StdOut and StdErr to raw mode, we ignore failures since + // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of + // Windows. 
+ + windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + return nil +} + +func (m *master) Reset() error { + for _, s := range []struct { + fd windows.Handle + mode uint32 + }{ + {m.in, m.inMode}, + {m.out, m.outMode}, + {m.err, m.errMode}, + } { + if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { + return errors.Wrap(err, "unable to restore console mode") + } + } + + return nil +} + +func (m *master) Size() (WinSize, error) { + var info windows.ConsoleScreenBufferInfo + err := windows.GetConsoleScreenBufferInfo(m.out, &info) + if err != nil { + return WinSize{}, errors.Wrap(err, "unable to get console info") + } + + winsize := WinSize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +func (m *master) Resize(ws WinSize) error { + return ErrNotImplemented +} + +func (m *master) ResizeFrom(c Console) error { + return ErrNotImplemented +} + +func (m *master) DisableEcho() error { + mode := m.inMode &^ windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT + mode |= windows.ENABLE_LINE_INPUT + + if err := windows.SetConsoleMode(m.in, mode); err != nil { + return errors.Wrap(err, "unable to set console to disable echo") + } + + return nil +} + +func (m *master) Close() error { + return nil +} + +func (m *master) Read(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Write(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Fd() uintptr { + return uintptr(m.in) +} + +// on windows, console can only be made from os.Std{in,out,err}, hence there +// isnt a single name here we can use. Return a dummy "console" value in this +// case should be sufficient. 
+func (m *master) Name() string { + return "console" +} + +// makeInputRaw puts the terminal (Windows Console) connected to the given +// file descriptor into raw mode +func makeInputRaw(fd windows.Handle, mode uint32) error { + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + if err := windows.SetConsoleMode(fd, mode); err != nil { + return errors.Wrap(err, "unable to set console to raw mode") + } + + return nil +} + +func checkConsole(f *os.File) error { + var mode uint32 + if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { + return err + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + if f != os.Stdin && f != os.Stdout && f != os.Stderr { + return nil, errors.New("creating a console from a file is not supported on windows") + } + m := &master{} + m.initStdios() + return m, nil +} diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go new file mode 100644 index 0000000000..b0128abb0c --- /dev/null +++ b/vendor/github.com/containerd/console/tc_darwin.go @@ -0,0 +1,53 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go new file mode 100644 index 0000000000..04583a6156 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_freebsd.go @@ -0,0 +1,45 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +// This does not exist on FreeBSD, it does not allocate controlling terminals on open +func unlockpt(f *os.File) error { + return nil +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go new file mode 100644 index 0000000000..1bdd68e6d5 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_linux.go @@ -0,0 +1,49 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 { + return err + } + return nil +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + var u uint32 + if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", u), nil +} diff --git a/vendor/github.com/containerd/console/tc_openbsd_cgo.go b/vendor/github.com/containerd/console/tc_openbsd_cgo.go new file mode 100644 index 0000000000..f0cec06a72 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_openbsd_cgo.go @@ -0,0 +1,51 @@ +// +build openbsd,cgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +//#include +import "C" + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + ptspath, err := C.ptsname(C.int(f.Fd())) + if err != nil { + return "", err + } + return C.GoString(ptspath), nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + if _, err := C.grantpt(C.int(f.Fd())); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go new file mode 100644 index 0000000000..daccce2058 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go @@ -0,0 +1,47 @@ +// +build openbsd,!cgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// +// Implementing the functions below requires cgo support. Non-cgo stubs +// versions are defined below to enable cross-compilation of source code +// that depends on these functions, but the resultant cross-compiled +// binaries cannot actually be used. If the stub function(s) below are +// actually invoked they will display an error message and cause the +// calling process to exit. +// + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +func ptsname(f *os.File) (string, error) { + panic("ptsname() support requires cgo.") +} + +func unlockpt(f *os.File) error { + panic("unlockpt() support requires cgo.") +} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go new file mode 100644 index 0000000000..e36a68edd1 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_cgo.go @@ -0,0 +1,51 @@ +// +build solaris,cgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +//#include +import "C" + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + ptspath, err := C.ptsname(C.int(f.Fd())) + if err != nil { + return "", err + } + return C.GoString(ptspath), nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + if _, err := C.grantpt(C.int(f.Fd())); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go new file mode 100644 index 0000000000..eb0bd2c36b --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_nocgo.go @@ -0,0 +1,47 @@ +// +build solaris,!cgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// +// Implementing the functions below requires cgo support. Non-cgo stubs +// versions are defined below to enable cross-compilation of source code +// that depends on these functions, but the resultant cross-compiled +// binaries cannot actually be used. If the stub function(s) below are +// actually invoked they will display an error message and cause the +// calling process to exit. +// + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +func ptsname(f *os.File) (string, error) { + panic("ptsname() support requires cgo.") +} + +func unlockpt(f *os.File) error { + panic("unlockpt() support requires cgo.") +} diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go new file mode 100644 index 0000000000..7ae773c53e --- /dev/null +++ b/vendor/github.com/containerd/console/tc_unix.go @@ -0,0 +1,91 @@ +// +build darwin freebsd linux openbsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package console + +import ( + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *unix.Termios) error { + termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet) + if err != nil { + return err + } + *p = *termios + return nil +} + +func tcset(fd uintptr, p *unix.Termios) error { + return unix.IoctlSetTermios(int(fd), cmdTcSet, p) +} + +func tcgwinsz(fd uintptr) (WinSize, error) { + var ws WinSize + + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + if err != nil { + return ws, err + } + + // Translate from unix.Winsize to console.WinSize + ws.Height = uws.Row + ws.Width = uws.Col + ws.x = uws.Xpixel + ws.y = uws.Ypixel + return ws, nil +} + +func tcswinsz(fd uintptr, ws WinSize) error { + // Translate from console.WinSize to unix.Winsize + + var uws unix.Winsize + uws.Row = ws.Height + uws.Col = ws.Width + uws.Xpixel = ws.x + uws.Ypixel = ws.y + + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws) +} + +func setONLCR(fd uintptr, enable bool) error { + var termios unix.Termios + if err := tcget(fd, &termios); err != nil { + return err + } + if enable { + // Set +onlcr so we can act like a real terminal + termios.Oflag |= unix.ONLCR + } else { + // Set -onlcr so we don't have to deal with \r. + termios.Oflag &^= unix.ONLCR + } + return tcset(fd, &termios) +} + +func cfmakeraw(t unix.Termios) unix.Termios { + t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + t.Oflag &^= unix.OPOST + t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + t.Cflag &^= (unix.CSIZE | unix.PARENB) + t.Cflag &^= unix.CS8 + t.Cc[unix.VMIN] = 1 + t.Cc[unix.VTIME] = 0 + + return t +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 3d2e057c9a..3df8d23368 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -181,8 +181,24 @@ type ImageBuildOptions struct { Target string SessionID string Platform string + // Version specifies the version of the unerlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string } +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit = "2" +) + // ImageBuildResponse holds information // returned by a server after building // an image. 
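For context, a minimal sketch of how a caller might opt in to the BuildKit builder through the two new ImageBuildOptions fields above (this is not part of the patch; the image tag, build ID, and the tar build context are hypothetical placeholders). Version selects the BuildKit-based builder, and BuildID is the handle that can later be passed to the BuildCancel helper this patch adds to the client:

package buildkitexample

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// buildWithBuildKit requests a BuildKit build and asks the daemon to cancel it
// gracefully if ctx is done. buildCtx is assumed to be a tar stream of the
// build context.
func buildWithBuildKit(ctx context.Context, buildCtx io.Reader) error {
	cli, err := client.NewEnvClient()
	if err != nil {
		return err
	}

	opts := types.ImageBuildOptions{
		Tags:    []string{"example:latest"}, // hypothetical image tag
		Version: types.BuilderBuildKit,      // "2": use the BuildKit-based builder
		BuildID: "example-build-id",         // hypothetical id, reused by BuildCancel below
	}

	resp, err := cli.ImageBuild(ctx, buildCtx, opts)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Drain the progress stream in the background.
	done := make(chan error, 1)
	go func() {
		_, copyErr := io.Copy(os.Stdout, resp.Body)
		done <- copyErr
	}()

	select {
	case <-ctx.Done():
		// Ask the daemon to stop the ongoing build identified by BuildID.
		_ = cli.BuildCancel(context.Background(), opts.BuildID)
		return ctx.Err()
	case err := <-done:
		return err
	}
}

Setting Version on every request matches what imageBuildOptionsToQuery now sends as the version query parameter further down in this patch.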
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index 729f4eb6c4..06c0ca3a69 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -512,7 +512,8 @@ type DiskUsage struct { Images []*ImageSummary Containers []*Container Volumes []*Volume - BuilderSize int64 + BuildCache []*BuildCache + BuilderSize int64 // deprecated } // ContainersPruneReport contains the response for Engine API: @@ -585,3 +586,17 @@ type PushResult struct { type BuildResult struct { ID string } + +// BuildCache contains information about a build cache record +type BuildCache struct { + ID string + Mutable bool + InUse bool + Size int64 + + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int + Parent string + Description string +} diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 0000000000..4cf8c980a9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// BuildCancel requests the daemon to cancel ongoing build request +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + if err != nil { + return err + } + defer ensureReaderClosed(serverResp) + + return nil +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 2b14831fd2..35f5dd86dc 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -9,7 +9,6 @@ import ( "net/http" "net/http/httputil" "net/url" - "strings" "time" "github.com/docker/docker/api/types" @@ -17,21 +16,6 @@ import ( "github.com/pkg/errors" ) -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - // postHijacked sends a POST request and hijacks the connection. func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { bodyEncoded, err := encodeData(body) @@ -54,96 +38,9 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err } -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. 
-func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := time.Until(dialer.Deadline) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - config = tlsConfigClone(config) - config.ServerName = hostname - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. 
- return &tlsClientCon{conn, rawConn}, nil -} - func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) + return tls.Dial(proto, addr, tlsConfig) } if proto == "npipe" { return sockets.DialPipe(addr, 32*time.Second) diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 6721460316..dff19b989f 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -133,5 +133,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur if options.Platform != "" { query.Set("platform", strings.ToLower(options.Platform)) } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) return query, nil } diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 0487a0b9f3..9250c468a6 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -86,6 +86,7 @@ type DistributionAPIClient interface { type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone.go b/vendor/github.com/docker/docker/client/tlsconfig_clone.go deleted file mode 100644 index 88200e92c3..0000000000 --- a/vendor/github.com/docker/docker/client/tlsconfig_clone.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.8 - -package client // import "github.com/docker/docker/client" - -import "crypto/tls" - -// tlsConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func tlsConfigClone(c *tls.Config) *tls.Config { - return c.Clone() -} diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go deleted file mode 100644 index e298542367..0000000000 --- a/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.7,!go1.8 - -package client // import "github.com/docker/docker/client" - -import "crypto/tls" - -// tlsConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. 
-func tlsConfigClone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf index 858182b841..4273e14b7c 100644 --- a/vendor/github.com/docker/docker/vendor.conf +++ b/vendor/github.com/docker/docker/vendor.conf @@ -27,10 +27,13 @@ github.com/imdario/mergo 0.2.1 golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 # buildkit -github.com/moby/buildkit 43e758232a0ac7d50c6a11413186e16684fc1e4f -github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f +github.com/moby/buildkit 4b8dc5b08bdd1b9a29d0f767d94a1360e668da14 +github.com/tonistiigi/fsutil 8839685ae8c3c8bd67d0ce28e9b3157b23c1c7a5 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 +github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 +github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc +github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b #get libnetwork packages @@ -72,8 +75,8 @@ github.com/pborman/uuid v1.0 google.golang.org/grpc v1.12.0 -# When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly -github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340 +# This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal +github.com/opencontainers/runc 0e561642f81e84ebd0b3afd6ec510c75a2ccb71b github.com/opencontainers/runtime-spec v1.0.1 github.com/opencontainers/image-spec v1.0.1 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 @@ -112,11 +115,11 @@ github.com/googleapis/gax-go v2.0.0 google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9 # containerd -github.com/containerd/containerd c7083eed5d8633d54c25fe81aa609010a4f2e495 +github.com/containerd/containerd 63522d9eaa5a0443d225642c4b6f4f5fdedf932b github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 -github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925 +github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08 github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 @@ -131,7 +134,7 @@ github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b65068 golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491 golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb github.com/hashicorp/go-memdb 
cb9a474f84cc5e41b273b20c6927680b2a8776ad -github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/google/shlex/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README new file mode 100644 index 0000000000..c86bcc066f --- /dev/null +++ b/vendor/github.com/google/shlex/README @@ -0,0 +1,2 @@ +go-shlex is a simple lexer for go that supports shell-style quoting, +commenting, and escaping. diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go new file mode 100644 index 0000000000..3cb37b7e48 --- /dev/null +++ b/vendor/github.com/google/shlex/shlex.go @@ -0,0 +1,417 @@ +/* +Copyright 2012 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package shlex implements a simple lexer which splits input in to tokens using +shell-style rules for quoting and commenting. + +The basic use case uses the default ASCII lexer to split a string into sub-strings: + + shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"} + +To process a stream of strings: + + l := NewLexer(os.Stdin) + for ; token, err := l.Next(); err != nil { + // process token + } + +To access the raw token stream (which includes tokens for comments): + + t := NewTokenizer(os.Stdin) + for ; token, err := t.Next(); err != nil { + // process token + } + +*/ +package shlex + +import ( + "bufio" + "fmt" + "io" + "strings" +) + +// TokenType is a top-level token classification: A word, space, comment, unknown. +type TokenType int + +// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape. +type runeTokenClass int + +// the internal state used by the lexer state machine +type lexerState int + +// Token is a (type, value) pair representing a lexographical token. +type Token struct { + tokenType TokenType + value string +} + +// Equal reports whether tokens a, and b, are equal. +// Two tokens are equal if both their types and values are equal. 
A nil token can +// never be equal to another token. +func (a *Token) Equal(b *Token) bool { + if a == nil || b == nil { + return false + } + if a.tokenType != b.tokenType { + return false + } + return a.value == b.value +} + +// Named classes of UTF-8 runes +const ( + spaceRunes = " \t\r\n" + escapingQuoteRunes = `"` + nonEscapingQuoteRunes = "'" + escapeRunes = `\` + commentRunes = "#" +) + +// Classes of rune token +const ( + unknownRuneClass runeTokenClass = iota + spaceRuneClass + escapingQuoteRuneClass + nonEscapingQuoteRuneClass + escapeRuneClass + commentRuneClass + eofRuneClass +) + +// Classes of lexographic token +const ( + UnknownToken TokenType = iota + WordToken + SpaceToken + CommentToken +) + +// Lexer state machine states +const ( + startState lexerState = iota // no runes have been seen + inWordState // processing regular runes in a word + escapingState // we have just consumed an escape rune; the next rune is literal + escapingQuotedState // we have just consumed an escape rune within a quoted string + quotingEscapingState // we are within a quoted string that supports escaping ("...") + quotingState // we are within a string that does not support escaping ('...') + commentState // we are within a comment (everything following an unquoted or unescaped # +) + +// tokenClassifier is used for classifying rune characters. +type tokenClassifier map[rune]runeTokenClass + +func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) { + for _, runeChar := range runes { + typeMap[runeChar] = tokenType + } +} + +// newDefaultClassifier creates a new classifier for ASCII characters. +func newDefaultClassifier() tokenClassifier { + t := tokenClassifier{} + t.addRuneClass(spaceRunes, spaceRuneClass) + t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass) + t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass) + t.addRuneClass(escapeRunes, escapeRuneClass) + t.addRuneClass(commentRunes, commentRuneClass) + return t +} + +// ClassifyRune classifiees a rune +func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass { + return t[runeVal] +} + +// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped. +type Lexer Tokenizer + +// NewLexer creates a new lexer from an input stream. +func NewLexer(r io.Reader) *Lexer { + + return (*Lexer)(NewTokenizer(r)) +} + +// Next returns the next word, or an error. If there are no more words, +// the error will be io.EOF. +func (l *Lexer) Next() (string, error) { + for { + token, err := (*Tokenizer)(l).Next() + if err != nil { + return "", err + } + switch token.tokenType { + case WordToken: + return token.value, nil + case CommentToken: + // skip comments + default: + return "", fmt.Errorf("Unknown token type: %v", token.tokenType) + } + } +} + +// Tokenizer turns an input stream into a sequence of typed tokens +type Tokenizer struct { + input bufio.Reader + classifier tokenClassifier +} + +// NewTokenizer creates a new tokenizer from an input stream. +func NewTokenizer(r io.Reader) *Tokenizer { + input := bufio.NewReader(r) + classifier := newDefaultClassifier() + return &Tokenizer{ + input: *input, + classifier: classifier} +} + +// scanStream scans the stream for the next token using the internal state machine. +// It will panic if it encounters a rune which it does not know how to handle. 
+func (t *Tokenizer) scanStream() (*Token, error) { + state := startState + var tokenType TokenType + var value []rune + var nextRune rune + var nextRuneType runeTokenClass + var err error + + for { + nextRune, _, err = t.input.ReadRune() + nextRuneType = t.classifier.ClassifyRune(nextRune) + + if err == io.EOF { + nextRuneType = eofRuneClass + err = nil + } else if err != nil { + return nil, err + } + + switch state { + case startState: // no runes read yet + { + switch nextRuneType { + case eofRuneClass: + { + return nil, io.EOF + } + case spaceRuneClass: + { + } + case escapingQuoteRuneClass: + { + tokenType = WordToken + state = quotingEscapingState + } + case nonEscapingQuoteRuneClass: + { + tokenType = WordToken + state = quotingState + } + case escapeRuneClass: + { + tokenType = WordToken + state = escapingState + } + case commentRuneClass: + { + tokenType = CommentToken + state = commentState + } + default: + { + tokenType = WordToken + value = append(value, nextRune) + state = inWordState + } + } + } + case inWordState: // in a regular word + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + t.input.UnreadRune() + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = quotingEscapingState + } + case nonEscapingQuoteRuneClass: + { + state = quotingState + } + case escapeRuneClass: + { + state = escapingState + } + default: + { + value = append(value, nextRune) + } + } + } + case escapingState: // the rune after an escape character + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = inWordState + value = append(value, nextRune) + } + } + } + case escapingQuotedState: // the next rune after an escape character, in double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = quotingEscapingState + value = append(value, nextRune) + } + } + } + case quotingEscapingState: // in escaping double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = inWordState + } + case escapeRuneClass: + { + state = escapingQuotedState + } + default: + { + value = append(value, nextRune) + } + } + } + case quotingState: // in non-escaping single quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case nonEscapingQuoteRuneClass: + { + state = inWordState + } + default: + { + value = append(value, nextRune) + } + } + } + case commentState: // in a comment + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + if nextRune == '\n' { + state = startState + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } else { + value = append(value, nextRune) + } + } + 
default: + { + value = append(value, nextRune) + } + } + } + default: + { + return nil, fmt.Errorf("Unexpected state: %v", state) + } + } + } +} + +// Next returns the next token in the stream. +func (t *Tokenizer) Next() (*Token, error) { + return t.scanStream() +} + +// Split partitions a string into a slice of strings. +func Split(s string) ([]string, error) { + l := NewLexer(strings.NewReader(s)) + subStrings := make([]string, 0) + for { + word, err := l.Next() + if err != nil { + if err == io.EOF { + return subStrings, nil + } + return subStrings, err + } + subStrings = append(subStrings, word) + } +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go new file mode 100644 index 0000000000..e92bd7f04d --- /dev/null +++ b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go @@ -0,0 +1,4871 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: control.proto + +/* + Package moby_buildkit_v1 is a generated protocol buffer package. + + It is generated from these files: + control.proto + + It has these top-level messages: + PruneRequest + DiskUsageRequest + DiskUsageResponse + UsageRecord + SolveRequest + CacheOptions + SolveResponse + StatusRequest + StatusResponse + Vertex + VertexStatus + VertexLog + BytesMessage + ListWorkersRequest + ListWorkersResponse + WorkerRecord +*/ +package moby_buildkit_v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/golang/protobuf/ptypes/timestamp" +import pb "github.com/moby/buildkit/solver/pb" + +import time "time" +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import types "github.com/gogo/protobuf/types" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type PruneRequest struct { +} + +func (m *PruneRequest) Reset() { *m = PruneRequest{} } +func (m *PruneRequest) String() string { return proto.CompactTextString(m) } +func (*PruneRequest) ProtoMessage() {} +func (*PruneRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} } + +type DiskUsageRequest struct { + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` +} + +func (m *DiskUsageRequest) Reset() { *m = DiskUsageRequest{} } +func (m *DiskUsageRequest) String() string { return proto.CompactTextString(m) } +func (*DiskUsageRequest) ProtoMessage() {} +func (*DiskUsageRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} } + +func (m *DiskUsageRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +type DiskUsageResponse struct { + Record []*UsageRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"` +} + +func (m *DiskUsageResponse) Reset() { *m = DiskUsageResponse{} } +func (m *DiskUsageResponse) String() string { return proto.CompactTextString(m) } +func (*DiskUsageResponse) ProtoMessage() {} +func (*DiskUsageResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{2} } + +func (m *DiskUsageResponse) GetRecord() []*UsageRecord { + if m != nil { + return m.Record + } + return nil +} + +type UsageRecord struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Mutable bool `protobuf:"varint,2,opt,name=Mutable,proto3" json:"Mutable,omitempty"` + InUse bool `protobuf:"varint,3,opt,name=InUse,proto3" json:"InUse,omitempty"` + Size_ int64 `protobuf:"varint,4,opt,name=Size,proto3" json:"Size,omitempty"` + Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` + CreatedAt time.Time `protobuf:"bytes,6,opt,name=CreatedAt,stdtime" json:"CreatedAt"` + LastUsedAt *time.Time `protobuf:"bytes,7,opt,name=LastUsedAt,stdtime" json:"LastUsedAt,omitempty"` + UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"` + Description string `protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"` +} + +func (m *UsageRecord) Reset() { *m = UsageRecord{} } +func (m *UsageRecord) String() string { return proto.CompactTextString(m) } +func (*UsageRecord) ProtoMessage() {} +func (*UsageRecord) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{3} } + +func (m *UsageRecord) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *UsageRecord) GetMutable() bool { + if m != nil { + return m.Mutable + } + return false +} + +func (m *UsageRecord) GetInUse() bool { + if m != nil { + return m.InUse + } + return false +} + +func (m *UsageRecord) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *UsageRecord) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *UsageRecord) GetCreatedAt() time.Time { + if m != nil { + return m.CreatedAt + } + return time.Time{} +} + +func (m *UsageRecord) GetLastUsedAt() *time.Time { + if m != nil { + return m.LastUsedAt + } + return nil +} + +func (m *UsageRecord) GetUsageCount() int64 { + if m != nil { + return m.UsageCount + } + return 0 +} + +func (m *UsageRecord) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type SolveRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + 
Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition" json:"Definition,omitempty"` + Exporter string `protobuf:"bytes,3,opt,name=Exporter,proto3" json:"Exporter,omitempty"` + ExporterAttrs map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrs" json:"ExporterAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"` + Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"` + FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache" json:"Cache"` +} + +func (m *SolveRequest) Reset() { *m = SolveRequest{} } +func (m *SolveRequest) String() string { return proto.CompactTextString(m) } +func (*SolveRequest) ProtoMessage() {} +func (*SolveRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} } + +func (m *SolveRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *SolveRequest) GetDefinition() *pb.Definition { + if m != nil { + return m.Definition + } + return nil +} + +func (m *SolveRequest) GetExporter() string { + if m != nil { + return m.Exporter + } + return "" +} + +func (m *SolveRequest) GetExporterAttrs() map[string]string { + if m != nil { + return m.ExporterAttrs + } + return nil +} + +func (m *SolveRequest) GetSession() string { + if m != nil { + return m.Session + } + return "" +} + +func (m *SolveRequest) GetFrontend() string { + if m != nil { + return m.Frontend + } + return "" +} + +func (m *SolveRequest) GetFrontendAttrs() map[string]string { + if m != nil { + return m.FrontendAttrs + } + return nil +} + +func (m *SolveRequest) GetCache() CacheOptions { + if m != nil { + return m.Cache + } + return CacheOptions{} +} + +type CacheOptions struct { + ExportRef string `protobuf:"bytes,1,opt,name=ExportRef,proto3" json:"ExportRef,omitempty"` + ImportRefs []string `protobuf:"bytes,2,rep,name=ImportRefs" json:"ImportRefs,omitempty"` + ExportAttrs map[string]string `protobuf:"bytes,3,rep,name=ExportAttrs" json:"ExportAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *CacheOptions) Reset() { *m = CacheOptions{} } +func (m *CacheOptions) String() string { return proto.CompactTextString(m) } +func (*CacheOptions) ProtoMessage() {} +func (*CacheOptions) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} } + +func (m *CacheOptions) GetExportRef() string { + if m != nil { + return m.ExportRef + } + return "" +} + +func (m *CacheOptions) GetImportRefs() []string { + if m != nil { + return m.ImportRefs + } + return nil +} + +func (m *CacheOptions) GetExportAttrs() map[string]string { + if m != nil { + return m.ExportAttrs + } + return nil +} + +type SolveResponse struct { + ExporterResponse map[string]string `protobuf:"bytes,1,rep,name=ExporterResponse" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *SolveResponse) Reset() { *m = SolveResponse{} } +func (m *SolveResponse) String() string { return proto.CompactTextString(m) } +func (*SolveResponse) ProtoMessage() {} +func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} } + +func (m *SolveResponse) 
GetExporterResponse() map[string]string { + if m != nil { + return m.ExporterResponse + } + return nil +} + +type StatusRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} } + +func (m *StatusRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +type StatusResponse struct { + Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes" json:"vertexes,omitempty"` + Statuses []*VertexStatus `protobuf:"bytes,2,rep,name=statuses" json:"statuses,omitempty"` + Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs" json:"logs,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} } + +func (m *StatusResponse) GetVertexes() []*Vertex { + if m != nil { + return m.Vertexes + } + return nil +} + +func (m *StatusResponse) GetStatuses() []*VertexStatus { + if m != nil { + return m.Statuses + } + return nil +} + +func (m *StatusResponse) GetLogs() []*VertexLog { + if m != nil { + return m.Logs + } + return nil +} + +type Vertex struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Inputs []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,rep,name=inputs,customtype=github.com/opencontainers/go-digest.Digest" json:"inputs"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Cached bool `protobuf:"varint,4,opt,name=cached,proto3" json:"cached,omitempty"` + Started *time.Time `protobuf:"bytes,5,opt,name=started,stdtime" json:"started,omitempty"` + Completed *time.Time `protobuf:"bytes,6,opt,name=completed,stdtime" json:"completed,omitempty"` + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *Vertex) Reset() { *m = Vertex{} } +func (m *Vertex) String() string { return proto.CompactTextString(m) } +func (*Vertex) ProtoMessage() {} +func (*Vertex) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} } + +func (m *Vertex) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Vertex) GetCached() bool { + if m != nil { + return m.Cached + } + return false +} + +func (m *Vertex) GetStarted() *time.Time { + if m != nil { + return m.Started + } + return nil +} + +func (m *Vertex) GetCompleted() *time.Time { + if m != nil { + return m.Completed + } + return nil +} + +func (m *Vertex) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type VertexStatus struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + // TODO: add started, completed + Timestamp 
time.Time `protobuf:"bytes,6,opt,name=timestamp,stdtime" json:"timestamp"` + Started *time.Time `protobuf:"bytes,7,opt,name=started,stdtime" json:"started,omitempty"` + Completed *time.Time `protobuf:"bytes,8,opt,name=completed,stdtime" json:"completed,omitempty"` +} + +func (m *VertexStatus) Reset() { *m = VertexStatus{} } +func (m *VertexStatus) String() string { return proto.CompactTextString(m) } +func (*VertexStatus) ProtoMessage() {} +func (*VertexStatus) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} } + +func (m *VertexStatus) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *VertexStatus) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VertexStatus) GetCurrent() int64 { + if m != nil { + return m.Current + } + return 0 +} + +func (m *VertexStatus) GetTotal() int64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *VertexStatus) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *VertexStatus) GetStarted() *time.Time { + if m != nil { + return m.Started + } + return nil +} + +func (m *VertexStatus) GetCompleted() *time.Time { + if m != nil { + return m.Completed + } + return nil +} + +type VertexLog struct { + Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` + Timestamp time.Time `protobuf:"bytes,2,opt,name=timestamp,stdtime" json:"timestamp"` + Stream int64 `protobuf:"varint,3,opt,name=stream,proto3" json:"stream,omitempty"` + Msg []byte `protobuf:"bytes,4,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *VertexLog) Reset() { *m = VertexLog{} } +func (m *VertexLog) String() string { return proto.CompactTextString(m) } +func (*VertexLog) ProtoMessage() {} +func (*VertexLog) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} } + +func (m *VertexLog) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *VertexLog) GetStream() int64 { + if m != nil { + return m.Stream + } + return 0 +} + +func (m *VertexLog) GetMsg() []byte { + if m != nil { + return m.Msg + } + return nil +} + +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (m *BytesMessage) String() string { return proto.CompactTextString(m) } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{12} } + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type ListWorkersRequest struct { + Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"` +} + +func (m *ListWorkersRequest) Reset() { *m = ListWorkersRequest{} } +func (m *ListWorkersRequest) String() string { return proto.CompactTextString(m) } +func (*ListWorkersRequest) ProtoMessage() {} +func (*ListWorkersRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{13} } + +func (m *ListWorkersRequest) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} + +type ListWorkersResponse struct { + Record []*WorkerRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"` +} + +func (m *ListWorkersResponse) Reset() { *m = ListWorkersResponse{} } +func (m *ListWorkersResponse) String() string { return proto.CompactTextString(m) } +func 
(*ListWorkersResponse) ProtoMessage() {} +func (*ListWorkersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} } + +func (m *ListWorkersResponse) GetRecord() []*WorkerRecord { + if m != nil { + return m.Record + } + return nil +} + +type WorkerRecord struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *WorkerRecord) Reset() { *m = WorkerRecord{} } +func (m *WorkerRecord) String() string { return proto.CompactTextString(m) } +func (*WorkerRecord) ProtoMessage() {} +func (*WorkerRecord) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} } + +func (m *WorkerRecord) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *WorkerRecord) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest") + proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest") + proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") + proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord") + proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest") + proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions") + proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse") + proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse") + proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex") + proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus") + proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog") + proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage") + proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest") + proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse") + proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.WorkerRecord") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Control service + +type ControlClient interface { + DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) + Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) + Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) + Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) + ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) +} + +type controlClient struct { + cc *grpc.ClientConn +} + +func NewControlClient(cc *grpc.ClientConn) ControlClient { + return &controlClient{cc} +} + +func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) { + out := new(DiskUsageResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[0], c.cc, "/moby.buildkit.v1.Control/Prune", opts...) + if err != nil { + return nil, err + } + x := &controlPruneClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Control_PruneClient interface { + Recv() (*UsageRecord, error) + grpc.ClientStream +} + +type controlPruneClient struct { + grpc.ClientStream +} + +func (x *controlPruneClient) Recv() (*UsageRecord, error) { + m := new(UsageRecord) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { + out := new(SolveResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[1], c.cc, "/moby.buildkit.v1.Control/Status", opts...) + if err != nil { + return nil, err + } + x := &controlStatusClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Control_StatusClient interface { + Recv() (*StatusResponse, error) + grpc.ClientStream +} + +type controlStatusClient struct { + grpc.ClientStream +} + +func (x *controlStatusClient) Recv() (*StatusResponse, error) { + m := new(StatusResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Control_serviceDesc.Streams[2], c.cc, "/moby.buildkit.v1.Control/Session", opts...) 
+ if err != nil { + return nil, err + } + x := &controlSessionClient{stream} + return x, nil +} + +type Control_SessionClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type controlSessionClient struct { + grpc.ClientStream +} + +func (x *controlSessionClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *controlSessionClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) { + out := new(ListWorkersResponse) + err := grpc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Control service + +type ControlServer interface { + DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error) + Prune(*PruneRequest, Control_PruneServer) error + Solve(context.Context, *SolveRequest) (*SolveResponse, error) + Status(*StatusRequest, Control_StatusServer) error + Session(Control_SessionServer) error + ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) +} + +func RegisterControlServer(s *grpc.Server, srv ControlServer) { + s.RegisterService(&_Control_serviceDesc, srv) +} + +func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiskUsageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).DiskUsage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/DiskUsage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(PruneRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControlServer).Prune(m, &controlPruneServer{stream}) +} + +type Control_PruneServer interface { + Send(*UsageRecord) error + grpc.ServerStream +} + +type controlPruneServer struct { + grpc.ServerStream +} + +func (x *controlPruneServer) Send(m *UsageRecord) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SolveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).Solve(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/Solve", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).Solve(ctx, req.(*SolveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StatusRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControlServer).Status(m, &controlStatusServer{stream}) +} + +type Control_StatusServer interface { + Send(*StatusResponse) error + grpc.ServerStream +} + +type controlStatusServer struct 
{ + grpc.ServerStream +} + +func (x *controlStatusServer) Send(m *StatusResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ControlServer).Session(&controlSessionServer{stream}) +} + +type Control_SessionServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type controlSessionServer struct { + grpc.ServerStream +} + +func (x *controlSessionServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *controlSessionServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListWorkersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListWorkers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/ListWorkers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Control_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.buildkit.v1.Control", + HandlerType: (*ControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DiskUsage", + Handler: _Control_DiskUsage_Handler, + }, + { + MethodName: "Solve", + Handler: _Control_Solve_Handler, + }, + { + MethodName: "ListWorkers", + Handler: _Control_ListWorkers_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Prune", + Handler: _Control_Prune_Handler, + ServerStreams: true, + }, + { + StreamName: "Status", + Handler: _Control_Status_Handler, + ServerStreams: true, + }, + { + StreamName: "Session", + Handler: _Control_Session_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "control.proto", +} + +func (m *PruneRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Filter))) + i += copy(dAtA[i:], m.Filter) + } + return i, nil +} + +func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Record) > 0 { + for _, msg := range m.Record { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UsageRecord) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Mutable { + dAtA[i] = 0x10 + i++ + if m.Mutable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.InUse { + dAtA[i] = 0x18 + i++ + if m.InUse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Size_ != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Size_)) + } + if len(m.Parent) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Parent))) + i += copy(dAtA[i:], m.Parent) + } + dAtA[i] = 0x32 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.CreatedAt))) + n1, err := types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.LastUsedAt != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.LastUsedAt))) + n2, err := types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.UsageCount != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.UsageCount)) + } + if len(m.Description) > 0 { + dAtA[i] = 0x4a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + return i, nil +} + +func (m *SolveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + if m.Definition != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Definition.Size())) + n3, err := m.Definition.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if len(m.Exporter) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter))) + i += copy(dAtA[i:], m.Exporter) + } + if len(m.ExporterAttrs) > 0 { + for k, _ := range m.ExporterAttrs { + dAtA[i] = 0x22 + i++ + v := m.ExporterAttrs[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Session) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Session))) + i += copy(dAtA[i:], m.Session) + } + if len(m.Frontend) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend))) + i += copy(dAtA[i:], m.Frontend) + } + if len(m.FrontendAttrs) > 0 { + for k, _ := range m.FrontendAttrs { + dAtA[i] = 0x3a + i++ + v := m.FrontendAttrs[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + 
i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cache.Size())) + n4, err := m.Cache.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *CacheOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ExportRef) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRef))) + i += copy(dAtA[i:], m.ExportRef) + } + if len(m.ImportRefs) > 0 { + for _, s := range m.ImportRefs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ExportAttrs) > 0 { + for k, _ := range m.ExportAttrs { + dAtA[i] = 0x1a + i++ + v := m.ExportAttrs[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *SolveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ExporterResponse) > 0 { + for k, _ := range m.ExporterResponse { + dAtA[i] = 0xa + i++ + v := m.ExporterResponse[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ref) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i += copy(dAtA[i:], m.Ref) + } + return i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Vertexes) > 0 { + for _, msg := range m.Vertexes { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Statuses) > 0 { + for _, msg := range m.Statuses { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Logs) > 0 { + for _, msg := range 
m.Logs { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Vertex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vertex) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if len(m.Inputs) > 0 { + for _, s := range m.Inputs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Name) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Cached { + dAtA[i] = 0x20 + i++ + if m.Cached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Started != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Started))) + n5, err := types.StdTimeMarshalTo(*m.Started, dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Completed != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Completed))) + n6, err := types.StdTimeMarshalTo(*m.Completed, dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if len(m.Error) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *VertexStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Vertex) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i += copy(dAtA[i:], m.Vertex) + } + if len(m.Name) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Current != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Current)) + } + if m.Total != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Total)) + } + dAtA[i] = 0x32 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp))) + n7, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.Started != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Started))) + n8, err := types.StdTimeMarshalTo(*m.Started, dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Completed != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(*m.Completed))) + n9, err := types.StdTimeMarshalTo(*m.Completed, dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *VertexLog) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Vertex) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i += copy(dAtA[i:], m.Vertex) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(types.SizeOfStdTime(m.Timestamp))) + n10, err := types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Stream != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Stream)) + } + if len(m.Msg) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Msg))) + i += copy(dAtA[i:], m.Msg) + } + return i, nil +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Record) > 0 { + for _, msg := range m.Record { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WorkerRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func encodeVarintControl(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PruneRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *DiskUsageRequest) Size() (n int) { + var l 
int + _ = l + l = len(m.Filter) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *DiskUsageResponse) Size() (n int) { + var l int + _ = l + if len(m.Record) > 0 { + for _, e := range m.Record { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *UsageRecord) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Mutable { + n += 2 + } + if m.InUse { + n += 2 + } + if m.Size_ != 0 { + n += 1 + sovControl(uint64(m.Size_)) + } + l = len(m.Parent) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovControl(uint64(l)) + if m.LastUsedAt != nil { + l = types.SizeOfStdTime(*m.LastUsedAt) + n += 1 + l + sovControl(uint64(l)) + } + if m.UsageCount != 0 { + n += 1 + sovControl(uint64(m.UsageCount)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *SolveRequest) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Definition != nil { + l = m.Definition.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Exporter) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ExporterAttrs) > 0 { + for k, v := range m.ExporterAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + l = len(m.Session) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.FrontendAttrs) > 0 { + for k, v := range m.FrontendAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + l = m.Cache.Size() + n += 1 + l + sovControl(uint64(l)) + return n +} + +func (m *CacheOptions) Size() (n int) { + var l int + _ = l + l = len(m.ExportRef) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ImportRefs) > 0 { + for _, s := range m.ImportRefs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.ExportAttrs) > 0 { + for k, v := range m.ExportAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SolveResponse) Size() (n int) { + var l int + _ = l + if len(m.ExporterResponse) > 0 { + for k, v := range m.ExporterResponse { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + var l int + _ = l + if len(m.Vertexes) > 0 { + for _, e := range m.Vertexes { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Logs) > 0 { + for _, e := range m.Logs { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *Vertex) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l 
+ sovControl(uint64(l)) + } + if len(m.Inputs) > 0 { + for _, s := range m.Inputs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Cached { + n += 2 + } + if m.Started != nil { + l = types.SizeOfStdTime(*m.Started) + n += 1 + l + sovControl(uint64(l)) + } + if m.Completed != nil { + l = types.SizeOfStdTime(*m.Completed) + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *VertexStatus) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Current != 0 { + n += 1 + sovControl(uint64(m.Current)) + } + if m.Total != 0 { + n += 1 + sovControl(uint64(m.Total)) + } + l = types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovControl(uint64(l)) + if m.Started != nil { + l = types.SizeOfStdTime(*m.Started) + n += 1 + l + sovControl(uint64(l)) + } + if m.Completed != nil { + l = types.SizeOfStdTime(*m.Completed) + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *VertexLog) Size() (n int) { + var l int + _ = l + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovControl(uint64(l)) + if m.Stream != 0 { + n += 1 + sovControl(uint64(m.Stream)) + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *BytesMessage) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListWorkersRequest) Size() (n int) { + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListWorkersResponse) Size() (n int) { + var l int + _ = l + if len(m.Record) > 0 { + for _, e := range m.Record { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *WorkerRecord) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + return n +} + +func sovControl(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozControl(x uint64) (n int) { + return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PruneRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy 
< 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Record = append(m.Record, &UsageRecord{}) + if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*UsageRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mutable = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InUse = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastUsedAt == nil { + m.LastUsedAt = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType) + } + m.UsageCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UsageCount |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Definition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definition == nil { + m.Definition = &pb.Definition{} + } + if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exporter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExporterAttrs == nil { + m.ExporterAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + 
skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExporterAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Session = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frontend = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendAttrs == nil { + m.FrontendAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return 
ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExportRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportRefs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportRefs = append(m.ImportRefs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field ExportAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExportAttrs == nil { + m.ExportAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExportAttrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ExporterResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExporterResponse == nil { + m.ExporterResponse = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExporterResponse[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertexes = append(m.Vertexes, &Vertex{}) + if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, &VertexStatus{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logs = append(m.Logs, &VertexLog{}) + if err := 
m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Vertex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vertex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Cached = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Started == nil { + m.Started = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Completed == nil { + m.Completed = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VertexStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Started == nil { + m.Started = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Completed == nil { + m.Completed = new(time.Time) + } + if err := types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VertexLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VertexLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + m.Stream = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stream |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + 
return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...) + if m.Msg == nil { + m.Msg = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Record = append(m.Record, &WorkerRecord{}) + if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 
{ + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkerRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipControl(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthControl + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipControl(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowControl = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("control.proto", fileDescriptorControl) } + +var fileDescriptorControl = []byte{ + // 1192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0x23, 0x45, + 0x10, 0x66, 0x6c, 0xc7, 0x3f, 0x65, 0x27, 0x0a, 0x0d, 0xac, 0x46, 0x03, 0x24, 0x66, 0x00, 0xc9, + 0x8a, 0x76, 0xc7, 0xd9, 0xc0, 0x22, 0xc8, 0x61, 0xb5, 0xeb, 0x78, 0x11, 0x89, 0x12, 0xb1, 0x74, + 0x36, 0xac, 0xc4, 0x6d, 0x6c, 0x77, 0xbc, 0xa3, 0xd8, 0xd3, 0xa6, 0xbb, 0x27, 0xda, 0xf0, 0x14, + 0x1c, 0xb8, 0xf2, 0x14, 0x1c, 0x38, 0x73, 0x40, 0xda, 0x23, 0x67, 0x0e, 0x59, 0x94, 0x3b, 0x3c, + 0x03, 0xea, 0x9f, 0xb1, 0xdb, 0x1e, 0xe7, 0xc7, 0xd9, 0x53, 0xba, 0x3a, 0x5f, 0x7d, 0x53, 0x5d, + 0x5f, 0xb9, 0xaa, 0x60, 0xb9, 0x4b, 0x63, 0xc1, 
0xe8, 0x20, 0x18, 0x31, 0x2a, 0x28, 0x5a, 0x1d, + 0xd2, 0xce, 0x59, 0xd0, 0x49, 0xa2, 0x41, 0xef, 0x24, 0x12, 0xc1, 0xe9, 0x7d, 0xef, 0x5e, 0x3f, + 0x12, 0x2f, 0x92, 0x4e, 0xd0, 0xa5, 0xc3, 0x66, 0x9f, 0xf6, 0x69, 0x53, 0x01, 0x3b, 0xc9, 0xb1, + 0xb2, 0x94, 0xa1, 0x4e, 0x9a, 0xc0, 0x5b, 0xef, 0x53, 0xda, 0x1f, 0x90, 0x09, 0x4a, 0x44, 0x43, + 0xc2, 0x45, 0x38, 0x1c, 0x19, 0xc0, 0x5d, 0x8b, 0x4f, 0x7e, 0xac, 0x99, 0x7e, 0xac, 0xc9, 0xe9, + 0xe0, 0x94, 0xb0, 0xe6, 0xa8, 0xd3, 0xa4, 0x23, 0xae, 0xd1, 0xfe, 0x0a, 0xd4, 0x9e, 0xb2, 0x24, + 0x26, 0x98, 0xfc, 0x98, 0x10, 0x2e, 0xfc, 0x0d, 0x58, 0x6d, 0x47, 0xfc, 0xe4, 0x88, 0x87, 0xfd, + 0xf4, 0x0e, 0xdd, 0x81, 0xe2, 0x71, 0x34, 0x10, 0x84, 0xb9, 0x4e, 0xdd, 0x69, 0x54, 0xb0, 0xb1, + 0xfc, 0x3d, 0x78, 0xdb, 0xc2, 0xf2, 0x11, 0x8d, 0x39, 0x41, 0x0f, 0xa0, 0xc8, 0x48, 0x97, 0xb2, + 0x9e, 0xeb, 0xd4, 0xf3, 0x8d, 0xea, 0xd6, 0x87, 0xc1, 0xec, 0x8b, 0x03, 0xe3, 0x20, 0x41, 0xd8, + 0x80, 0xfd, 0x3f, 0x72, 0x50, 0xb5, 0xee, 0xd1, 0x0a, 0xe4, 0x76, 0xdb, 0xe6, 0x7b, 0xb9, 0xdd, + 0x36, 0x72, 0xa1, 0x74, 0x90, 0x88, 0xb0, 0x33, 0x20, 0x6e, 0xae, 0xee, 0x34, 0xca, 0x38, 0x35, + 0xd1, 0xbb, 0xb0, 0xb4, 0x1b, 0x1f, 0x71, 0xe2, 0xe6, 0xd5, 0xbd, 0x36, 0x10, 0x82, 0xc2, 0x61, + 0xf4, 0x13, 0x71, 0x0b, 0x75, 0xa7, 0x91, 0xc7, 0xea, 0x2c, 0xdf, 0xf1, 0x34, 0x64, 0x24, 0x16, + 0xee, 0x92, 0x7e, 0x87, 0xb6, 0x50, 0x0b, 0x2a, 0x3b, 0x8c, 0x84, 0x82, 0xf4, 0x1e, 0x0b, 0xb7, + 0x58, 0x77, 0x1a, 0xd5, 0x2d, 0x2f, 0xd0, 0x69, 0x0e, 0xd2, 0x34, 0x07, 0xcf, 0xd2, 0x34, 0xb7, + 0xca, 0xaf, 0xce, 0xd7, 0xdf, 0xfa, 0xf9, 0xf5, 0xba, 0x83, 0x27, 0x6e, 0xe8, 0x11, 0xc0, 0x7e, + 0xc8, 0xc5, 0x11, 0x57, 0x24, 0xa5, 0x6b, 0x49, 0x0a, 0x8a, 0xc0, 0xf2, 0x41, 0x6b, 0x00, 0x2a, + 0x01, 0x3b, 0x34, 0x89, 0x85, 0x5b, 0x56, 0x71, 0x5b, 0x37, 0xa8, 0x0e, 0xd5, 0x36, 0xe1, 0x5d, + 0x16, 0x8d, 0x44, 0x44, 0x63, 0xb7, 0xa2, 0x9e, 0x60, 0x5f, 0xf9, 0xbf, 0x14, 0xa0, 0x76, 0x28, + 0x35, 0x4e, 0x85, 0x5b, 0x85, 0x3c, 0x26, 0xc7, 0x26, 0x8b, 0xf2, 0x88, 0x02, 0x80, 0x36, 0x39, + 0x8e, 0xe2, 0x48, 0x71, 0xe4, 0x54, 0x98, 0x2b, 0xc1, 0xa8, 0x13, 0x4c, 0x6e, 0xb1, 0x85, 0x40, + 0x1e, 0x94, 0x9f, 0xbc, 0x1c, 0x51, 0x26, 0xc5, 0xcf, 0x2b, 0x9a, 0xb1, 0x8d, 0x9e, 0xc3, 0x72, + 0x7a, 0x7e, 0x2c, 0x04, 0xe3, 0x6e, 0x41, 0x09, 0x7e, 0x3f, 0x2b, 0xb8, 0x1d, 0x54, 0x30, 0xe5, + 0xf3, 0x24, 0x16, 0xec, 0x0c, 0x4f, 0xf3, 0x48, 0xad, 0x0f, 0x09, 0xe7, 0x32, 0x42, 0x2d, 0x54, + 0x6a, 0xca, 0x70, 0xbe, 0x66, 0x34, 0x16, 0x24, 0xee, 0x29, 0xa1, 0x2a, 0x78, 0x6c, 0xcb, 0x70, + 0xd2, 0xb3, 0x0e, 0xa7, 0x74, 0xa3, 0x70, 0xa6, 0x7c, 0x4c, 0x38, 0x53, 0x77, 0x68, 0x1b, 0x96, + 0x76, 0xc2, 0xee, 0x0b, 0xa2, 0x34, 0xa9, 0x6e, 0xad, 0x65, 0x09, 0xd5, 0xbf, 0xbf, 0x55, 0x22, + 0xf0, 0x56, 0x41, 0x96, 0x07, 0xd6, 0x2e, 0xde, 0x23, 0x40, 0xd9, 0xf7, 0x4a, 0x5d, 0x4e, 0xc8, + 0x59, 0xaa, 0xcb, 0x09, 0x39, 0x93, 0x45, 0x7c, 0x1a, 0x0e, 0x12, 0x5d, 0xdc, 0x15, 0xac, 0x8d, + 0xed, 0xdc, 0x97, 0x8e, 0x64, 0xc8, 0x86, 0xb8, 0x08, 0x83, 0xff, 0xda, 0x81, 0x9a, 0x1d, 0x21, + 0xfa, 0x00, 0x2a, 0x3a, 0xa8, 0x49, 0x71, 0x4c, 0x2e, 0x64, 0x1d, 0xee, 0x0e, 0x8d, 0xc1, 0xdd, + 0x5c, 0x3d, 0xdf, 0xa8, 0x60, 0xeb, 0x06, 0x7d, 0x07, 0x55, 0x0d, 0xd6, 0x59, 0xce, 0xab, 0x2c, + 0x37, 0xaf, 0x4e, 0x4a, 0x60, 0x79, 0xe8, 0x1c, 0xdb, 0x1c, 0xde, 0x43, 0x58, 0x9d, 0x05, 0x2c, + 0xf4, 0xc2, 0xdf, 0x1d, 0x58, 0x36, 0xa2, 0x9a, 0x2e, 0x14, 0xa6, 0x8c, 0x84, 0xa5, 0x77, 0xa6, + 0x1f, 0x3d, 0xb8, 0xb4, 0x1e, 0x34, 0x2c, 0x98, 0xf5, 0xd3, 0xf1, 0x66, 0xe8, 0xbc, 0x1d, 0x78, + 0x6f, 0x2e, 0x74, 0xa1, 0xc8, 0x3f, 0x82, 0xe5, 0x43, 0x11, 0x8a, 0x84, 
0x5f, 0xfa, 0x93, 0xf5, + 0x7f, 0x73, 0x60, 0x25, 0xc5, 0x98, 0xd7, 0x7d, 0x0e, 0xe5, 0x53, 0xc2, 0x04, 0x79, 0x49, 0xb8, + 0x79, 0x95, 0x9b, 0x7d, 0xd5, 0xf7, 0x0a, 0x81, 0xc7, 0x48, 0xb4, 0x0d, 0x65, 0xae, 0x78, 0x88, + 0x96, 0x75, 0x6e, 0x29, 0x6b, 0x2f, 0xf3, 0xbd, 0x31, 0x1e, 0x35, 0xa1, 0x30, 0xa0, 0xfd, 0x54, + 0xed, 0xf7, 0x2f, 0xf3, 0xdb, 0xa7, 0x7d, 0xac, 0x80, 0xfe, 0x79, 0x0e, 0x8a, 0xfa, 0x0e, 0xed, + 0x41, 0xb1, 0x17, 0xf5, 0x09, 0x17, 0xfa, 0x55, 0xad, 0x2d, 0xf9, 0x03, 0xf9, 0xfb, 0x7c, 0x7d, + 0xc3, 0x1a, 0x54, 0x74, 0x44, 0x62, 0x39, 0x28, 0xc3, 0x28, 0x26, 0x8c, 0x37, 0xfb, 0xf4, 0x9e, + 0x76, 0x09, 0xda, 0xea, 0x0f, 0x36, 0x0c, 0x92, 0x2b, 0x8a, 0x47, 0x89, 0x30, 0x85, 0x79, 0x3b, + 0x2e, 0xcd, 0x20, 0x47, 0x44, 0x1c, 0x0e, 0x89, 0xe9, 0x6b, 0xea, 0x2c, 0x47, 0x44, 0x57, 0xd6, + 0x6d, 0x4f, 0x0d, 0x8e, 0x32, 0x36, 0x16, 0xda, 0x86, 0x12, 0x17, 0x21, 0x13, 0xa4, 0xa7, 0x5a, + 0xd2, 0x4d, 0x7a, 0x7b, 0xea, 0x80, 0x1e, 0x42, 0xa5, 0x4b, 0x87, 0xa3, 0x01, 0x91, 0xde, 0xc5, + 0x1b, 0x7a, 0x4f, 0x5c, 0x64, 0xf5, 0x10, 0xc6, 0x28, 0x53, 0x53, 0xa5, 0x82, 0xb5, 0xe1, 0xff, + 0x97, 0x83, 0x9a, 0x2d, 0x56, 0x66, 0x62, 0xee, 0x41, 0x51, 0x4b, 0xaf, 0xab, 0xee, 0x76, 0xa9, + 0xd2, 0x0c, 0x73, 0x53, 0xe5, 0x42, 0xa9, 0x9b, 0x30, 0x35, 0x4e, 0xf5, 0x90, 0x4d, 0x4d, 0x19, + 0xb0, 0xa0, 0x22, 0x1c, 0xa8, 0x54, 0xe5, 0xb1, 0x36, 0xe4, 0x94, 0x1d, 0xaf, 0x2a, 0x8b, 0x4d, + 0xd9, 0xb1, 0x9b, 0x2d, 0x43, 0xe9, 0x8d, 0x64, 0x28, 0x2f, 0x2c, 0x83, 0xff, 0xa7, 0x03, 0x95, + 0x71, 0x95, 0x5b, 0xd9, 0x75, 0xde, 0x38, 0xbb, 0x53, 0x99, 0xc9, 0xdd, 0x2e, 0x33, 0x77, 0xa0, + 0xc8, 0x05, 0x23, 0xe1, 0x50, 0x69, 0x94, 0xc7, 0xc6, 0x92, 0xfd, 0x64, 0xc8, 0xfb, 0x4a, 0xa1, + 0x1a, 0x96, 0x47, 0xdf, 0x87, 0x5a, 0xeb, 0x4c, 0x10, 0x7e, 0x40, 0xb8, 0x5c, 0x2e, 0xa4, 0xb6, + 0xbd, 0x50, 0x84, 0xea, 0x1d, 0x35, 0xac, 0xce, 0xfe, 0x5d, 0x40, 0xfb, 0x11, 0x17, 0xcf, 0x29, + 0x3b, 0x21, 0x8c, 0xcf, 0xdb, 0x03, 0xf3, 0xd6, 0x1e, 0x78, 0x00, 0xef, 0x4c, 0xa1, 0x4d, 0x97, + 0xfa, 0x62, 0x66, 0x13, 0x9c, 0xd3, 0x6d, 0xb4, 0xcb, 0xcc, 0x2a, 0xf8, 0xab, 0x03, 0x35, 0xfb, + 0x1f, 0x99, 0xca, 0x6e, 0x41, 0x71, 0x3f, 0xec, 0x90, 0x41, 0xda, 0xc6, 0x36, 0xae, 0x26, 0x0e, + 0x34, 0x58, 0xf7, 0x71, 0xe3, 0xe9, 0x7d, 0x05, 0x55, 0xeb, 0x7a, 0x91, 0x9e, 0xbd, 0xf5, 0x6f, + 0x1e, 0x4a, 0x3b, 0x7a, 0xa9, 0x47, 0xcf, 0xa0, 0x32, 0x5e, 0x81, 0x91, 0x9f, 0x8d, 0x63, 0x76, + 0x97, 0xf6, 0x3e, 0xbe, 0x12, 0x63, 0x32, 0xf7, 0x0d, 0x2c, 0xa9, 0xa5, 0x1c, 0xcd, 0x49, 0x99, + 0xbd, 0xad, 0x7b, 0x57, 0x2f, 0xd7, 0x9b, 0x8e, 0x64, 0x52, 0xd3, 0x6d, 0x1e, 0x93, 0xbd, 0x06, + 0x79, 0xeb, 0xd7, 0x8c, 0x45, 0x74, 0x00, 0x45, 0xd3, 0x68, 0xe6, 0x41, 0xed, 0x19, 0xe6, 0xd5, + 0x2f, 0x07, 0x68, 0xb2, 0x4d, 0x07, 0x1d, 0x8c, 0x77, 0xbc, 0x79, 0xa1, 0xd9, 0x05, 0xea, 0x5d, + 0xf3, 0xff, 0x86, 0xb3, 0xe9, 0xa0, 0x1f, 0xa0, 0x6a, 0x95, 0x20, 0xfa, 0x24, 0xeb, 0x92, 0xad, + 0x67, 0xef, 0xd3, 0x6b, 0x50, 0x3a, 0xd8, 0x56, 0xed, 0xd5, 0xc5, 0x9a, 0xf3, 0xd7, 0xc5, 0x9a, + 0xf3, 0xcf, 0xc5, 0x9a, 0xd3, 0x29, 0xaa, 0x5f, 0xe4, 0x67, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, + 0x4d, 0x94, 0x5a, 0xb6, 0xd8, 0x0d, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto new file mode 100644 index 0000000000..7944ce89a8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/api/services/control/control.proto @@ -0,0 +1,121 @@ +syntax = "proto3"; + +package moby.buildkit.v1; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import 
"google/protobuf/timestamp.proto"; +import "github.com/moby/buildkit/solver/pb/ops.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service Control { + rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse); + rpc Prune(PruneRequest) returns (stream UsageRecord); + rpc Solve(SolveRequest) returns (SolveResponse); + rpc Status(StatusRequest) returns (stream StatusResponse); + rpc Session(stream BytesMessage) returns (stream BytesMessage); + rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse); +} + +message PruneRequest { + // TODO: filter +} + +message DiskUsageRequest { + string filter = 1; // FIXME: this should be containerd-compatible repeated string? +} + +message DiskUsageResponse { + repeated UsageRecord record = 1; +} + +message UsageRecord { + string ID = 1; + bool Mutable = 2; + bool InUse = 3; + int64 Size = 4; + string Parent = 5; + google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true]; + int64 UsageCount = 8; + string Description = 9; +} + +message SolveRequest { + string Ref = 1; + pb.Definition Definition = 2; + string Exporter = 3; + map ExporterAttrs = 4; + string Session = 5; + string Frontend = 6; + map FrontendAttrs = 7; + CacheOptions Cache = 8 [(gogoproto.nullable) = false]; +} + +message CacheOptions { + string ExportRef = 1; + repeated string ImportRefs = 2; + map ExportAttrs = 3; +} + +message SolveResponse { + map ExporterResponse = 1; +} + +message StatusRequest { + string Ref = 1; +} + +message StatusResponse { + repeated Vertex vertexes = 1; + repeated VertexStatus statuses = 2; + repeated VertexLog logs = 3; +} + +message Vertex { + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string name = 3; + bool cached = 4; + google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ]; + google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ]; + string error = 7; // typed errors? 
+} + +message VertexStatus { + string ID = 1; + string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string name = 3; + int64 current = 4; + int64 total = 5; + // TODO: add started, completed + google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ]; + google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ]; +} + +message VertexLog { + string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + int64 stream = 3; + bytes msg = 4; +} + +message BytesMessage { + bytes data = 1; +} + +message ListWorkersRequest { + repeated string filter = 1; // containerd style +} + +message ListWorkersResponse { + repeated WorkerRecord record = 1; +} + +message WorkerRecord { + string ID = 1; + map<string, string> Labels = 2; +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/generate.go b/vendor/github.com/moby/buildkit/api/services/control/generate.go new file mode 100644 index 0000000000..1c161155f5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/api/services/control/generate.go @@ -0,0 +1,3 @@ +package moby_buildkit_v1 + +//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go new file mode 100644 index 0000000000..ca4b34fdd5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client.go @@ -0,0 +1,132 @@ +package client + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "time" + + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/util/appdefaults" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +type Client struct { + conn *grpc.ClientConn +} + +type ClientOpt interface{} + +// New returns a new buildkit client. Address can be empty for the system-default address. +func New(address string, opts ...ClientOpt) (*Client, error) { + gopts := []grpc.DialOption{ + grpc.WithTimeout(30 * time.Second), + grpc.WithDialer(dialer), + grpc.FailOnNonTempDialError(true), + } + needWithInsecure := true + for _, o := range opts { + if _, ok := o.(*withBlockOpt); ok { + gopts = append(gopts, grpc.WithBlock(), grpc.FailOnNonTempDialError(true)) + } + if credInfo, ok := o.(*withCredentials); ok { + opt, err := loadCredentials(credInfo) + if err != nil { + return nil, err + } + gopts = append(gopts, opt) + needWithInsecure = false + } + if wt, ok := o.(*withTracer); ok { + gopts = append(gopts, + grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())), + grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))) + } + } + if needWithInsecure { + gopts = append(gopts, grpc.WithInsecure()) + } + if address == "" { + address = appdefaults.Address + } + conn, err := grpc.Dial(address, gopts...) + if err != nil { + return nil, errors.Wrapf(err, "failed to dial %q . 
make sure buildkitd is running", address) + } + c := &Client{ + conn: conn, + } + return c, nil +} + +func (c *Client) controlClient() controlapi.ControlClient { + return controlapi.NewControlClient(c.conn) +} + +func (c *Client) Close() error { + return c.conn.Close() +} + +type withBlockOpt struct{} + +func WithBlock() ClientOpt { + return &withBlockOpt{} +} + +type withCredentials struct { + ServerName string + CACert string + Cert string + Key string +} + +// WithCredentials configures the TLS parameters of the client. +// Arguments: +// * serverName: specifies the name of the target server +// * ca: specifies the filepath of the CA certificate to use for verification +// * cert: specifies the filepath of the client certificate +// * key: specifies the filepath of the client key +func WithCredentials(serverName, ca, cert, key string) ClientOpt { + return &withCredentials{serverName, ca, cert, key} +} + +func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { + ca, err := ioutil.ReadFile(opts.CACert) + if err != nil { + return nil, errors.Wrap(err, "could not read ca certificate") + } + + certPool := x509.NewCertPool() + if ok := certPool.AppendCertsFromPEM(ca); !ok { + return nil, errors.New("failed to append ca certs") + } + + cfg := &tls.Config{ + ServerName: opts.ServerName, + RootCAs: certPool, + } + + // we will produce an error if the user forgot about either cert or key if at least one is specified + if opts.Cert != "" || opts.Key != "" { + cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key) + if err != nil { + return nil, errors.Wrap(err, "could not read certificate/key") + } + cfg.Certificates = []tls.Certificate{cert} + cfg.BuildNameToCertificate() + } + + return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil +} + +func WithTracer(t opentracing.Tracer) ClientOpt { + return &withTracer{t} +} + +type withTracer struct { + tracer opentracing.Tracer +} diff --git a/vendor/github.com/moby/buildkit/client/client_unix.go b/vendor/github.com/moby/buildkit/client/client_unix.go new file mode 100644 index 0000000000..93afb956f1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client_unix.go @@ -0,0 +1,19 @@ +// +build !windows + +package client + +import ( + "net" + "strings" + "time" + + "github.com/pkg/errors" +) + +func dialer(address string, timeout time.Duration) (net.Conn, error) { + addrParts := strings.SplitN(address, "://", 2) + if len(addrParts) != 2 { + return nil, errors.Errorf("invalid address %s", address) + } + return net.DialTimeout(addrParts[0], addrParts[1], timeout) +} diff --git a/vendor/github.com/moby/buildkit/client/client_windows.go b/vendor/github.com/moby/buildkit/client/client_windows.go new file mode 100644 index 0000000000..75905f520b --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client_windows.go @@ -0,0 +1,24 @@ +package client + +import ( + "net" + "strings" + "time" + + "github.com/Microsoft/go-winio" + "github.com/pkg/errors" +) + +func dialer(address string, timeout time.Duration) (net.Conn, error) { + addrParts := strings.SplitN(address, "://", 2) + if len(addrParts) != 2 { + return nil, errors.Errorf("invalid address %s", address) + } + switch addrParts[0] { + case "npipe": + address = strings.Replace(addrParts[1], "/", "\\", -1) + return winio.DialPipe(address, &timeout) + default: + return net.DialTimeout(addrParts[0], addrParts[1], timeout) + } +} diff --git a/vendor/github.com/moby/buildkit/client/diskusage.go b/vendor/github.com/moby/buildkit/client/diskusage.go new file mode 100644 index 
0000000000..5ed5043223 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/diskusage.go @@ -0,0 +1,73 @@ +package client + +import ( + "context" + "sort" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/pkg/errors" +) + +type UsageInfo struct { + ID string + Mutable bool + InUse bool + Size int64 + + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int + Parent string + Description string +} + +func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) { + info := &DiskUsageInfo{} + for _, o := range opts { + o(info) + } + + req := &controlapi.DiskUsageRequest{Filter: info.Filter} + resp, err := c.controlClient().DiskUsage(ctx, req) + if err != nil { + return nil, errors.Wrap(err, "failed to call diskusage") + } + + var du []*UsageInfo + + for _, d := range resp.Record { + du = append(du, &UsageInfo{ + ID: d.ID, + Mutable: d.Mutable, + InUse: d.InUse, + Size: d.Size_, + Parent: d.Parent, + CreatedAt: d.CreatedAt, + Description: d.Description, + UsageCount: int(d.UsageCount), + LastUsedAt: d.LastUsedAt, + }) + } + + sort.Slice(du, func(i, j int) bool { + if du[i].Size == du[j].Size { + return du[i].ID > du[j].ID + } + return du[i].Size > du[j].Size + }) + + return du, nil +} + +type DiskUsageOption func(*DiskUsageInfo) + +type DiskUsageInfo struct { + Filter string +} + +func WithFilter(f string) DiskUsageOption { + return func(di *DiskUsageInfo) { + di.Filter = f + } +} diff --git a/vendor/github.com/moby/buildkit/client/exporters.go b/vendor/github.com/moby/buildkit/client/exporters.go new file mode 100644 index 0000000000..4160d92a73 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/exporters.go @@ -0,0 +1,8 @@ +package client + +const ( + ExporterImage = "image" + ExporterLocal = "local" + ExporterOCI = "oci" + ExporterDocker = "docker" +) diff --git a/vendor/github.com/moby/buildkit/client/graph.go b/vendor/github.com/moby/buildkit/client/graph.go new file mode 100644 index 0000000000..141a393cf9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/graph.go @@ -0,0 +1,45 @@ +package client + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +type Vertex struct { + Digest digest.Digest + Inputs []digest.Digest + Name string + Started *time.Time + Completed *time.Time + Cached bool + Error string +} + +type VertexStatus struct { + ID string + Vertex digest.Digest + Name string + Total int64 + Current int64 + Timestamp time.Time + Started *time.Time + Completed *time.Time +} + +type VertexLog struct { + Vertex digest.Digest + Stream int + Data []byte + Timestamp time.Time +} + +type SolveStatus struct { + Vertexes []*Vertex + Statuses []*VertexStatus + Logs []*VertexLog +} + +type SolveResponse struct { + ExporterResponse map[string]string +} diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go new file mode 100644 index 0000000000..abb9885434 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -0,0 +1,372 @@ +package llb + +import ( + _ "crypto/sha256" + "sort" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type Meta struct { + Args []string + Env EnvList + Cwd string + User string + ProxyEnv *ProxyEnv +} + +func NewExecOp(root Output, meta Meta, readOnly bool, md OpMetadata) *ExecOp { + e := &ExecOp{meta: meta, cachedOpMetadata: md} + rootMount := &mount{ + target: pb.RootMount, + source: root, + readonly: 
readOnly, + } + e.mounts = append(e.mounts, rootMount) + if readOnly { + e.root = root + } else { + e.root = &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)} + } + rootMount.output = e.root + + return e +} + +type mount struct { + target string + readonly bool + source Output + output Output + selector string + cacheID string + // hasOutput bool +} + +type ExecOp struct { + root Output + mounts []*mount + meta Meta + cachedPBDigest digest.Digest + cachedPB []byte + cachedOpMetadata OpMetadata + isValidated bool +} + +func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output { + m := &mount{ + target: target, + source: source, + } + for _, o := range opt { + o(m) + } + e.mounts = append(e.mounts, m) + if m.readonly { + m.output = source + } else { + m.output = &output{vertex: e, getIndex: e.getMountIndexFn(m)} + } + e.cachedPB = nil + e.isValidated = false + return m.output +} + +func (e *ExecOp) GetMount(target string) Output { + for _, m := range e.mounts { + if m.target == target { + return m.output + } + } + return nil +} + +func (e *ExecOp) Validate() error { + if e.isValidated { + return nil + } + if len(e.meta.Args) == 0 { + return errors.Errorf("arguments are required") + } + if e.meta.Cwd == "" { + return errors.Errorf("working directory is required") + } + for _, m := range e.mounts { + if m.source != nil { + if err := m.source.Vertex().Validate(); err != nil { + return err + } + } + } + e.isValidated = true + return nil +} + +func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) { + if e.cachedPB != nil { + return e.cachedPBDigest, e.cachedPB, &e.cachedOpMetadata, nil + } + if err := e.Validate(); err != nil { + return "", nil, nil, err + } + // make sure mounts are sorted + sort.Slice(e.mounts, func(i, j int) bool { + return e.mounts[i].target < e.mounts[j].target + }) + + peo := &pb.ExecOp{ + Meta: &pb.Meta{ + Args: e.meta.Args, + Env: e.meta.Env.ToArray(), + Cwd: e.meta.Cwd, + User: e.meta.User, + }, + } + + if p := e.meta.ProxyEnv; p != nil { + peo.Meta.ProxyEnv = &pb.ProxyEnv{ + HttpProxy: p.HttpProxy, + HttpsProxy: p.HttpsProxy, + FtpProxy: p.FtpProxy, + NoProxy: p.NoProxy, + } + } + + pop := &pb.Op{ + Op: &pb.Op_Exec{ + Exec: peo, + }, + } + + outIndex := 0 + for _, m := range e.mounts { + inputIndex := pb.InputIndex(len(pop.Inputs)) + if m.source != nil { + inp, err := m.source.ToInput() + if err != nil { + return "", nil, nil, err + } + + newInput := true + + for i, inp2 := range pop.Inputs { + if *inp == *inp2 { + inputIndex = pb.InputIndex(i) + newInput = false + break + } + } + + if newInput { + pop.Inputs = append(pop.Inputs, inp) + } + } else { + inputIndex = pb.Empty + } + + outputIndex := pb.OutputIndex(-1) + if !m.readonly && m.cacheID == "" { + outputIndex = pb.OutputIndex(outIndex) + outIndex++ + } + + pm := &pb.Mount{ + Input: inputIndex, + Dest: m.target, + Readonly: m.readonly, + Output: outputIndex, + Selector: m.selector, + } + if m.cacheID != "" { + pm.MountType = pb.MountType_CACHE + pm.CacheOpt = &pb.CacheOpt{ + ID: m.cacheID, + } + } + peo.Mounts = append(peo.Mounts, pm) + } + + dt, err := pop.Marshal() + if err != nil { + return "", nil, nil, err + } + e.cachedPBDigest = digest.FromBytes(dt) + e.cachedPB = dt + return e.cachedPBDigest, dt, &e.cachedOpMetadata, nil +} + +func (e *ExecOp) Output() Output { + return e.root +} + +func (e *ExecOp) Inputs() (inputs []Output) { + mm := map[Output]struct{}{} + for _, m := range e.mounts { + if m.source != nil { + mm[m.source] = struct{}{} + } + } + for o := 
range mm { + inputs = append(inputs, o) + } + return +} + +func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) { + return func() (pb.OutputIndex, error) { + // make sure mounts are sorted + sort.Slice(e.mounts, func(i, j int) bool { + return e.mounts[i].target < e.mounts[j].target + }) + + i := 0 + for _, m2 := range e.mounts { + if m2.readonly || m2.cacheID != "" { + continue + } + if m == m2 { + return pb.OutputIndex(i), nil + } + i++ + } + return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target) + } +} + +type ExecState struct { + State + exec *ExecOp +} + +func (e ExecState) AddMount(target string, source State, opt ...MountOption) State { + return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...)) +} + +func (e ExecState) GetMount(target string) State { + return NewState(e.exec.GetMount(target)) +} + +func (e ExecState) Root() State { + return e.State +} + +type MountOption func(*mount) + +func Readonly(m *mount) { + m.readonly = true +} + +func SourcePath(src string) MountOption { + return func(m *mount) { + m.selector = src + } +} + +func AsPersistentCacheDir(id string) MountOption { + return func(m *mount) { + m.cacheID = id + } +} + +type RunOption interface { + SetRunOption(es *ExecInfo) +} + +type runOptionFunc func(*ExecInfo) + +func (fn runOptionFunc) SetRunOption(ei *ExecInfo) { + fn(ei) +} + +func Shlex(str string) RunOption { + return Shlexf(str) +} +func Shlexf(str string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = shlexf(str, v...)(ei.State) + }) +} + +func Args(a []string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = args(a...)(ei.State) + }) +} + +func AddEnv(key, value string) RunOption { + return AddEnvf(key, value) +} + +func AddEnvf(key, value string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.AddEnvf(key, value, v...) + }) +} + +func User(str string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.User(str) + }) +} + +func Dir(str string) RunOption { + return Dirf(str) +} +func Dirf(str string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.Dirf(str, v...) + }) +} + +func Reset(s State) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.Reset(s) + }) +} + +func With(so ...StateOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.With(so...) 
+ }) +} + +func AddMount(dest string, mountState State, opts ...MountOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts}) + }) +} + +func ReadonlyRootFS() RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.ReadonlyRootFS = true + }) +} + +func WithProxy(ps ProxyEnv) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.ProxyEnv = &ps + }) +} + +type ExecInfo struct { + opMetaWrapper + State State + Mounts []MountInfo + ReadonlyRootFS bool + ProxyEnv *ProxyEnv +} + +type MountInfo struct { + Target string + Source Output + Opts []MountOption +} + +type ProxyEnv struct { + HttpProxy string + HttpsProxy string + FtpProxy string + NoProxy string +} diff --git a/vendor/github.com/moby/buildkit/client/llb/marshal.go b/vendor/github.com/moby/buildkit/client/llb/marshal.go new file mode 100644 index 0000000000..4d8ad5557a --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/marshal.go @@ -0,0 +1,60 @@ +package llb + +import ( + "io" + "io/ioutil" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +// Definition is the LLB definition structure with per-vertex metadata entries +// Corresponds to the Definition structure defined in solver/pb.Definition. +type Definition struct { + Def [][]byte + Metadata map[digest.Digest]OpMetadata +} + +func (def *Definition) ToPB() *pb.Definition { + md := make(map[digest.Digest]OpMetadata) + for k, v := range def.Metadata { + md[k] = v + } + return &pb.Definition{ + Def: def.Def, + Metadata: md, + } +} + +func (def *Definition) FromPB(x *pb.Definition) { + def.Def = x.Def + def.Metadata = make(map[digest.Digest]OpMetadata) + for k, v := range x.Metadata { + def.Metadata[k] = v + } +} + +type OpMetadata = pb.OpMetadata + +func WriteTo(def *Definition, w io.Writer) error { + b, err := def.ToPB().Marshal() + if err != nil { + return err + } + _, err = w.Write(b) + return err +} + +func ReadFrom(r io.Reader) (*Definition, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + var pbDef pb.Definition + if err := pbDef.Unmarshal(b); err != nil { + return nil, err + } + var def Definition + def.FromPB(&pbDef) + return &def, nil +} diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go new file mode 100644 index 0000000000..54449ff606 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/meta.go @@ -0,0 +1,152 @@ +package llb + +import ( + "fmt" + "path" + + "github.com/google/shlex" +) + +type contextKeyT string + +var ( + keyArgs = contextKeyT("llb.exec.args") + keyDir = contextKeyT("llb.exec.dir") + keyEnv = contextKeyT("llb.exec.env") + keyUser = contextKeyT("llb.exec.user") +) + +func addEnv(key, value string) StateOption { + return addEnvf(key, value) +} + +func addEnvf(key, value string, v ...interface{}) StateOption { + return func(s State) State { + return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...))) + } +} + +func dir(str string) StateOption { + return dirf(str) +} + +func dirf(str string, v ...interface{}) StateOption { + return func(s State) State { + value := fmt.Sprintf(str, v...) 
+ if !path.IsAbs(value) { + prev := getDir(s) + if prev == "" { + prev = "/" + } + value = path.Join(prev, value) + } + return s.WithValue(keyDir, value) + } +} + +func user(str string) StateOption { + return func(s State) State { + return s.WithValue(keyUser, str) + } +} + +func reset(s_ State) StateOption { + return func(s State) State { + s = NewState(s.Output()) + s.ctx = s_.ctx + return s + } +} + +func getEnv(s State) EnvList { + v := s.Value(keyEnv) + if v != nil { + return v.(EnvList) + } + return EnvList{} +} + +func getDir(s State) string { + v := s.Value(keyDir) + if v != nil { + return v.(string) + } + return "" +} + +func getArgs(s State) []string { + v := s.Value(keyArgs) + if v != nil { + return v.([]string) + } + return nil +} + +func getUser(s State) string { + v := s.Value(keyUser) + if v != nil { + return v.(string) + } + return "" +} + +func args(args ...string) StateOption { + return func(s State) State { + return s.WithValue(keyArgs, args) + } +} + +func shlexf(str string, v ...interface{}) StateOption { + return func(s State) State { + arg, err := shlex.Split(fmt.Sprintf(str, v...)) + if err != nil { + // TODO: handle error + } + return args(arg...)(s) + } +} + +type EnvList []KeyValue + +type KeyValue struct { + key string + value string +} + +func (e EnvList) AddOrReplace(k, v string) EnvList { + e = e.Delete(k) + e = append(e, KeyValue{key: k, value: v}) + return e +} + +func (e EnvList) Delete(k string) EnvList { + e = append([]KeyValue(nil), e...) + if i, ok := e.Index(k); ok { + return append(e[:i], e[i+1:]...) + } + return e +} + +func (e EnvList) Get(k string) (string, bool) { + if index, ok := e.Index(k); ok { + return e[index].value, true + } + return "", false +} + +func (e EnvList) Index(k string) (int, bool) { + for i, kv := range e { + if kv.key == k { + return i, true + } + } + return -1, false +} + +func (e EnvList) ToArray() []string { + out := make([]string, 0, len(e)) + for _, kv := range e { + out = append(out, kv.key+"="+kv.value) + } + return out +} diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go new file mode 100644 index 0000000000..bac738c967 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/resolver.go @@ -0,0 +1,17 @@ +package llb + +import ( + "context" + + digest "github.com/opencontainers/go-digest" +) + +func WithMetaResolver(mr ImageMetaResolver) ImageOption { + return ImageOptionFunc(func(ii *ImageInfo) { + ii.metaResolver = mr + }) +} + +type ImageMetaResolver interface { + ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error) +} diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go new file mode 100644 index 0000000000..e7e92abede --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/source.go @@ -0,0 +1,344 @@ +package llb + +import ( + "context" + _ "crypto/sha256" + "encoding/json" + "os" + "strconv" + "strings" + + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type SourceOp struct { + id string + attrs map[string]string + output Output + cachedPBDigest digest.Digest + cachedPB []byte + cachedOpMetadata OpMetadata + err error +} + +func NewSource(id string, attrs map[string]string, md OpMetadata) *SourceOp { + s := &SourceOp{ + id: id, + attrs: attrs, + cachedOpMetadata: md, + } + s.output = &output{vertex: s} + return s +} + 
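+// For illustration only (a minimal sketch, not authoritative): the exported constructors defined
+// below and later in this file all reduce to NewSource with a scheme-prefixed identifier and an
+// attribute map, wrapped in a State via NewState, roughly:
+//
+//	st := Image("docker.io/library/alpine:latest")
+//	repo := Git("github.com/moby/buildkit", "master", KeepGitDir())
+//	buildctx := Local("context", ExcludePatterns([]string{".git"}))
+//
+// Each resulting state carries a single SourceOp output and no inputs.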
+func (s *SourceOp) Validate() error { + if s.err != nil { + return s.err + } + if s.id == "" { + return errors.Errorf("source identifier can't be empty") + } + return nil +} + +func (s *SourceOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) { + if s.cachedPB != nil { + return s.cachedPBDigest, s.cachedPB, &s.cachedOpMetadata, nil + } + if err := s.Validate(); err != nil { + return "", nil, nil, err + } + + proto := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs}, + }, + } + dt, err := proto.Marshal() + if err != nil { + return "", nil, nil, err + } + s.cachedPB = dt + s.cachedPBDigest = digest.FromBytes(dt) + return s.cachedPBDigest, dt, &s.cachedOpMetadata, nil +} + +func (s *SourceOp) Output() Output { + return s.output +} + +func (s *SourceOp) Inputs() []Output { + return nil +} + +func Source(id string) State { + return NewState(NewSource(id, nil, OpMetadata{}).Output()) +} + +func Image(ref string, opts ...ImageOption) State { + r, err := reference.ParseNormalizedNamed(ref) + if err == nil { + ref = reference.TagNameOnly(r).String() + } + var info ImageInfo + for _, opt := range opts { + opt.SetImageOption(&info) + } + src := NewSource("docker-image://"+ref, nil, info.Metadata()) // controversial + if err != nil { + src.err = err + } + if info.metaResolver != nil { + _, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref) + if err != nil { + src.err = err + } else { + var img struct { + Config struct { + Env []string `json:"Env,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + User string `json:"User,omitempty"` + } `json:"config,omitempty"` + } + if err := json.Unmarshal(dt, &img); err != nil { + src.err = err + } else { + st := NewState(src.Output()) + for _, env := range img.Config.Env { + parts := strings.SplitN(env, "=", 2) + if len(parts[0]) > 0 { + var v string + if len(parts) > 1 { + v = parts[1] + } + st = st.AddEnv(parts[0], v) + } + } + st = st.Dir(img.Config.WorkingDir) + return st + } + } + } + return NewState(src.Output()) +} + +type ImageOption interface { + SetImageOption(*ImageInfo) +} + +type ImageOptionFunc func(*ImageInfo) + +func (fn ImageOptionFunc) SetImageOption(ii *ImageInfo) { + fn(ii) +} + +type ImageInfo struct { + opMetaWrapper + metaResolver ImageMetaResolver +} + +func Git(remote, ref string, opts ...GitOption) State { + url := "" + + for _, prefix := range []string{ + "http://", "https://", "git://", "git@", + } { + if strings.HasPrefix(remote, prefix) { + url = strings.Split(remote, "#")[0] + remote = strings.TrimPrefix(remote, prefix) + } + } + + id := remote + + if ref != "" { + id += "#" + ref + } + + gi := &GitInfo{} + for _, o := range opts { + o.SetGitOption(gi) + } + attrs := map[string]string{} + if gi.KeepGitDir { + attrs[pb.AttrKeepGitDir] = "true" + } + if url != "" { + attrs[pb.AttrFullRemoteURL] = url + } + source := NewSource("git://"+id, attrs, gi.Metadata()) + return NewState(source.Output()) +} + +type GitOption interface { + SetGitOption(*GitInfo) +} +type gitOptionFunc func(*GitInfo) + +func (fn gitOptionFunc) SetGitOption(gi *GitInfo) { + fn(gi) +} + +type GitInfo struct { + opMetaWrapper + KeepGitDir bool +} + +func KeepGitDir() GitOption { + return gitOptionFunc(func(gi *GitInfo) { + gi.KeepGitDir = true + }) +} + +func Scratch() State { + return NewState(nil) +} + +func Local(name string, opts ...LocalOption) State { + gi := &LocalInfo{} + + for _, o := range opts { + o.SetLocalOption(gi) + } + attrs := map[string]string{} + if gi.SessionID != "" { + 
attrs[pb.AttrLocalSessionID] = gi.SessionID + } + if gi.IncludePatterns != "" { + attrs[pb.AttrIncludePatterns] = gi.IncludePatterns + } + if gi.ExcludePatterns != "" { + attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns + } + if gi.SharedKeyHint != "" { + attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint + } + + source := NewSource("local://"+name, attrs, gi.Metadata()) + return NewState(source.Output()) +} + +type LocalOption interface { + SetLocalOption(*LocalInfo) +} + +type localOptionFunc func(*LocalInfo) + +func (fn localOptionFunc) SetLocalOption(li *LocalInfo) { + fn(li) +} + +func SessionID(id string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + li.SessionID = id + }) +} + +func IncludePatterns(p []string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + if len(p) == 0 { + li.IncludePatterns = "" + return + } + dt, _ := json.Marshal(p) // empty on error + li.IncludePatterns = string(dt) + }) +} + +func ExcludePatterns(p []string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + if len(p) == 0 { + li.ExcludePatterns = "" + return + } + dt, _ := json.Marshal(p) // empty on error + li.ExcludePatterns = string(dt) + }) +} + +func SharedKeyHint(h string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + li.SharedKeyHint = h + }) +} + +type LocalInfo struct { + opMetaWrapper + SessionID string + IncludePatterns string + ExcludePatterns string + SharedKeyHint string +} + +func HTTP(url string, opts ...HTTPOption) State { + hi := &HTTPInfo{} + for _, o := range opts { + o.SetHTTPOption(hi) + } + attrs := map[string]string{} + if hi.Checksum != "" { + attrs[pb.AttrHTTPChecksum] = hi.Checksum.String() + } + if hi.Filename != "" { + attrs[pb.AttrHTTPFilename] = hi.Filename + } + if hi.Perm != 0 { + attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8) + } + if hi.UID != 0 { + attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID) + } + if hi.UID != 0 { + attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID) + } + + source := NewSource(url, attrs, hi.Metadata()) + return NewState(source.Output()) +} + +type HTTPInfo struct { + opMetaWrapper + Checksum digest.Digest + Filename string + Perm int + UID int + GID int +} + +type HTTPOption interface { + SetHTTPOption(*HTTPInfo) +} + +type httpOptionFunc func(*HTTPInfo) + +func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) { + fn(hi) +} + +func Checksum(dgst digest.Digest) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.Checksum = dgst + }) +} + +func Chmod(perm os.FileMode) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.Perm = int(perm) & 0777 + }) +} + +func Filename(name string) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.Filename = name + }) +} + +func Chown(uid, gid int) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.UID = uid + hi.GID = gid + }) +} diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go new file mode 100644 index 0000000000..83dbf99202 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/state.go @@ -0,0 +1,312 @@ +package llb + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/system" + digest "github.com/opencontainers/go-digest" +) + +type StateOption func(State) State + +type Output interface { + ToInput() (*pb.Input, error) + Vertex() Vertex +} + +type Vertex interface { + Validate() error + Marshal() (digest.Digest, []byte, *OpMetadata, error) + Output() Output + Inputs() []Output +} 
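As an illustrative aside (not part of the vendored file): a minimal sketch of how State, Vertex and Definition fit together, using only the API added in this patch (Image, AddEnv, Dir, User, Marshal, WithDescription). The image reference and description values are illustrative placeholders, and error handling is reduced to a panic for brevity.

package main

import (
	"fmt"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// Every transformation returns a new State whose context carries
	// cwd, env and user, and whose output points at a Vertex.
	st := llb.Image("docker.io/library/golang:1.10").
		AddEnv("GOPATH", "/go").
		Dir("/go/src/app").
		User("nobody")

	// Marshal walks the vertex graph, serializes each Op, and records
	// per-vertex OpMetadata keyed by the op digest.
	def, err := st.Marshal(llb.WithDescription(map[string]string{"purpose": "demo"}))
	if err != nil {
		panic(err)
	}

	fmt.Println("ops:", len(def.Def), "metadata entries:", len(def.Metadata))
	// def (converted via def.ToPB()) is what Client.Solve sends to the
	// daemon in solve.go.
}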
+ +func NewState(o Output) State { + s := State{ + out: o, + ctx: context.Background(), + } + s = dir("/")(s) + s = addEnv("PATH", system.DefaultPathEnv)(s) + return s +} + +type State struct { + out Output + ctx context.Context +} + +func (s State) WithValue(k, v interface{}) State { + return State{ + out: s.out, + ctx: context.WithValue(s.ctx, k, v), + } +} + +func (s State) Value(k interface{}) interface{} { + return s.ctx.Value(k) +} + +func (s State) Marshal(md ...MetadataOpt) (*Definition, error) { + def := &Definition{ + Metadata: make(map[digest.Digest]OpMetadata, 0), + } + if s.Output() == nil { + return def, nil + } + def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, md) + if err != nil { + return def, err + } + inp, err := s.Output().ToInput() + if err != nil { + return def, err + } + proto := &pb.Op{Inputs: []*pb.Input{inp}} + dt, err := proto.Marshal() + if err != nil { + return def, err + } + def.Def = append(def.Def, dt) + return def, nil +} + +func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, md []MetadataOpt) (*Definition, error) { + if _, ok := vertexCache[v]; ok { + return def, nil + } + for _, inp := range v.Inputs() { + var err error + def, err = marshal(inp.Vertex(), def, cache, vertexCache, md) + if err != nil { + return def, err + } + } + + dgst, dt, opMeta, err := v.Marshal() + if err != nil { + return def, err + } + vertexCache[v] = struct{}{} + if opMeta != nil { + m := mergeMetadata(def.Metadata[dgst], *opMeta) + for _, f := range md { + f.SetMetadataOption(&m) + } + def.Metadata[dgst] = m + } + if _, ok := cache[dgst]; ok { + return def, nil + } + def.Def = append(def.Def, dt) + cache[dgst] = struct{}{} + return def, nil +} + +func (s State) Validate() error { + return s.Output().Vertex().Validate() +} + +func (s State) Output() Output { + return s.out +} + +func (s State) WithOutput(o Output) State { + return State{ + out: o, + ctx: s.ctx, + } +} + +func (s State) Run(ro ...RunOption) ExecState { + ei := &ExecInfo{State: s} + for _, o := range ro { + o.SetRunOption(ei) + } + meta := Meta{ + Args: getArgs(ei.State), + Cwd: getDir(ei.State), + Env: getEnv(ei.State), + User: getUser(ei.State), + ProxyEnv: ei.ProxyEnv, + } + + exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Metadata()) + for _, m := range ei.Mounts { + exec.AddMount(m.Target, m.Source, m.Opts...) 
+ } + + return ExecState{ + State: s.WithOutput(exec.Output()), + exec: exec, + } +} + +func (s State) AddEnv(key, value string) State { + return s.AddEnvf(key, value) +} + +func (s State) AddEnvf(key, value string, v ...interface{}) State { + return addEnvf(key, value, v...)(s) +} + +func (s State) Dir(str string) State { + return s.Dirf(str) +} +func (s State) Dirf(str string, v ...interface{}) State { + return dirf(str, v...)(s) +} + +func (s State) GetEnv(key string) (string, bool) { + return getEnv(s).Get(key) +} + +func (s State) GetDir() string { + return getDir(s) +} + +func (s State) GetArgs() []string { + return getArgs(s) +} + +func (s State) Reset(s2 State) State { + return reset(s2)(s) +} + +func (s State) User(v string) State { + return user(v)(s) +} + +func (s State) With(so ...StateOption) State { + for _, o := range so { + s = o(s) + } + return s +} + +type output struct { + vertex Vertex + getIndex func() (pb.OutputIndex, error) +} + +func (o *output) ToInput() (*pb.Input, error) { + var index pb.OutputIndex + if o.getIndex != nil { + var err error + index, err = o.getIndex() + if err != nil { + return nil, err + } + } + dgst, _, _, err := o.vertex.Marshal() + if err != nil { + return nil, err + } + return &pb.Input{Digest: dgst, Index: index}, nil +} + +func (o *output) Vertex() Vertex { + return o.vertex +} + +type MetadataOpt interface { + SetMetadataOption(*OpMetadata) + RunOption + LocalOption + HTTPOption + ImageOption + GitOption +} + +type metadataOptFunc func(m *OpMetadata) + +func (fn metadataOptFunc) SetMetadataOption(m *OpMetadata) { + fn(m) +} + +func (fn metadataOptFunc) SetRunOption(ei *ExecInfo) { + ei.ApplyMetadata(fn) +} + +func (fn metadataOptFunc) SetLocalOption(li *LocalInfo) { + li.ApplyMetadata(fn) +} + +func (fn metadataOptFunc) SetHTTPOption(hi *HTTPInfo) { + hi.ApplyMetadata(fn) +} + +func (fn metadataOptFunc) SetImageOption(ii *ImageInfo) { + ii.ApplyMetadata(fn) +} + +func (fn metadataOptFunc) SetGitOption(gi *GitInfo) { + gi.ApplyMetadata(fn) +} + +func mergeMetadata(m1, m2 OpMetadata) OpMetadata { + if m2.IgnoreCache { + m1.IgnoreCache = true + } + if len(m2.Description) > 0 { + if m1.Description == nil { + m1.Description = make(map[string]string) + } + for k, v := range m2.Description { + m1.Description[k] = v + } + } + if m2.ExportCache != nil { + m1.ExportCache = m2.ExportCache + } + + return m1 +} + +var IgnoreCache = metadataOptFunc(func(md *OpMetadata) { + md.IgnoreCache = true +}) + +func WithDescription(m map[string]string) MetadataOpt { + return metadataOptFunc(func(md *OpMetadata) { + md.Description = m + }) +} + +// WithExportCache forces results for this vertex to be exported with the cache +func WithExportCache() MetadataOpt { + return metadataOptFunc(func(md *OpMetadata) { + md.ExportCache = &pb.ExportCache{Value: true} + }) +} + +// WithoutExportCache sets results for this vertex to be not exported with +// the cache +func WithoutExportCache() MetadataOpt { + return metadataOptFunc(func(md *OpMetadata) { + // ExportCache with value false means to disable exporting + md.ExportCache = &pb.ExportCache{Value: false} + }) +} + +// WithoutDefaultExportCache resets the cache export for the vertex to use +// the default defined by the build configuration. 
+func WithoutDefaultExportCache() MetadataOpt { + return metadataOptFunc(func(md *OpMetadata) { + // nil means no vertex based config has been set + md.ExportCache = nil + }) +} + +type opMetaWrapper struct { + OpMetadata +} + +func (mw *opMetaWrapper) ApplyMetadata(f func(m *OpMetadata)) { + f(&mw.OpMetadata) +} + +func (mw *opMetaWrapper) Metadata() OpMetadata { + return mw.OpMetadata +} diff --git a/vendor/github.com/moby/buildkit/client/prune.go b/vendor/github.com/moby/buildkit/client/prune.go new file mode 100644 index 0000000000..b3c1edcd2b --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/prune.go @@ -0,0 +1,50 @@ +package client + +import ( + "context" + "io" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/pkg/errors" +) + +func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error { + info := &PruneInfo{} + for _, o := range opts { + o(info) + } + + req := &controlapi.PruneRequest{} + cl, err := c.controlClient().Prune(ctx, req) + if err != nil { + return errors.Wrap(err, "failed to call prune") + } + + for { + d, err := cl.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + if ch != nil { + ch <- UsageInfo{ + ID: d.ID, + Mutable: d.Mutable, + InUse: d.InUse, + Size: d.Size_, + Parent: d.Parent, + CreatedAt: d.CreatedAt, + Description: d.Description, + UsageCount: int(d.UsageCount), + LastUsedAt: d.LastUsedAt, + } + } + } +} + +type PruneOption func(*PruneInfo) + +type PruneInfo struct { +} diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go new file mode 100644 index 0000000000..972b6b3ec9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/solve.go @@ -0,0 +1,251 @@ +package client + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/session/grpchijack" + "github.com/moby/buildkit/solver/pb" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +type SolveOpt struct { + Exporter string + ExporterAttrs map[string]string + ExporterOutput io.WriteCloser // for ExporterOCI and ExporterDocker + ExporterOutputDir string // for ExporterLocal + LocalDirs map[string]string + SharedKey string + Frontend string + FrontendAttrs map[string]string + ExportCache string + ExportCacheAttrs map[string]string + ImportCache []string + Session []session.Attachable +} + +// Solve calls Solve on the controller. +// def must be nil if (and only if) opt.Frontend is set. 
+func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { + defer func() { + if statusChan != nil { + close(statusChan) + } + }() + + if opt.Frontend == "" && def == nil { + return nil, errors.New("invalid empty definition") + } + if opt.Frontend != "" && def != nil { + return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend) + } + + syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs) + if err != nil { + return nil, err + } + + ref := identity.NewID() + eg, ctx := errgroup.WithContext(ctx) + + statusContext, cancelStatus := context.WithCancel(context.Background()) + defer cancelStatus() + + if span := opentracing.SpanFromContext(ctx); span != nil { + statusContext = opentracing.ContextWithSpan(statusContext, span) + } + + s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create session") + } + + if len(syncedDirs) > 0 { + s.Allow(filesync.NewFSSyncProvider(syncedDirs)) + } + + for _, a := range opt.Session { + s.Allow(a) + } + + switch opt.Exporter { + case ExporterLocal: + if opt.ExporterOutput != nil { + return nil, errors.New("output file writer is not supported by local exporter") + } + if opt.ExporterOutputDir == "" { + return nil, errors.New("output directory is required for local exporter") + } + s.Allow(filesync.NewFSSyncTargetDir(opt.ExporterOutputDir)) + case ExporterOCI, ExporterDocker: + if opt.ExporterOutputDir != "" { + return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter) + } + if opt.ExporterOutput == nil { + return nil, errors.Errorf("output file writer is required for %s exporter", opt.Exporter) + } + s.Allow(filesync.NewFSSyncTarget(opt.ExporterOutput)) + default: + if opt.ExporterOutput != nil { + return nil, errors.Errorf("output file writer is not supported by %s exporter", opt.Exporter) + } + if opt.ExporterOutputDir != "" { + return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter) + } + } + + eg.Go(func() error { + return s.Run(statusContext, grpchijack.Dialer(c.controlClient())) + }) + + var res *SolveResponse + eg.Go(func() error { + defer func() { // make sure the Status ends cleanly on build errors + go func() { + <-time.After(3 * time.Second) + cancelStatus() + }() + logrus.Debugf("stopping session") + s.Close() + }() + var pbd *pb.Definition + if def != nil { + pbd = def.ToPB() + } + resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{ + Ref: ref, + Definition: pbd, + Exporter: opt.Exporter, + ExporterAttrs: opt.ExporterAttrs, + Session: s.ID(), + Frontend: opt.Frontend, + FrontendAttrs: opt.FrontendAttrs, + Cache: controlapi.CacheOptions{ + ExportRef: opt.ExportCache, + ImportRefs: opt.ImportCache, + ExportAttrs: opt.ExportCacheAttrs, + }, + }) + if err != nil { + return errors.Wrap(err, "failed to solve") + } + res = &SolveResponse{ + ExporterResponse: resp.ExporterResponse, + } + return nil + }) + + eg.Go(func() error { + stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{ + Ref: ref, + }) + if err != nil { + return errors.Wrap(err, "failed to get status") + } + for { + resp, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return errors.Wrap(err, "failed to receive status") + } + s := SolveStatus{} + for _, v := range resp.Vertexes { + s.Vertexes = append(s.Vertexes, 
&Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + }) + } + for _, v := range resp.Statuses { + s.Statuses = append(s.Statuses, &VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Total: v.Total, + Current: v.Current, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for _, v := range resp.Logs { + s.Logs = append(s.Logs, &VertexLog{ + Vertex: v.Vertex, + Stream: int(v.Stream), + Data: v.Msg, + Timestamp: v.Timestamp, + }) + } + if statusChan != nil { + statusChan <- &s + } + } + }) + + if err := eg.Wait(); err != nil { + return nil, err + } + return res, nil +} + +func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) { + for _, d := range localDirs { + fi, err := os.Stat(d) + if err != nil { + return nil, errors.Wrapf(err, "could not find %s", d) + } + if !fi.IsDir() { + return nil, errors.Errorf("%s not a directory", d) + } + } + dirs := make([]filesync.SyncedDir, 0, len(localDirs)) + if def == nil { + for name, d := range localDirs { + dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d}) + } + } else { + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return nil, errors.Wrap(err, "failed to parse llb proto op") + } + if src := op.GetSource(); src != nil { + if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property + name := strings.TrimPrefix(src.Identifier, "local://") + d, ok := localDirs[name] + if !ok { + return nil, errors.Errorf("local directory %s not enabled", name) + } + dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d}) // TODO: excludes + } + } + } + } + return dirs, nil +} + +func defaultSessionName() string { + wd, err := os.Getwd() + if err != nil { + return "unknown" + } + return filepath.Base(wd) +} diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go new file mode 100644 index 0000000000..b4ccb82d4c --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/workers.go @@ -0,0 +1,49 @@ +package client + +import ( + "context" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/pkg/errors" +) + +type WorkerInfo struct { + ID string + Labels map[string]string +} + +func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) { + info := &ListWorkersInfo{} + for _, o := range opts { + o(info) + } + + req := &controlapi.ListWorkersRequest{Filter: info.Filter} + resp, err := c.controlClient().ListWorkers(ctx, req) + if err != nil { + return nil, errors.Wrap(err, "failed to list workers") + } + + var wi []*WorkerInfo + + for _, w := range resp.Record { + wi = append(wi, &WorkerInfo{ + ID: w.ID, + Labels: w.Labels, + }) + } + + return wi, nil +} + +type ListWorkersOption func(*ListWorkersInfo) + +type ListWorkersInfo struct { + Filter []string +} + +func WithWorkerFilter(f []string) ListWorkersOption { + return func(wi *ListWorkersInfo) { + wi.Filter = f + } +} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go new file mode 100644 index 0000000000..151ab5498f --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go @@ -0,0 +1,156 @@ +package grpchijack + +import ( + "context" + "io" + "net" + "strings" + "sync" + "time" + + controlapi 
"github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/session" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func Dialer(api controlapi.ControlClient) session.Dialer { + return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + + meta = lowerHeaders(meta) + + md := metadata.MD(meta) + + ctx = metadata.NewOutgoingContext(ctx, md) + + stream, err := api.Session(ctx) + if err != nil { + return nil, err + } + + c, _ := streamToConn(stream) + return c, nil + } +} + +func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) { + closeCh := make(chan struct{}) + c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh} + return c, closeCh +} + +type conn struct { + stream grpc.Stream + buf []byte + lastBuf []byte + + closedOnce sync.Once + readMu sync.Mutex + err error + closeCh chan struct{} +} + +func (c *conn) Read(b []byte) (n int, err error) { + c.readMu.Lock() + defer c.readMu.Unlock() + + if c.lastBuf != nil { + n := copy(b, c.lastBuf) + c.lastBuf = c.lastBuf[n:] + if len(c.lastBuf) == 0 { + c.lastBuf = nil + } + return n, nil + } + m := new(controlapi.BytesMessage) + m.Data = c.buf + + if err := c.stream.RecvMsg(m); err != nil { + return 0, err + } + c.buf = m.Data[:cap(m.Data)] + + n = copy(b, m.Data) + if n < len(m.Data) { + c.lastBuf = m.Data[n:] + } + + return n, nil +} + +func (c *conn) Write(b []byte) (int, error) { + m := &controlapi.BytesMessage{Data: b} + if err := c.stream.SendMsg(m); err != nil { + return 0, err + } + return len(b), nil +} + +func (c *conn) Close() (err error) { + c.closedOnce.Do(func() { + defer func() { + close(c.closeCh) + }() + + if cs, ok := c.stream.(grpc.ClientStream); ok { + err = cs.CloseSend() + if err != nil { + return + } + } + + c.readMu.Lock() + for { + m := new(controlapi.BytesMessage) + m.Data = c.buf + err = c.stream.RecvMsg(m) + if err != nil { + if err != io.EOF { + return + } + err = nil + break + } + c.buf = m.Data[:cap(m.Data)] + c.lastBuf = append(c.lastBuf, c.buf...) 
+ } + c.readMu.Unlock() + + }) + return nil +} + +func (c *conn) LocalAddr() net.Addr { + return dummyAddr{} +} +func (c *conn) RemoteAddr() net.Addr { + return dummyAddr{} +} +func (c *conn) SetDeadline(t time.Time) error { + return nil +} +func (c *conn) SetReadDeadline(t time.Time) error { + return nil +} +func (c *conn) SetWriteDeadline(t time.Time) error { + return nil +} + +type dummyAddr struct { +} + +func (d dummyAddr) Network() string { + return "tcp" +} + +func (d dummyAddr) String() string { + return "localhost" +} + +func lowerHeaders(in map[string][]string) map[string][]string { + out := map[string][]string{} + for k := range in { + out[strings.ToLower(k)] = in[k] + } + return out +} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go b/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go new file mode 100644 index 0000000000..6e34b2164e --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go @@ -0,0 +1,14 @@ +package grpchijack + +import ( + "net" + + controlapi "github.com/moby/buildkit/api/services/control" + "google.golang.org/grpc/metadata" +) + +func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) { + md, _ := metadata.FromIncomingContext(stream.Context()) + c, closeCh := streamToConn(stream) + return c, closeCh, md +} diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go new file mode 100644 index 0000000000..ffbb67a778 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/attr.go @@ -0,0 +1,15 @@ +package pb + +const AttrKeepGitDir = "git.keepgitdir" +const AttrFullRemoteURL = "git.fullurl" +const AttrLocalSessionID = "local.session" +const AttrIncludePatterns = "local.includepattern" +const AttrExcludePatterns = "local.excludepatterns" +const AttrSharedKeyHint = "local.sharedkeyhint" +const AttrLLBDefinitionFilename = "llbbuild.filename" + +const AttrHTTPChecksum = "http.checksum" +const AttrHTTPFilename = "http.filename" +const AttrHTTPPerm = "http.perm" +const AttrHTTPUID = "http.uid" +const AttrHTTPGID = "http.gid" diff --git a/vendor/github.com/moby/buildkit/solver/pb/const.go b/vendor/github.com/moby/buildkit/solver/pb/const.go new file mode 100644 index 0000000000..2cb9951082 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/const.go @@ -0,0 +1,12 @@ +package pb + +type InputIndex int64 +type OutputIndex int64 + +const RootMount = "/" +const SkipOutput OutputIndex = -1 +const Empty InputIndex = -1 +const LLBBuilder InputIndex = -1 + +const LLBDefinitionInput = "buildkit.llb.definition" +const LLBDefaultDefinitionFile = LLBDefinitionInput diff --git a/vendor/github.com/moby/buildkit/solver/pb/generate.go b/vendor/github.com/moby/buildkit/solver/pb/generate.go new file mode 100644 index 0000000000..c31e148f2a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/generate.go @@ -0,0 +1,3 @@ +package pb + +//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go new file mode 100644 index 0000000000..408a566016 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -0,0 +1,4490 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ops.proto + +/* + Package pb is a generated protocol buffer package. + + Package pb provides the protobuf definition of LLB: low-level builder instruction. 
+ LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. + + It is generated from these files: + ops.proto + + It has these top-level messages: + Op + Input + ExecOp + Meta + Mount + CacheOpt + CopyOp + CopySource + SourceOp + BuildOp + BuildInput + OpMetadata + ExportCache + ProxyEnv + WorkerConstraint + Definition +*/ +package pb + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type MountType int32 + +const ( + MountType_BIND MountType = 0 + MountType_SECRET MountType = 1 + MountType_SSH MountType = 2 + MountType_CACHE MountType = 3 +) + +var MountType_name = map[int32]string{ + 0: "BIND", + 1: "SECRET", + 2: "SSH", + 3: "CACHE", +} +var MountType_value = map[string]int32{ + "BIND": 0, + "SECRET": 1, + "SSH": 2, + "CACHE": 3, +} + +func (x MountType) String() string { + return proto.EnumName(MountType_name, int32(x)) +} +func (MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorOps, []int{0} } + +// Op represents a vertex of the LLB DAG. +type Op struct { + // inputs is a set of input edges. + Inputs []*Input `protobuf:"bytes,1,rep,name=inputs" json:"inputs,omitempty"` + // Types that are valid to be assigned to Op: + // *Op_Exec + // *Op_Source + // *Op_Copy + // *Op_Build + Op isOp_Op `protobuf_oneof:"op"` +} + +func (m *Op) Reset() { *m = Op{} } +func (m *Op) String() string { return proto.CompactTextString(m) } +func (*Op) ProtoMessage() {} +func (*Op) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{0} } + +type isOp_Op interface { + isOp_Op() + MarshalTo([]byte) (int, error) + Size() int +} + +type Op_Exec struct { + Exec *ExecOp `protobuf:"bytes,2,opt,name=exec,oneof"` +} +type Op_Source struct { + Source *SourceOp `protobuf:"bytes,3,opt,name=source,oneof"` +} +type Op_Copy struct { + Copy *CopyOp `protobuf:"bytes,4,opt,name=copy,oneof"` +} +type Op_Build struct { + Build *BuildOp `protobuf:"bytes,5,opt,name=build,oneof"` +} + +func (*Op_Exec) isOp_Op() {} +func (*Op_Source) isOp_Op() {} +func (*Op_Copy) isOp_Op() {} +func (*Op_Build) isOp_Op() {} + +func (m *Op) GetOp() isOp_Op { + if m != nil { + return m.Op + } + return nil +} + +func (m *Op) GetInputs() []*Input { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *Op) GetExec() *ExecOp { + if x, ok := m.GetOp().(*Op_Exec); ok { + return x.Exec + } + return nil +} + +func (m *Op) GetSource() *SourceOp { + if x, ok := m.GetOp().(*Op_Source); ok { + return x.Source + } + return nil +} + +func (m *Op) GetCopy() *CopyOp { + if x, ok := m.GetOp().(*Op_Copy); ok { + return x.Copy + } + return nil +} + +func (m *Op) GetBuild() *BuildOp { + if x, ok := m.GetOp().(*Op_Build); ok { + return x.Build + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Op) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Op_OneofMarshaler, _Op_OneofUnmarshaler, _Op_OneofSizer, []interface{}{ + (*Op_Exec)(nil), + (*Op_Source)(nil), + (*Op_Copy)(nil), + (*Op_Build)(nil), + } +} + +func _Op_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Op) + // op + switch x := m.Op.(type) { + case *Op_Exec: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Exec); err != nil { + return err + } + case *Op_Source: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Source); err != nil { + return err + } + case *Op_Copy: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Copy); err != nil { + return err + } + case *Op_Build: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Build); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Op.Op has unexpected type %T", x) + } + return nil +} + +func _Op_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Op) + switch tag { + case 2: // op.exec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExecOp) + err := b.DecodeMessage(msg) + m.Op = &Op_Exec{msg} + return true, err + case 3: // op.source + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SourceOp) + err := b.DecodeMessage(msg) + m.Op = &Op_Source{msg} + return true, err + case 4: // op.copy + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CopyOp) + err := b.DecodeMessage(msg) + m.Op = &Op_Copy{msg} + return true, err + case 5: // op.build + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BuildOp) + err := b.DecodeMessage(msg) + m.Op = &Op_Build{msg} + return true, err + default: + return false, nil + } +} + +func _Op_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Op) + // op + switch x := m.Op.(type) { + case *Op_Exec: + s := proto.Size(x.Exec) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Op_Source: + s := proto.Size(x.Source) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Op_Copy: + s := proto.Size(x.Copy) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Op_Build: + s := proto.Size(x.Build) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Input represents an input edge for an Op. +type Input struct { + // digest of the marshaled input Op + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + // output index of the input Op + Index OutputIndex `protobuf:"varint,2,opt,name=index,proto3,customtype=OutputIndex" json:"index"` +} + +func (m *Input) Reset() { *m = Input{} } +func (m *Input) String() string { return proto.CompactTextString(m) } +func (*Input) ProtoMessage() {} +func (*Input) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{1} } + +// ExecOp executes a command in a container. 
+type ExecOp struct { + Meta *Meta `protobuf:"bytes,1,opt,name=meta" json:"meta,omitempty"` + Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"` +} + +func (m *ExecOp) Reset() { *m = ExecOp{} } +func (m *ExecOp) String() string { return proto.CompactTextString(m) } +func (*ExecOp) ProtoMessage() {} +func (*ExecOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{2} } + +func (m *ExecOp) GetMeta() *Meta { + if m != nil { + return m.Meta + } + return nil +} + +func (m *ExecOp) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +// Meta is a set of arguments for ExecOp. +// Meta is unrelated to LLB metadata. +// FIXME: rename (ExecContext? ExecArgs?) +type Meta struct { + Args []string `protobuf:"bytes,1,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,2,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv" json:"proxy_env,omitempty"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (m *Meta) String() string { return proto.CompactTextString(m) } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{3} } + +func (m *Meta) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *Meta) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +func (m *Meta) GetCwd() string { + if m != nil { + return m.Cwd + } + return "" +} + +func (m *Meta) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *Meta) GetProxyEnv() *ProxyEnv { + if m != nil { + return m.ProxyEnv + } + return nil +} + +// Mount specifies how to mount an input Op as a filesystem. 
+type Mount struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` + Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"` + Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"` + Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` + MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` + CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt" json:"cacheOpt,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (m *Mount) String() string { return proto.CompactTextString(m) } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{4} } + +func (m *Mount) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *Mount) GetDest() string { + if m != nil { + return m.Dest + } + return "" +} + +func (m *Mount) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *Mount) GetMountType() MountType { + if m != nil { + return m.MountType + } + return MountType_BIND +} + +func (m *Mount) GetCacheOpt() *CacheOpt { + if m != nil { + return m.CacheOpt + } + return nil +} + +type CacheOpt struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *CacheOpt) Reset() { *m = CacheOpt{} } +func (m *CacheOpt) String() string { return proto.CompactTextString(m) } +func (*CacheOpt) ProtoMessage() {} +func (*CacheOpt) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{5} } + +func (m *CacheOpt) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +// CopyOp copies files across Ops. +type CopyOp struct { + Src []*CopySource `protobuf:"bytes,1,rep,name=src" json:"src,omitempty"` + Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` +} + +func (m *CopyOp) Reset() { *m = CopyOp{} } +func (m *CopyOp) String() string { return proto.CompactTextString(m) } +func (*CopyOp) ProtoMessage() {} +func (*CopyOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{6} } + +func (m *CopyOp) GetSrc() []*CopySource { + if m != nil { + return m.Src + } + return nil +} + +func (m *CopyOp) GetDest() string { + if m != nil { + return m.Dest + } + return "" +} + +// CopySource specifies a source for CopyOp. +type CopySource struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` + Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` +} + +func (m *CopySource) Reset() { *m = CopySource{} } +func (m *CopySource) String() string { return proto.CompactTextString(m) } +func (*CopySource) ProtoMessage() {} +func (*CopySource) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{7} } + +func (m *CopySource) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +// SourceOp specifies a source such as build contexts and images. +type SourceOp struct { + // TODO: use source type or any type instead of URL protocol. + // identifier e.g. local://, docker-image://, git://, https://... 
+ Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // attrs are defined in attr.go + Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *SourceOp) Reset() { *m = SourceOp{} } +func (m *SourceOp) String() string { return proto.CompactTextString(m) } +func (*SourceOp) ProtoMessage() {} +func (*SourceOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{8} } + +func (m *SourceOp) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *SourceOp) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +// BuildOp is used for nested build invocation. +type BuildOp struct { + Builder InputIndex `protobuf:"varint,1,opt,name=builder,proto3,customtype=InputIndex" json:"builder"` + Inputs map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + Def *Definition `protobuf:"bytes,3,opt,name=def" json:"def,omitempty"` + Attrs map[string]string `protobuf:"bytes,4,rep,name=attrs" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *BuildOp) Reset() { *m = BuildOp{} } +func (m *BuildOp) String() string { return proto.CompactTextString(m) } +func (*BuildOp) ProtoMessage() {} +func (*BuildOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{9} } + +func (m *BuildOp) GetInputs() map[string]*BuildInput { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *BuildOp) GetDef() *Definition { + if m != nil { + return m.Def + } + return nil +} + +func (m *BuildOp) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +// BuildInput is used for BuildOp. +type BuildInput struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` +} + +func (m *BuildInput) Reset() { *m = BuildInput{} } +func (m *BuildInput) String() string { return proto.CompactTextString(m) } +func (*BuildInput) ProtoMessage() {} +func (*BuildInput) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{10} } + +// OpMetadata is a per-vertex metadata entry, which can be defined for arbitrary Op vertex and overridable on the run time. +type OpMetadata struct { + // ignore_cache specifies to ignore the cache for this Op. 
+ IgnoreCache bool `protobuf:"varint,1,opt,name=ignore_cache,json=ignoreCache,proto3" json:"ignore_cache,omitempty"` + // Description can be used for keeping any text fields that builder doesn't parse + Description map[string]string `protobuf:"bytes,2,rep,name=description" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + WorkerConstraint *WorkerConstraint `protobuf:"bytes,3,opt,name=worker_constraint,json=workerConstraint" json:"worker_constraint,omitempty"` + ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache" json:"export_cache,omitempty"` +} + +func (m *OpMetadata) Reset() { *m = OpMetadata{} } +func (m *OpMetadata) String() string { return proto.CompactTextString(m) } +func (*OpMetadata) ProtoMessage() {} +func (*OpMetadata) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{11} } + +func (m *OpMetadata) GetIgnoreCache() bool { + if m != nil { + return m.IgnoreCache + } + return false +} + +func (m *OpMetadata) GetDescription() map[string]string { + if m != nil { + return m.Description + } + return nil +} + +func (m *OpMetadata) GetWorkerConstraint() *WorkerConstraint { + if m != nil { + return m.WorkerConstraint + } + return nil +} + +func (m *OpMetadata) GetExportCache() *ExportCache { + if m != nil { + return m.ExportCache + } + return nil +} + +type ExportCache struct { + Value bool `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"` +} + +func (m *ExportCache) Reset() { *m = ExportCache{} } +func (m *ExportCache) String() string { return proto.CompactTextString(m) } +func (*ExportCache) ProtoMessage() {} +func (*ExportCache) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{12} } + +func (m *ExportCache) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +type ProxyEnv struct { + HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` + HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` + FtpProxy string `protobuf:"bytes,3,opt,name=ftp_proxy,json=ftpProxy,proto3" json:"ftp_proxy,omitempty"` + NoProxy string `protobuf:"bytes,4,opt,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"` +} + +func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } +func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } +func (*ProxyEnv) ProtoMessage() {} +func (*ProxyEnv) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{13} } + +func (m *ProxyEnv) GetHttpProxy() string { + if m != nil { + return m.HttpProxy + } + return "" +} + +func (m *ProxyEnv) GetHttpsProxy() string { + if m != nil { + return m.HttpsProxy + } + return "" +} + +func (m *ProxyEnv) GetFtpProxy() string { + if m != nil { + return m.FtpProxy + } + return "" +} + +func (m *ProxyEnv) GetNoProxy() string { + if m != nil { + return m.NoProxy + } + return "" +} + +// WorkerConstraint is experimental and likely to be changed. 
+type WorkerConstraint struct { + Filter []string `protobuf:"bytes,1,rep,name=filter" json:"filter,omitempty"` +} + +func (m *WorkerConstraint) Reset() { *m = WorkerConstraint{} } +func (m *WorkerConstraint) String() string { return proto.CompactTextString(m) } +func (*WorkerConstraint) ProtoMessage() {} +func (*WorkerConstraint) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{14} } + +func (m *WorkerConstraint) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} + +// Definition is the LLB definition structure with per-vertex metadata entries +type Definition struct { + // def is a list of marshaled Op messages + Def [][]byte `protobuf:"bytes,1,rep,name=def" json:"def,omitempty"` + // metadata contains metadata for the each of the Op messages. + // A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future. + Metadata map[github_com_opencontainers_go_digest.Digest]OpMetadata `protobuf:"bytes,2,rep,name=metadata,castkey=github.com/opencontainers/go-digest.Digest" json:"metadata" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Definition) Reset() { *m = Definition{} } +func (m *Definition) String() string { return proto.CompactTextString(m) } +func (*Definition) ProtoMessage() {} +func (*Definition) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{15} } + +func (m *Definition) GetDef() [][]byte { + if m != nil { + return m.Def + } + return nil +} + +func (m *Definition) GetMetadata() map[github_com_opencontainers_go_digest.Digest]OpMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func init() { + proto.RegisterType((*Op)(nil), "pb.Op") + proto.RegisterType((*Input)(nil), "pb.Input") + proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") + proto.RegisterType((*Meta)(nil), "pb.Meta") + proto.RegisterType((*Mount)(nil), "pb.Mount") + proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") + proto.RegisterType((*CopyOp)(nil), "pb.CopyOp") + proto.RegisterType((*CopySource)(nil), "pb.CopySource") + proto.RegisterType((*SourceOp)(nil), "pb.SourceOp") + proto.RegisterType((*BuildOp)(nil), "pb.BuildOp") + proto.RegisterType((*BuildInput)(nil), "pb.BuildInput") + proto.RegisterType((*OpMetadata)(nil), "pb.OpMetadata") + proto.RegisterType((*ExportCache)(nil), "pb.ExportCache") + proto.RegisterType((*ProxyEnv)(nil), "pb.ProxyEnv") + proto.RegisterType((*WorkerConstraint)(nil), "pb.WorkerConstraint") + proto.RegisterType((*Definition)(nil), "pb.Definition") + proto.RegisterEnum("pb.MountType", MountType_name, MountType_value) +} +func (m *Op) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Op) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Inputs) > 0 { + for _, msg := range m.Inputs { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Op != nil { + nn1, err := m.Op.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *Op_Exec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Exec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Exec.Size())) + n2, err := m.Exec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} 
+func (m *Op_Source) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Source != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Source.Size())) + n3, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Op_Copy) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Copy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Copy.Size())) + n4, err := m.Copy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *Op_Build) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Build != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Build.Size())) + n5, err := m.Build.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} +func (m *Input) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Input) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Digest) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if m.Index != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Index)) + } + return i, nil +} + +func (m *ExecOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Meta != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Meta.Size())) + n6, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if len(m.Mounts) > 0 { + for _, msg := range m.Mounts { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Meta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Cwd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Cwd))) + i += copy(dAtA[i:], m.Cwd) + } + if len(m.User) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if m.ProxyEnv != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.ProxyEnv.Size())) + n7, err := m.ProxyEnv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *Mount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*Mount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Input != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + } + if len(m.Selector) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + } + if len(m.Dest) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) + i += copy(dAtA[i:], m.Dest) + } + if m.Output != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Output)) + } + if m.Readonly { + dAtA[i] = 0x28 + i++ + if m.Readonly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.MountType != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.MountType)) + } + if m.CacheOpt != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.CacheOpt.Size())) + n8, err := m.CacheOpt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *CacheOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheOpt) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *CopyOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CopyOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Src) > 0 { + for _, msg := range m.Src { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Dest) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) + i += copy(dAtA[i:], m.Dest) + } + return i, nil +} + +func (m *CopySource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CopySource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Input != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + } + if len(m.Selector) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + } + return i, nil +} + +func (m *SourceOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Identifier) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Identifier))) + i += copy(dAtA[i:], m.Identifier) + } + if len(m.Attrs) > 0 { + for k, _ := range m.Attrs { + dAtA[i] = 0x12 + i++ + v := m.Attrs[k] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) 
+ } + } + return i, nil +} + +func (m *BuildOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Builder != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Builder)) + } + if len(m.Inputs) > 0 { + for k, _ := range m.Inputs { + dAtA[i] = 0x12 + i++ + v := m.Inputs[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovOps(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(v.Size())) + n9, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + } + } + if m.Def != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Def.Size())) + n10, err := m.Def.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if len(m.Attrs) > 0 { + for k, _ := range m.Attrs { + dAtA[i] = 0x22 + i++ + v := m.Attrs[k] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *BuildInput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildInput) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Input != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + } + return i, nil +} + +func (m *OpMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.IgnoreCache { + dAtA[i] = 0x8 + i++ + if m.IgnoreCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Description) > 0 { + for k, _ := range m.Description { + dAtA[i] = 0x12 + i++ + v := m.Description[k] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.WorkerConstraint != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.WorkerConstraint.Size())) + n11, err := m.WorkerConstraint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.ExportCache != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.ExportCache.Size())) + n12, err := m.ExportCache.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *ExportCache) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *ExportCache) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value { + dAtA[i] = 0x8 + i++ + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyEnv) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.HttpProxy) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.HttpProxy))) + i += copy(dAtA[i:], m.HttpProxy) + } + if len(m.HttpsProxy) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.HttpsProxy))) + i += copy(dAtA[i:], m.HttpsProxy) + } + if len(m.FtpProxy) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.FtpProxy))) + i += copy(dAtA[i:], m.FtpProxy) + } + if len(m.NoProxy) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.NoProxy))) + i += copy(dAtA[i:], m.NoProxy) + } + return i, nil +} + +func (m *WorkerConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerConstraint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *Definition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Definition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Def) > 0 { + for _, b := range m.Def { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + if len(m.Metadata) > 0 { + for k, _ := range m.Metadata { + dAtA[i] = 0x12 + i++ + v := m.Metadata[k] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovOps(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64((&v).Size())) + n13, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + } + return i, nil +} + +func encodeVarintOps(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Op) Size() (n int) { + var l int + _ = l + if len(m.Inputs) > 0 { + for _, e := range m.Inputs { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + if m.Op != nil { + n += m.Op.Size() + } + return n +} + +func (m *Op_Exec) Size() (n int) { + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Source) Size() (n int) { + var l int + _ = l + if m.Source != nil { + l = m.Source.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Copy) Size() (n int) { + var l 
int + _ = l + if m.Copy != nil { + l = m.Copy.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Build) Size() (n int) { + var l int + _ = l + if m.Build != nil { + l = m.Build.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Input) Size() (n int) { + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovOps(uint64(m.Index)) + } + return n +} + +func (m *ExecOp) Size() (n int) { + var l int + _ = l + if m.Meta != nil { + l = m.Meta.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Meta) Size() (n int) { + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + l = len(m.Cwd) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.ProxyEnv != nil { + l = m.ProxyEnv.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *Mount) Size() (n int) { + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Dest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Output != 0 { + n += 1 + sovOps(uint64(m.Output)) + } + if m.Readonly { + n += 2 + } + if m.MountType != 0 { + n += 1 + sovOps(uint64(m.MountType)) + } + if m.CacheOpt != nil { + l = m.CacheOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } + return n +} + +func (m *CacheOpt) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *CopyOp) Size() (n int) { + var l int + _ = l + if len(m.Src) > 0 { + for _, e := range m.Src { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + l = len(m.Dest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *CopySource) Size() (n int) { + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *SourceOp) Size() (n int) { + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *BuildOp) Size() (n int) { + var l int + _ = l + if m.Builder != 0 { + n += 1 + sovOps(uint64(m.Builder)) + } + if len(m.Inputs) > 0 { + for k, v := range m.Inputs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovOps(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + l + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + if m.Def != nil { + l = m.Def.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *BuildInput) Size() (n int) { + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + return n +} + +func (m *OpMetadata) Size() 
(n int) { + var l int + _ = l + if m.IgnoreCache { + n += 2 + } + if len(m.Description) > 0 { + for k, v := range m.Description { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + if m.WorkerConstraint != nil { + l = m.WorkerConstraint.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.ExportCache != nil { + l = m.ExportCache.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *ExportCache) Size() (n int) { + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *ProxyEnv) Size() (n int) { + var l int + _ = l + l = len(m.HttpProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.HttpsProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.FtpProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.NoProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *WorkerConstraint) Size() (n int) { + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Definition) Size() (n int) { + var l int + _ = l + if len(m.Def) > 0 { + for _, b := range m.Def { + l = len(b) + n += 1 + l + sovOps(uint64(l)) + } + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + l + sovOps(uint64(l)) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func sovOps(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozOps(x uint64) (n int) { + return sovOps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Op) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Op: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Op: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inputs = append(m.Inputs, &Input{}) + if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + v := &ExecOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Exec{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SourceOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Source{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Copy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &CopyOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Copy{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Build", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BuildOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Build{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Input) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Input: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Input: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (OutputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Meta == nil { + m.Meta = &Meta{} + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, &Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cwd", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cwd = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProxyEnv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProxyEnv == nil { + m.ProxyEnv = &ProxyEnv{} + } + if err := m.ProxyEnv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + m.Output = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Output |= (OutputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Readonly = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MountType", wireType) + } + m.MountType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MountType |= (MountType(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CacheOpt == nil { + m.CacheOpt = &CacheOpt{} + } + if err := m.CacheOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CopyOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CopyOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CopyOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Src", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Src = append(m.Src, &CopySource{}) + if err := m.Src[len(m.Src)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CopySource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CopySource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CopySource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Builder", wireType) + } + m.Builder = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Builder |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Inputs == nil { + m.Inputs = make(map[string]*BuildInput) + } + var mapkey string + var mapvalue *BuildInput + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BuildInput{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := 
skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Inputs[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Def == nil { + m.Def = &Definition{} + } + if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildInput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreCache = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Description[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkerConstraint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkerConstraint == nil { + m.WorkerConstraint = &WorkerConstraint{} + } + if err := m.WorkerConstraint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportCache", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExportCache == nil { + m.ExportCache = &ExportCache{} + } + if err := m.ExportCache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportCache) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportCache: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportCache: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProxyEnv) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyEnv: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProxyEnv: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HttpProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpsProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HttpsProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FtpProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.FtpProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NoProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkerConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Definition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Definition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Definition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Def = append(m.Def, make([]byte, postIndex-iNdEx)) + copy(m.Def[len(m.Def)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[github_com_opencontainers_go_digest.Digest]OpMetadata) + } + var mapkey github_com_opencontainers_go_digest.Digest + mapvalue := &OpMetadata{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &OpMetadata{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[github_com_opencontainers_go_digest.Digest(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipOps(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return 0, ErrIntOverflowOps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthOps + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipOps(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthOps = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowOps = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("ops.proto", fileDescriptorOps) } + +var fileDescriptorOps = []byte{ + // 1053 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, + 0x17, 0xcf, 0xae, 0x3f, 0xb2, 0x7b, 0x36, 0xcd, 0xdf, 0xff, 0x21, 0x2a, 0xc6, 0x94, 0xc4, 0x6c, + 0x11, 0x72, 0xd3, 0xc6, 0x91, 0x8c, 0x40, 0x15, 0x17, 0x95, 0xe2, 0x0f, 0x29, 0x06, 0x95, 0xa0, + 0x49, 0x05, 0x97, 0x91, 0xbd, 0x1e, 0x3b, 0xab, 0x3a, 0x3b, 0xab, 0xdd, 0xd9, 0xc4, 0xbe, 0x00, + 0x89, 0x3e, 0x01, 0x12, 0x4f, 0xc1, 0x43, 0xc0, 0x75, 0x2f, 0xb9, 0x85, 0x8b, 0x82, 0xc2, 0x8b, + 0xa0, 0x73, 0x66, 0xbc, 0xeb, 0x86, 0x22, 0xb5, 0x82, 0x2b, 0xcf, 0x9c, 0xf3, 0x3b, 0x67, 0xce, + 0xf9, 0x9d, 0x8f, 0x35, 0xb8, 0x32, 0x4e, 0xdb, 0x71, 0x22, 0x95, 0x64, 0x76, 0x3c, 0x6e, 0x1c, + 0xcc, 0x42, 0x75, 0x9e, 0x8d, 0xdb, 0x81, 0xbc, 0x38, 0x9c, 0xc9, 0x99, 0x3c, 0x24, 0xd5, 0x38, + 0x9b, 0xd2, 0x8d, 0x2e, 0x74, 0xd2, 0x26, 0xfe, 0xcf, 0x16, 0xd8, 0x27, 0x31, 0x7b, 0x1f, 0xaa, + 0x61, 0x14, 0x67, 0x2a, 0xad, 0x5b, 0xcd, 0x52, 0xcb, 0xeb, 0xb8, 0xed, 0x78, 0xdc, 0x1e, 0xa2, + 0x84, 0x1b, 0x05, 0x6b, 0x42, 0x59, 0x2c, 0x44, 0x50, 0xb7, 0x9b, 0x56, 0xcb, 0xeb, 0x00, 0x02, + 0x06, 0x0b, 0x11, 0x9c, 0xc4, 0xc7, 0x1b, 0x9c, 0x34, 0xec, 0x43, 0xa8, 0xa6, 0x32, 0x4b, 0x02, + 0x51, 0x2f, 0x11, 0x66, 0x0b, 0x31, 0xa7, 0x24, 0x21, 0x94, 0xd1, 0xa2, 0xa7, 0x40, 0xc6, 0xcb, + 0x7a, 0xb9, 0xf0, 0xd4, 0x93, 0xf1, 0x52, 0x7b, 0x42, 0x0d, 0xbb, 0x0b, 0x95, 0x71, 0x16, 0xce, + 0x27, 0xf5, 0x0a, 0x41, 0x3c, 0x84, 0x74, 0x51, 0x40, 0x18, 0xad, 0xeb, 0x96, 0xc1, 0x96, 0xb1, + 0xff, 0x2d, 0x54, 0x28, 0x4e, 0xf6, 0x19, 0x54, 0x27, 0xe1, 0x4c, 0xa4, 0xaa, 0x6e, 0x35, 0xad, + 0x96, 0xdb, 0xed, 0x3c, 0x7f, 0xb1, 0xb7, 0xf1, 0xdb, 0x8b, 0xbd, 0xfd, 0x35, 0x42, 0x64, 0x2c, + 
0xa2, 0x40, 0x46, 0x6a, 0x14, 0x46, 0x22, 0x49, 0x0f, 0x67, 0xf2, 0x40, 0x9b, 0xb4, 0xfb, 0xf4, + 0xc3, 0x8d, 0x07, 0x76, 0x0f, 0x2a, 0x61, 0x34, 0x11, 0x0b, 0x4a, 0xb6, 0xd4, 0x7d, 0xcb, 0xb8, + 0xf2, 0x4e, 0x32, 0x15, 0x67, 0x6a, 0x88, 0x2a, 0xae, 0x11, 0xfe, 0x10, 0xaa, 0x9a, 0x06, 0x76, + 0x07, 0xca, 0x17, 0x42, 0x8d, 0xe8, 0x79, 0xaf, 0xe3, 0x60, 0xcc, 0x8f, 0x85, 0x1a, 0x71, 0x92, + 0x22, 0xc3, 0x17, 0x32, 0x8b, 0x54, 0x5a, 0xb7, 0x0b, 0x86, 0x1f, 0xa3, 0x84, 0x1b, 0x85, 0xff, + 0x0d, 0x94, 0xd1, 0x80, 0x31, 0x28, 0x8f, 0x92, 0x99, 0x2e, 0x85, 0xcb, 0xe9, 0xcc, 0x6a, 0x50, + 0x12, 0xd1, 0x25, 0xd9, 0xba, 0x1c, 0x8f, 0x28, 0x09, 0xae, 0x26, 0x44, 0xb5, 0xcb, 0xf1, 0x88, + 0x76, 0x59, 0x2a, 0x12, 0xe2, 0xd5, 0xe5, 0x74, 0x66, 0xf7, 0xc0, 0x8d, 0x13, 0xb9, 0x58, 0x9e, + 0xa1, 0x75, 0xa5, 0x28, 0xcb, 0x97, 0x28, 0x1c, 0x44, 0x97, 0xdc, 0x89, 0xcd, 0xc9, 0xff, 0xce, + 0x86, 0x0a, 0x05, 0xc4, 0x5a, 0x98, 0x7e, 0x9c, 0x69, 0x26, 0x4b, 0x5d, 0x66, 0xd2, 0x07, 0x22, + 0x3a, 0xcf, 0x1e, 0x49, 0x6f, 0x80, 0x93, 0x8a, 0xb9, 0x08, 0x94, 0x4c, 0x88, 0x2b, 0x97, 0xe7, + 0x77, 0x0c, 0x67, 0x82, 0xe5, 0xd0, 0x11, 0xd2, 0x99, 0xdd, 0x87, 0xaa, 0x24, 0x0e, 0x29, 0xc8, + 0x7f, 0x60, 0xd6, 0x40, 0xd0, 0x79, 0x22, 0x46, 0x13, 0x19, 0xcd, 0x97, 0x14, 0xba, 0xc3, 0xf3, + 0x3b, 0xbb, 0x0f, 0x2e, 0xb1, 0xf6, 0x64, 0x19, 0x8b, 0x7a, 0xb5, 0x69, 0xb5, 0xb6, 0x3b, 0xb7, + 0x72, 0x46, 0x51, 0xc8, 0x0b, 0x3d, 0x6b, 0x81, 0x13, 0x8c, 0x82, 0x73, 0x71, 0x12, 0xab, 0xfa, + 0x4e, 0xc1, 0x41, 0xcf, 0xc8, 0x78, 0xae, 0xf5, 0x1b, 0xe0, 0xac, 0xa4, 0x6c, 0x1b, 0xec, 0x61, + 0x5f, 0x37, 0x13, 0xb7, 0x87, 0x7d, 0xff, 0x11, 0x54, 0x75, 0x9b, 0xb2, 0x26, 0x94, 0xd2, 0x24, + 0x30, 0xa3, 0xb2, 0xbd, 0xea, 0x5f, 0xdd, 0xe9, 0x1c, 0x55, 0x79, 0xee, 0x76, 0x91, 0xbb, 0xcf, + 0x01, 0x0a, 0xd8, 0x7f, 0xc3, 0xb1, 0xff, 0x83, 0x05, 0xce, 0x6a, 0xc2, 0xd8, 0x2e, 0x40, 0x38, + 0x11, 0x91, 0x0a, 0xa7, 0xa1, 0x48, 0x4c, 0xe0, 0x6b, 0x12, 0x76, 0x00, 0x95, 0x91, 0x52, 0xc9, + 0xaa, 0x03, 0xdf, 0x5e, 0x1f, 0xcf, 0xf6, 0x11, 0x6a, 0x06, 0x91, 0x4a, 0x96, 0x5c, 0xa3, 0x1a, + 0x0f, 0x01, 0x0a, 0x21, 0xb6, 0xdb, 0x53, 0xb1, 0x34, 0x5e, 0xf1, 0xc8, 0x76, 0xa0, 0x72, 0x39, + 0x9a, 0x67, 0xc2, 0x04, 0xa5, 0x2f, 0x9f, 0xda, 0x0f, 0x2d, 0xff, 0x27, 0x1b, 0x36, 0xcd, 0xb8, + 0xb2, 0x07, 0xb0, 0x49, 0xe3, 0x6a, 0x22, 0x7a, 0x75, 0xa6, 0x2b, 0x08, 0x3b, 0xcc, 0xf7, 0xd0, + 0x5a, 0x8c, 0xc6, 0x95, 0xde, 0x47, 0x26, 0xc6, 0x62, 0x2b, 0x95, 0x26, 0x62, 0x6a, 0x16, 0x0e, + 0x95, 0xa2, 0x2f, 0xa6, 0x61, 0x14, 0xaa, 0x50, 0x46, 0x1c, 0x55, 0xec, 0xc1, 0x2a, 0xeb, 0x32, + 0x79, 0xbc, 0xbd, 0xee, 0xf1, 0xef, 0x49, 0x0f, 0xc1, 0x5b, 0x7b, 0xe6, 0x15, 0x59, 0x7f, 0xb0, + 0x9e, 0xb5, 0x79, 0x92, 0xdc, 0xe9, 0x6d, 0x59, 0xb0, 0xf0, 0x2f, 0xf8, 0xfb, 0x04, 0xa0, 0x70, + 0xf9, 0xfa, 0x9d, 0xe2, 0xff, 0x68, 0x03, 0x9c, 0xc4, 0xb8, 0x43, 0x26, 0x23, 0x5a, 0x39, 0x5b, + 0xe1, 0x2c, 0x92, 0x89, 0x38, 0xa3, 0xfe, 0x26, 0x7b, 0x87, 0x7b, 0x5a, 0x46, 0x6d, 0xce, 0x8e, + 0xc0, 0x9b, 0x88, 0x34, 0x48, 0xc2, 0x18, 0x09, 0x33, 0xa4, 0xef, 0x61, 0x4e, 0x85, 0x9f, 0x76, + 0xbf, 0x40, 0x68, 0xae, 0xd6, 0x6d, 0xd8, 0x11, 0xfc, 0xff, 0x4a, 0x26, 0x4f, 0x45, 0x72, 0x16, + 0xc8, 0x28, 0x55, 0xc9, 0x28, 0x8c, 0x94, 0xa9, 0xc7, 0x0e, 0x3a, 0xfa, 0x9a, 0x94, 0xbd, 0x5c, + 0xc7, 0x6b, 0x57, 0x37, 0x24, 0xac, 0x03, 0x5b, 0x62, 0x11, 0xcb, 0x44, 0x99, 0x40, 0xf5, 0x87, + 0xe1, 0x7f, 0xfa, 0x13, 0x83, 0x72, 0x0a, 0x96, 0x7b, 0xa2, 0xb8, 0x34, 0x1e, 0x41, 0xed, 0x66, + 0x5c, 0x6f, 0xc4, 0xf1, 0x5d, 0xf0, 0xd6, 0x7c, 0x23, 0xf0, 0x2b, 0x02, 0x6a, 0x92, 0xf4, 0xc5, + 0x7f, 0x66, 0x81, 0xb3, 
0xda, 0x94, 0xec, 0x3d, 0x80, 0x73, 0xa5, 0xe2, 0x33, 0x5a, 0x98, 0xe6, + 0x11, 0x17, 0x25, 0x84, 0x60, 0x7b, 0xe0, 0xe1, 0x25, 0x35, 0x7a, 0xfd, 0x20, 0x59, 0xa4, 0x1a, + 0xf0, 0x2e, 0xb8, 0xd3, 0xdc, 0x5c, 0x2f, 0x45, 0x67, 0xba, 0xb2, 0x7e, 0x07, 0x9c, 0x48, 0x1a, + 0x9d, 0xde, 0xdf, 0x9b, 0x91, 0x24, 0x95, 0xbf, 0x0f, 0xb5, 0x9b, 0x1c, 0xb2, 0xdb, 0x50, 0x9d, + 0x86, 0x73, 0x45, 0x43, 0x85, 0x5f, 0x04, 0x73, 0xf3, 0x7f, 0xb5, 0x00, 0x8a, 0x01, 0x40, 0x42, + 0x70, 0x3a, 0x10, 0xb3, 0xa5, 0xa7, 0x61, 0x0e, 0xce, 0x85, 0xa9, 0xab, 0xa9, 0xf6, 0x9d, 0x97, + 0x87, 0xa6, 0xbd, 0x2a, 0x3b, 0x51, 0xaa, 0xbf, 0xa2, 0xcf, 0x7e, 0x7f, 0xa3, 0xaf, 0x68, 0xfe, + 0x42, 0xe3, 0x73, 0xb8, 0xf5, 0x92, 0xbb, 0xd7, 0x9c, 0xa7, 0xa2, 0xf7, 0xd6, 0x2a, 0xb6, 0xff, + 0x31, 0xb8, 0xf9, 0x76, 0x67, 0x0e, 0x94, 0xbb, 0xc3, 0x2f, 0xfa, 0xb5, 0x0d, 0x06, 0x50, 0x3d, + 0x1d, 0xf4, 0xf8, 0xe0, 0x49, 0xcd, 0x62, 0x9b, 0x50, 0x3a, 0x3d, 0x3d, 0xae, 0xd9, 0xcc, 0x85, + 0x4a, 0xef, 0xa8, 0x77, 0x3c, 0xa8, 0x95, 0xba, 0xb5, 0xe7, 0xd7, 0xbb, 0xd6, 0x2f, 0xd7, 0xbb, + 0xd6, 0x1f, 0xd7, 0xbb, 0xd6, 0xf7, 0x7f, 0xee, 0x6e, 0x8c, 0xab, 0xf4, 0xd7, 0xe7, 0xa3, 0xbf, + 0x02, 0x00, 0x00, 0xff, 0xff, 0xae, 0x2d, 0xf1, 0xce, 0x3a, 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto new file mode 100644 index 0000000000..a05c1f3de2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto @@ -0,0 +1,136 @@ +syntax = "proto3"; + +// Package pb provides the protobuf definition of LLB: low-level builder instruction. +// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. +package pb; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +// Op represents a vertex of the LLB DAG. +message Op { + // inputs is a set of input edges. + repeated Input inputs = 1; + oneof op { + ExecOp exec = 2; + SourceOp source = 3; + CopyOp copy = 4; + BuildOp build = 5; + } +} + +// Input represents an input edge for an Op. +message Input { + // digest of the marshaled input Op + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + // output index of the input Op + int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; +} + +// ExecOp executes a command in a container. +message ExecOp { + Meta meta = 1; + repeated Mount mounts = 2; +} + +// Meta is a set of arguments for ExecOp. +// Meta is unrelated to LLB metadata. +// FIXME: rename (ExecContext? ExecArgs?) +message Meta { + repeated string args = 1; + repeated string env = 2; + string cwd = 3; + string user = 4; + ProxyEnv proxy_env = 5; +} + +// Mount specifies how to mount an input Op as a filesystem. +message Mount { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; + string selector = 2; + string dest = 3; + int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; + bool readonly = 5; + MountType mountType = 6; + CacheOpt cacheOpt = 20; +} + +enum MountType { + BIND = 0; + SECRET = 1; + SSH = 2; + CACHE = 3; +} + +message CacheOpt { + string ID = 1; +} + +// CopyOp copies files across Ops. +message CopyOp { + repeated CopySource src = 1; + string dest = 2; +} + +// CopySource specifies a source for CopyOp. 
+message CopySource { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; + string selector = 2; +} + +// SourceOp specifies a source such as build contexts and images. +message SourceOp { + // TODO: use source type or any type instead of URL protocol. + // identifier e.g. local://, docker-image://, git://, https://... + string identifier = 1; + // attrs are defined in attr.go + map<string, string> attrs = 2; +} + +// BuildOp is used for nested build invocation. +message BuildOp { + int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; + map<string, BuildInput> inputs = 2; + Definition def = 3; + map<string, string> attrs = 4; + // outputs +} + +// BuildInput is used for BuildOp. +message BuildInput { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; +} + +// OpMetadata is a per-vertex metadata entry, which can be defined for an arbitrary Op vertex and is overridable at run time. +message OpMetadata { + // ignore_cache specifies to ignore the cache for this Op. + bool ignore_cache = 1; + // Description can be used for keeping any text fields that the builder doesn't parse + map<string, string> description = 2; + WorkerConstraint worker_constraint = 3; + ExportCache export_cache = 4; +} + +message ExportCache { + bool Value = 1; +} + +message ProxyEnv { + string http_proxy = 1; + string https_proxy = 2; + string ftp_proxy = 3; + string no_proxy = 4; +} + +// WorkerConstraint is experimental and likely to be changed. +message WorkerConstraint { + repeated string filter = 1; // containerd-style filter +} + +// Definition is the LLB definition structure with per-vertex metadata entries +message Definition { + // def is a list of marshaled Op messages + repeated bytes def = 1; + // metadata contains metadata for each of the Op messages. + // A key must be an LLB op digest string. Currently, an empty string is not expected as a key, but it may change in the future. + map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go new file mode 100644 index 0000000000..7b907ad32b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go @@ -0,0 +1,55 @@ +// +build !windows + +package appdefaults + +import ( + "os" + "path/filepath" + "strings" +) + +const ( + Address = "unix:///run/buildkit/buildkitd.sock" + Root = "/var/lib/buildkit" +) + +// UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock +func UserAddress() string { + // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. + xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") + if xdgRuntimeDir != "" { + dirs := strings.Split(xdgRuntimeDir, ":") + return "unix://" + filepath.Join(dirs[0], "buildkit", "buildkitd.sock") + } + return Address +} + +// EnsureUserAddressDir sets the sticky bit on the buildkit directory under XDG_RUNTIME_DIR if XDG_RUNTIME_DIR is set.
+// See https://github.com/opencontainers/runc/issues/1694 +func EnsureUserAddressDir() error { + xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") + if xdgRuntimeDir != "" { + dirs := strings.Split(xdgRuntimeDir, ":") + dir := filepath.Join(dirs[0], "buildkit") + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + return os.Chmod(dir, 0700|os.ModeSticky) + } + return nil +} + +// UserRoot typically returns /home/$USER/.local/share/buildkit +func UserRoot() string { + // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. + xdgDataHome := os.Getenv("XDG_DATA_HOME") + if xdgDataHome != "" { + dirs := strings.Split(xdgDataHome, ":") + return filepath.Join(dirs[0], "buildkit") + } + home := os.Getenv("HOME") + if home != "" { + return filepath.Join(home, ".local", "share", "buildkit") + } + return Root +} diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go new file mode 100644 index 0000000000..dbc96c8095 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go @@ -0,0 +1,18 @@ +package appdefaults + +const ( + Address = "npipe:////./pipe/buildkitd" + Root = ".buildstate" +) + +func UserAddress() string { + return Address +} + +func EnsureUserAddressDir() error { + return nil +} + +func UserRoot() string { + return Root +} diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go new file mode 100644 index 0000000000..3f27a6f304 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go @@ -0,0 +1,310 @@ +package progressui + +import ( + "context" + "fmt" + "io" + "strings" + "time" + + "github.com/containerd/console" + "github.com/moby/buildkit/client" + "github.com/morikuni/aec" + digest "github.com/opencontainers/go-digest" + "github.com/tonistiigi/units" + "golang.org/x/time/rate" +) + +func DisplaySolveStatus(ctx context.Context, c console.Console, ch chan *client.SolveStatus) error { + disp := &display{c: c} + + t := newTrace() + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + displayLimiter := rate.NewLimiter(rate.Every(70*time.Millisecond), 1) + + var done bool + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + case ss, ok := <-ch: + if ok { + t.update(ss) + } else { + done = true + } + } + + if done { + disp.print(t.displayInfo(), true) + t.printErrorLogs(c) + return nil + } else if displayLimiter.Allow() { + disp.print(t.displayInfo(), false) + } + } +} + +type displayInfo struct { + startTime time.Time + jobs []job + countTotal int + countCompleted int +} + +type job struct { + startTime *time.Time + completedTime *time.Time + name string + status string + hasError bool + isCanceled bool +} + +type trace struct { + localTimeDiff time.Duration + vertexes []*vertex + byDigest map[digest.Digest]*vertex +} + +type vertex struct { + *client.Vertex + statuses []*status + byID map[string]*status + logs []*client.VertexLog + indent string +} + +type status struct { + *client.VertexStatus +} + +func newTrace() *trace { + return &trace{ + byDigest: make(map[digest.Digest]*vertex), + } +} + +func (t *trace) update(s *client.SolveStatus) { + for _, v := range s.Vertexes { + prev, ok := t.byDigest[v.Digest] + if !ok { + t.byDigest[v.Digest] = &vertex{ + byID: make(map[string]*status), + } + } + if v.Started != nil && (prev == nil || prev.Started == nil) { + if 
t.localTimeDiff == 0 { + t.localTimeDiff = time.Since(*v.Started) + } + t.vertexes = append(t.vertexes, t.byDigest[v.Digest]) + } + t.byDigest[v.Digest].Vertex = v + } + for _, s := range s.Statuses { + v, ok := t.byDigest[s.Vertex] + if !ok { + continue // shouldn't happen + } + prev, ok := v.byID[s.ID] + if !ok { + v.byID[s.ID] = &status{VertexStatus: s} + } + if s.Started != nil && (prev == nil || prev.Started == nil) { + v.statuses = append(v.statuses, v.byID[s.ID]) + } + v.byID[s.ID].VertexStatus = s + } + for _, l := range s.Logs { + v, ok := t.byDigest[l.Vertex] + if !ok { + continue // shouldn't happen + } + v.logs = append(v.logs, l) + } +} + +func (t *trace) printErrorLogs(f io.Writer) { + for _, v := range t.vertexes { + if v.Error != "" && !strings.HasSuffix(v.Error, context.Canceled.Error()) { + fmt.Fprintln(f, "------") + fmt.Fprintf(f, " > %s:\n", v.Name) + for _, l := range v.logs { + switch l.Stream { + case 1: + f.Write(l.Data) + case 2: + f.Write(l.Data) + } + } + fmt.Fprintln(f, "------") + } + } +} + +func (t *trace) displayInfo() (d displayInfo) { + d.startTime = time.Now() + if t.localTimeDiff != 0 { + d.startTime = (*t.vertexes[0].Started).Add(t.localTimeDiff) + } + d.countTotal = len(t.byDigest) + for _, v := range t.byDigest { + if v.Completed != nil { + d.countCompleted++ + } + } + + for _, v := range t.vertexes { + j := job{ + startTime: addTime(v.Started, t.localTimeDiff), + completedTime: addTime(v.Completed, t.localTimeDiff), + name: strings.Replace(v.Name, "\t", " ", -1), + } + if v.Error != "" { + if strings.HasSuffix(v.Error, context.Canceled.Error()) { + j.isCanceled = true + j.name = "CANCELED " + j.name + } else { + j.hasError = true + j.name = "ERROR " + j.name + } + } + if v.Cached { + j.name = "CACHED " + j.name + } + j.name = v.indent + j.name + d.jobs = append(d.jobs, j) + for _, s := range v.statuses { + j := job{ + startTime: addTime(s.Started, t.localTimeDiff), + completedTime: addTime(s.Completed, t.localTimeDiff), + name: v.indent + "=> " + s.ID, + } + if s.Total != 0 { + j.status = fmt.Sprintf("%.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total)) + } else if s.Current != 0 { + j.status = fmt.Sprintf("%.2f", units.Bytes(s.Current)) + } + d.jobs = append(d.jobs, j) + } + } + + return d +} + +func addTime(tm *time.Time, d time.Duration) *time.Time { + if tm == nil { + return nil + } + t := (*tm).Add(d) + return &t +} + +type display struct { + c console.Console + lineCount int + repeated bool +} + +func (disp *display) print(d displayInfo, all bool) { + // this output is inspired by Buck + width := 80 + height := 10 + size, err := disp.c.Size() + if err == nil && size.Width > 0 && size.Height > 0 { + width = int(size.Width) + height = int(size.Height) + } + + if !all { + d.jobs = wrapHeight(d.jobs, height-2) + } + + b := aec.EmptyBuilder + for i := 0; i <= disp.lineCount; i++ { + b = b.Up(1) + } + if !disp.repeated { + b = b.Down(1) + } + disp.repeated = true + fmt.Fprint(disp.c, b.Column(0).ANSI) + + statusStr := "" + if d.countCompleted > 0 && d.countCompleted == d.countTotal && all { + statusStr = "FINISHED" + } + + fmt.Fprint(disp.c, aec.Hide) + defer fmt.Fprint(disp.c, aec.Show) + + out := fmt.Sprintf("[+] Building %.1fs (%d/%d) %s", time.Since(d.startTime).Seconds(), d.countCompleted, d.countTotal, statusStr) + out = align(out, "", width) + fmt.Fprintln(disp.c, out) + lineCount := 0 + for _, j := range d.jobs { + endTime := time.Now() + if j.completedTime != nil { + endTime = *j.completedTime + } + if j.startTime == nil { + continue 
+ } + dt := endTime.Sub(*j.startTime).Seconds() + if dt < 0.05 { + dt = 0 + } + pfx := " => " + timer := fmt.Sprintf(" %3.1fs\n", dt) + status := j.status + showStatus := false + + left := width - len(pfx) - len(timer) - 1 + if status != "" { + if left+len(status) > 20 { + showStatus = true + left -= len(status) + 1 + } + } + if left < 12 { // too small screen to show progress + continue + } + if len(j.name) > left { + j.name = j.name[:left] + } + + out := pfx + j.name + if showStatus { + out += " " + status + } + + out = align(out, timer, width) + if j.completedTime != nil { + color := aec.BlueF + if j.isCanceled { + color = aec.YellowF + } else if j.hasError { + color = aec.RedF + } + out = aec.Apply(out, color) + } + fmt.Fprint(disp.c, out) + lineCount++ + } + disp.lineCount = lineCount +} + +func align(l, r string, w int) string { + return fmt.Sprintf("%-[2]*[1]s %[3]s", l, w-len(r)-1, r) +} + +func wrapHeight(j []job, limit int) []job { + if len(j) > limit { + j = j[len(j)-limit:] + } + return j +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go new file mode 100644 index 0000000000..c607c4db09 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/path_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +// DefaultPathEnv is the unix-style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character. +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go new file mode 100644 index 0000000000..cbfe2c1576 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/path_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no knowledge of what the default path should be. +const DefaultPathEnv = "" + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user-provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates the path to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters.
Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go new file mode 100644 index 0000000000..663625c8d9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go @@ -0,0 +1,29 @@ +// +build linux + +package system + +import ( + "sync" + + "golang.org/x/sys/unix" +) + +var seccompSupported bool +var seccompOnce sync.Once + +func SeccompSupported() bool { + seccompOnce.Do(func() { + seccompSupported = getSeccompSupported() + }) + return seccompSupported +} + +func getSeccompSupported() bool { + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go new file mode 100644 index 0000000000..305d6c9ef0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go @@ -0,0 +1,7 @@ +// +build !linux + +package system + +func SeccompSupported() bool { + return false +} diff --git a/vendor/github.com/tonistiigi/units/LICENSE b/vendor/github.com/tonistiigi/units/LICENSE new file mode 100644 index 0000000000..5c1095df0d --- /dev/null +++ b/vendor/github.com/tonistiigi/units/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Tõnis Tiigi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tonistiigi/units/bytes.go b/vendor/github.com/tonistiigi/units/bytes.go new file mode 100644 index 0000000000..14a20fad8b --- /dev/null +++ b/vendor/github.com/tonistiigi/units/bytes.go @@ -0,0 +1,117 @@ +/* + Simple byte size formatting. 
+ + This package implements types that can be used in stdlib formatting functions + like `fmt.Printf` to control the output of the expected printed string. + + + Floating point flags %f and %g print the value using the correct unit + suffix. Decimal units are the default; # switches to binary units. If a value is + best represented as full bytes, integer bytes are printed instead. + + Examples: + fmt.Printf("%.2f", 123 * B) => "123B" + fmt.Printf("%.2f", 1234 * B) => "1.23kB" + fmt.Printf("%g", 1200 * B) => "1.2kB" + fmt.Printf("%#g", 1024 * B) => "1KiB" + + + Integer flag %d always prints the value in bytes. The # flag adds a unit suffix. + + Examples: + fmt.Printf("%d", 1234 * B) => "1234" + fmt.Printf("%#d", 1234 * B) => "1234B" + + %v is equal to %g + +*/ +package units + +import ( + "fmt" + "io" + "math" + "math/big" +) + +type Bytes int64 + +const ( + B Bytes = 1 << (10 * iota) + KiB + MiB + GiB + TiB + PiB + EiB + + KB = 1e3 * B + MB = 1e3 * KB + GB = 1e3 * MB + TB = 1e3 * GB + PB = 1e3 * TB + EB = 1e3 * PB +) + +var units = map[bool][]string{ + false: []string{ + "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", + }, + true: []string{ + "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", + }, +} + +func (b Bytes) Format(f fmt.State, c rune) { + switch c { + case 'f', 'g': + fv, unit, ok := b.floatValue(f.Flag('#')) + if !ok { + b.formatInt(f, 'd', true) + return + } + big.NewFloat(fv).Format(f, c) + io.WriteString(f, unit) + case 'd': + b.formatInt(f, c, f.Flag('#')) + default: + if f.Flag('#') { + fmt.Fprintf(f, "bytes(%d)", int64(b)) + } else { + fmt.Fprintf(f, "%g", b) + } + } +} + +func (b Bytes) formatInt(f fmt.State, c rune, withUnit bool) { + big.NewInt(int64(b)).Format(f, c) + if withUnit { + io.WriteString(f, "B") + } +} + +func (b Bytes) floatValue(binary bool) (float64, string, bool) { + i := 0 + var baseUnit Bytes = 1 + if b < 0 { + baseUnit *= -1 + } + for { + next := baseUnit + if binary { + next *= 1 << 10 + } else { + next *= 1e3 + } + if (baseUnit > 0 && b >= next) || (baseUnit < 0 && b <= next) { + i++ + baseUnit = next + continue + } + if i == 0 { + return 0, "", false + } + + return float64(b) / math.Abs(float64(baseUnit)), units[binary][i], true + } +} diff --git a/vendor/github.com/tonistiigi/units/readme.md b/vendor/github.com/tonistiigi/units/readme.md new file mode 100644 index 0000000000..5c67d30d43 --- /dev/null +++ b/vendor/github.com/tonistiigi/units/readme.md @@ -0,0 +1,29 @@ +#### Simple byte size formatting. + +This package implements types that can be used in stdlib formatting functions +like `fmt.Printf` to control the output of the expected printed string. + +Floating point flags `%f` and `%g` print the value using the correct unit +suffix. Decimal units are the default; `#` switches to binary units. If a value is +best represented as full bytes, integer bytes are printed instead. + +##### Examples: + +``` +fmt.Printf("%.2f", 123 * B) => "123B" +fmt.Printf("%.2f", 1234 * B) => "1.23kB" +fmt.Printf("%g", 1200 * B) => "1.2kB" +fmt.Printf("%#g", 1024 * B) => "1KiB" +``` + + +Integer flag `%d` always prints the value in bytes. The `#` flag adds a unit suffix. + +##### Examples: + +``` +fmt.Printf("%d", 1234 * B) => "1234" +fmt.Printf("%#d", 1234 * B) => "1234B" +``` + +`%v` is equal to `%g` \ No newline at end of file
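For reviewers, a minimal sketch of how a caller might drive the vendored `progressui.DisplaySolveStatus` helper with a console and a channel of solve statuses. This is a hedged illustration, not code from the patch: it assumes stderr is an interactive terminal, and the solve loop that would feed the channel is elided.

```go
package main

import (
	"context"
	"os"

	"github.com/containerd/console"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/progress/progressui"
)

func main() {
	ctx := context.Background()

	// Assumes stderr is a real TTY; ConsoleFromFile returns an error otherwise.
	cons, err := console.ConsoleFromFile(os.Stderr)
	if err != nil {
		panic(err)
	}

	// The solver side would send *client.SolveStatus updates on this channel.
	ch := make(chan *client.SolveStatus)
	done := make(chan error, 1)
	go func() {
		// DisplaySolveStatus redraws the job list until ch is closed,
		// then prints the final state and any error logs.
		done <- progressui.DisplaySolveStatus(ctx, cons, ch)
	}()

	// ... feed ch from a solve loop here (elided) ...

	close(ch) // closing the channel flushes the final display state
	if err := <-done; err != nil {
		panic(err)
	}
}
```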