build: basic buildkit progress support

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi 2018-04-19 10:07:27 -07:00 committed by Tibor Vass
parent 656fe85c74
commit 0f97642915
65 changed files with 14551 additions and 170 deletions


@ -396,6 +396,7 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
Target: options.target,
RemoteContext: remote,
Platform: options.platform,
Version: types.BuilderV1,
}
if s != nil {
@ -420,9 +421,9 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
defer response.Body.Close()
imageID := ""
aux := func(m jsonmessage.JSONMessage) {
aux := func(msg jsonmessage.JSONMessage) {
var result types.BuildResult
if err := json.Unmarshal(*m.Aux, &result); err != nil {
if err := json.Unmarshal(*msg.Aux, &result); err != nil {
fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err)
} else {
imageID = result.ID


@ -1,18 +1,27 @@
package image
import (
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"github.com/containerd/console"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/urlutil"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
"golang.org/x/sync/errgroup"
@ -29,13 +38,13 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
return errors.Errorf("buildkit not supported by daemon")
}
remote := clientSessionRemote
local := false
var remote string
var body io.Reader
switch {
case options.contextFromStdin():
return errors.Errorf("stdin not implemented")
body = os.Stdin
case isLocalDir(options.context):
local = true
remote = clientSessionRemote
case urlutil.IsGitURL(options.context):
remote = options.context
case urlutil.IsURL(options.context):
@ -51,7 +60,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
// statusContext = opentracing.ContextWithSpan(statusContext, span)
// }
if local {
if remote == clientSessionRemote {
s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{
{
Name: "context",
@ -70,7 +79,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
return s.Run(ctx, dockerCli.Client().DialSession)
return s.Run(context.TODO(), dockerCli.Client().DialSession)
})
eg.Go(func() error {
@ -78,6 +87,8 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
s.Close()
}()
buildID := stringid.GenerateRandomID()
configFile := dockerCli.ConfigFile()
buildOptions := types.ImageBuildOptions{
Memory: options.memory.Value(),
@ -109,16 +120,50 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
Target: options.target,
RemoteContext: remote,
Platform: options.platform,
SessionID: "buildkit:" + s.ID(),
SessionID: s.ID(),
Version: types.BuilderBuildKit,
BuildID: buildID,
}
response, err := dockerCli.Client().ImageBuild(ctx, nil, buildOptions)
response, err := dockerCli.Client().ImageBuild(context.Background(), body, buildOptions)
if err != nil {
return err
}
defer response.Body.Close()
if _, err := io.Copy(os.Stdout, response.Body); err != nil {
done := make(chan struct{})
defer close(done)
eg.Go(func() error {
select {
case <-ctx.Done():
return dockerCli.Client().BuildCancel(context.TODO(), buildID)
case <-done:
}
return nil
})
t := newTracer()
var auxCb func(jsonmessage.JSONMessage)
if c, err := console.ConsoleFromFile(os.Stderr); err == nil {
// not using shared context to not disrupt display but let it finish reporting errors
auxCb = t.write
eg.Go(func() error {
return progressui.DisplaySolveStatus(context.TODO(), c, t.displayCh)
})
defer close(t.displayCh)
}
err = jsonmessage.DisplayJSONMessagesStream(response.Body, os.Stdout, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), auxCb)
if err != nil {
if jerr, ok := err.(*jsonmessage.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
}
// if options.quiet {
// fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
// }
return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
}
@ -133,3 +178,60 @@ func resetUIDAndGID(s *fsutil.Stat) bool {
s.Gid = uint32(0)
return true
}
type tracer struct {
displayCh chan *client.SolveStatus
}
func newTracer() *tracer {
return &tracer{
displayCh: make(chan *client.SolveStatus),
}
}
func (t *tracer) write(msg jsonmessage.JSONMessage) {
var resp controlapi.StatusResponse
var dt []byte
if err := json.Unmarshal(*msg.Aux, &dt); err != nil {
return
}
if err := (&resp).Unmarshal(dt); err != nil {
return
}
s := client.SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &client.Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &client.VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &client.VertexLog{
Vertex: v.Vertex,
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp,
})
}
t.displayCh <- &s
}
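
The double decode in write above relies on a detail of encoding/json: a JSON string unmarshals into a []byte by base64-decoding it, so the aux field can carry the protobuf-encoded controlapi.StatusResponse as a plain JSON string. A minimal, self-contained illustration of just that decoding step (the payload here is made up):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The daemon is expected to ship the protobuf-encoded StatusResponse
	// inside the aux field as a base64 JSON string; unmarshalling into
	// []byte decodes it back into raw bytes.
	aux := json.RawMessage(`"aGVsbG8="`) // base64 for "hello", illustrative only
	var dt []byte
	if err := json.Unmarshal(aux, &dt); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", dt) // prints "hello"
	// In tracer.write the decoded bytes are then fed to
	// (&controlapi.StatusResponse{}).Unmarshal(dt).
}
```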


@ -49,7 +49,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
// Count the number of times handleTarget is called;
// if it is called more than once, that should be considered an error in a trusted push.
cnt := 0
handleTarget := func(m jsonmessage.JSONMessage) {
handleTarget := func(msg jsonmessage.JSONMessage) {
cnt++
if cnt > 1 {
// handleTarget should only be called once. This will be treated as an error.
@ -57,7 +57,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
}
var pushResult types.PushResult
err := json.Unmarshal(*m.Aux, &pushResult)
err := json.Unmarshal(*msg.Aux, &pushResult)
if err == nil && pushResult.Tag != "" {
if dgst, err := digest.Parse(pushResult.Digest); err == nil {
h, err := hex.DecodeString(dgst.Hex())


@ -6,7 +6,7 @@ github.com/coreos/etcd v3.2.1
github.com/cpuguy83/go-md2man v1.0.8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5
github.com/docker/docker 162ba6016def672690ee4a1f3978368853a1e149
github.com/docker/docker experimental-buildkit https://github.com/tonistiigi/docker
github.com/docker/docker-credential-helpers 3c90bd29a46b943b2a9842987b58fb91a7c1819b
# the docker/go package contains a customized version of canonical/json
# and is used by Notary. The package is periodically rebased on current Go versions.
@ -91,3 +91,6 @@ k8s.io/client-go kubernetes-1.8.2
k8s.io/kubernetes v1.8.2
k8s.io/kube-openapi 61b46af70dfed79c6d24530cd23b41440a7f22a5
vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
github.com/tonistiigi/units 29de085e9400559bd68aea2e7bc21566e7b8281d
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716

vendor/github.com/containerd/console/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/containerd/console/README.md generated vendored Normal file

@ -0,0 +1,17 @@
# console
[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console)
Golang package for dealing with consoles. Light on deps and a simple API.
## Modifying the current process
```go
current := console.Current()
defer current.Reset()
if err := current.SetRaw(); err != nil {
}
ws, err := current.Size()
current.Resize(ws)
```
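
The README snippet above elides error handling; here is the same flow with errors checked, as a self-contained sketch using only the methods defined in console.go below:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/console"
)

func run() error {
	current := console.Current() // panics if stdin is not a console
	defer current.Reset()        // restore the original terminal state

	if err := current.SetRaw(); err != nil {
		return fmt.Errorf("set raw mode: %v", err)
	}
	ws, err := current.Size()
	if err != nil {
		return fmt.Errorf("read window size: %v", err)
	}
	return current.Resize(ws)
}

func main() {
	if err := run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```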

vendor/github.com/containerd/console/console.go generated vendored Normal file

@ -0,0 +1,78 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"errors"
"io"
"os"
)
var ErrNotAConsole = errors.New("provided file is not a console")
type Console interface {
io.Reader
io.Writer
io.Closer
// Resize resizes the console to the provided window size
Resize(WinSize) error
// ResizeFrom resizes the calling console to the size of the
// provided console
ResizeFrom(Console) error
// SetRaw sets the console in raw mode
SetRaw() error
// DisableEcho disables echo on the console
DisableEcho() error
// Reset restores the console to its original state
Reset() error
// Size returns the window size of the console
Size() (WinSize, error)
// Fd returns the console's file descriptor
Fd() uintptr
// Name returns the console's file name
Name() string
}
// WinSize specifies the window size of the console
type WinSize struct {
// Height of the console
Height uint16
// Width of the console
Width uint16
x uint16
y uint16
}
// Current returns the current process's console
func Current() Console {
c, err := ConsoleFromFile(os.Stdin)
if err != nil {
// stdin should always be a console for the design
// of this function
panic(err)
}
return c
}
// ConsoleFromFile returns a console using the provided file
func ConsoleFromFile(f *os.File) (Console, error) {
if err := checkConsole(f); err != nil {
return nil, err
}
return newMaster(f)
}

vendor/github.com/containerd/console/console_linux.go generated vendored Normal file

@ -0,0 +1,271 @@
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"io"
"os"
"sync"
"golang.org/x/sys/unix"
)
const (
maxEvents = 128
)
// Epoller manages multiple epoll consoles using edge-triggered epoll api so we
// don't have to deal with repeated wake-ups from EPOLLERR or EPOLLHUP.
// For more details, see:
// - https://github.com/systemd/systemd/pull/4262
// - https://github.com/moby/moby/issues/27202
//
// Example usage of Epoller and EpollConsole can be as follow:
//
// epoller, _ := NewEpoller()
// epollConsole, _ := epoller.Add(console)
// go epoller.Wait()
// var (
// b bytes.Buffer
// wg sync.WaitGroup
// )
// wg.Add(1)
// go func() {
// io.Copy(&b, epollConsole)
// wg.Done()
// }()
// // perform I/O on the console
// epollConsole.Shutdown(epoller.CloseConsole)
// wg.Wait()
// epollConsole.Close()
type Epoller struct {
efd int
mu sync.Mutex
fdMapping map[int]*EpollConsole
}
// NewEpoller returns an instance of epoller with a valid epoll fd.
func NewEpoller() (*Epoller, error) {
efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
if err != nil {
return nil, err
}
return &Epoller{
efd: efd,
fdMapping: make(map[int]*EpollConsole),
}, nil
}
// Add creates an epoll console based on the provided console. The console will
// be registered with EPOLLET (i.e. using edge-triggered notification) and its
// file descriptor will be set to non-blocking mode. After this, the user should
// use the returned console to perform I/O.
func (e *Epoller) Add(console Console) (*EpollConsole, error) {
sysfd := int(console.Fd())
// Set sysfd to non-blocking mode
if err := unix.SetNonblock(sysfd, true); err != nil {
return nil, err
}
ev := unix.EpollEvent{
Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET,
Fd: int32(sysfd),
}
if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil {
return nil, err
}
ef := &EpollConsole{
Console: console,
sysfd: sysfd,
readc: sync.NewCond(&sync.Mutex{}),
writec: sync.NewCond(&sync.Mutex{}),
}
e.mu.Lock()
e.fdMapping[sysfd] = ef
e.mu.Unlock()
return ef, nil
}
// Wait starts the loop to wait for its consoles' notifications and signals the
// appropriate console that it can perform I/O.
func (e *Epoller) Wait() error {
events := make([]unix.EpollEvent, maxEvents)
for {
n, err := unix.EpollWait(e.efd, events, -1)
if err != nil {
// EINTR: The call was interrupted by a signal handler before either
// any of the requested events occurred or the timeout expired
if err == unix.EINTR {
continue
}
return err
}
for i := 0; i < n; i++ {
ev := &events[i]
// the console is ready to be read from
if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
epfile.signalRead()
}
}
// the console is ready to be written to
if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
epfile.signalWrite()
}
}
}
}
}
// CloseConsole unregisters the console's file descriptor from the epoll interface
func (e *Epoller) CloseConsole(fd int) error {
e.mu.Lock()
defer e.mu.Unlock()
delete(e.fdMapping, fd)
return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{})
}
func (e *Epoller) getConsole(sysfd int) *EpollConsole {
e.mu.Lock()
f := e.fdMapping[sysfd]
e.mu.Unlock()
return f
}
// Close the epoll fd
func (e *Epoller) Close() error {
return unix.Close(e.efd)
}
// EpollConsole acts like a console but registers its file descriptor with an
// epoll fd and uses the epoll API to perform I/O.
type EpollConsole struct {
Console
readc *sync.Cond
writec *sync.Cond
sysfd int
closed bool
}
// Read reads up to len(p) bytes into p. It returns the number of bytes read
// (0 <= n <= len(p)) and any error encountered.
//
// If the console's read returns EAGAIN or EIO, we assume that it's a
// temporary error because the other side went away, and wait for the signal
// generated by an epoll event to continue.
func (ec *EpollConsole) Read(p []byte) (n int, err error) {
var read int
ec.readc.L.Lock()
defer ec.readc.L.Unlock()
for {
read, err = ec.Console.Read(p[n:])
n += read
if err != nil {
var hangup bool
if perr, ok := err.(*os.PathError); ok {
hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
} else {
hangup = (err == unix.EAGAIN || err == unix.EIO)
}
// if the other end disappears, assume this is temporary and wait for the
// signal to continue again, unless we didn't read anything and the
// console is already marked as closed, in which case we should exit
if hangup && !(n == 0 && len(p) > 0 && ec.closed) {
ec.readc.Wait()
continue
}
}
break
}
// if we didn't read anything then return io.EOF to end gracefully
if n == 0 && len(p) > 0 && err == nil {
err = io.EOF
}
// signal for others that we finished the read
ec.readc.Signal()
return n, err
}
// Write writes len(p) bytes from p to the console. It returns the number of
// bytes written from p (0 <= n <= len(p)) and any error encountered that
// caused the write to stop early.
//
// If a write to the console returns EAGAIN or EIO, we assume that it's a
// temporary error because the other side went away, and wait for the signal
// generated by an epoll event to continue.
func (ec *EpollConsole) Write(p []byte) (n int, err error) {
var written int
ec.writec.L.Lock()
defer ec.writec.L.Unlock()
for {
written, err = ec.Console.Write(p[n:])
n += written
if err != nil {
var hangup bool
if perr, ok := err.(*os.PathError); ok {
hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
} else {
hangup = (err == unix.EAGAIN || err == unix.EIO)
}
// if the other end disappears, assume this is temporary and wait for the
// signal to continue again.
if hangup {
ec.writec.Wait()
continue
}
}
// unrecoverable error, break the loop and return the error
break
}
if n < len(p) && err == nil {
err = io.ErrShortWrite
}
// signal for others that we finished the write
ec.writec.Signal()
return n, err
}
// Shutdown closes the file descriptor and signals waiters for this fd.
// It accepts a callback which will be called with the console's fd. The
// callback typically will be used to do further cleanup such as unregister the
// console's fd from the epoll interface.
// User should call Shutdown and wait for all I/O operation to be finished
// before closing the console.
func (ec *EpollConsole) Shutdown(close func(int) error) error {
ec.readc.L.Lock()
defer ec.readc.L.Unlock()
ec.writec.L.Lock()
defer ec.writec.L.Unlock()
ec.readc.Broadcast()
ec.writec.Broadcast()
ec.closed = true
return close(ec.sysfd)
}
// signalRead signals that the console is readable.
func (ec *EpollConsole) signalRead() {
ec.readc.Signal()
}
// signalWrite signals that the console is writable.
func (ec *EpollConsole) signalWrite() {
ec.writec.Signal()
}

vendor/github.com/containerd/console/console_unix.go generated vendored Normal file

@ -0,0 +1,158 @@
// +build darwin freebsd linux openbsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"os"
"golang.org/x/sys/unix"
)
// NewPty creates a new pty pair
// The master is returned as the first console and a string
// with the path to the pty slave is returned as the second
func NewPty() (Console, string, error) {
f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
if err != nil {
return nil, "", err
}
slave, err := ptsname(f)
if err != nil {
return nil, "", err
}
if err := unlockpt(f); err != nil {
return nil, "", err
}
m, err := newMaster(f)
if err != nil {
return nil, "", err
}
return m, slave, nil
}
type master struct {
f *os.File
original *unix.Termios
}
func (m *master) Read(b []byte) (int, error) {
return m.f.Read(b)
}
func (m *master) Write(b []byte) (int, error) {
return m.f.Write(b)
}
func (m *master) Close() error {
return m.f.Close()
}
func (m *master) Resize(ws WinSize) error {
return tcswinsz(m.f.Fd(), ws)
}
func (m *master) ResizeFrom(c Console) error {
ws, err := c.Size()
if err != nil {
return err
}
return m.Resize(ws)
}
func (m *master) Reset() error {
if m.original == nil {
return nil
}
return tcset(m.f.Fd(), m.original)
}
func (m *master) getCurrent() (unix.Termios, error) {
var termios unix.Termios
if err := tcget(m.f.Fd(), &termios); err != nil {
return unix.Termios{}, err
}
return termios, nil
}
func (m *master) SetRaw() error {
rawState, err := m.getCurrent()
if err != nil {
return err
}
rawState = cfmakeraw(rawState)
rawState.Oflag = rawState.Oflag | unix.OPOST
return tcset(m.f.Fd(), &rawState)
}
func (m *master) DisableEcho() error {
rawState, err := m.getCurrent()
if err != nil {
return err
}
rawState.Lflag = rawState.Lflag &^ unix.ECHO
return tcset(m.f.Fd(), &rawState)
}
func (m *master) Size() (WinSize, error) {
return tcgwinsz(m.f.Fd())
}
func (m *master) Fd() uintptr {
return m.f.Fd()
}
func (m *master) Name() string {
return m.f.Name()
}
// checkConsole checks if the provided file is a console
func checkConsole(f *os.File) error {
var termios unix.Termios
if tcget(f.Fd(), &termios) != nil {
return ErrNotAConsole
}
return nil
}
func newMaster(f *os.File) (Console, error) {
m := &master{
f: f,
}
t, err := m.getCurrent()
if err != nil {
return nil, err
}
m.original = &t
return m, nil
}
// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
// created by us acts normally. In particular, a not-very-well-known quirk of
// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
// problem for terminal emulators, because we relay data from the terminal we
// also relay that funky line discipline.
func ClearONLCR(fd uintptr) error {
return setONLCR(fd, false)
}
// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
// created by us acts as intended for a terminal emulator.
func SetONLCR(fd uintptr) error {
return setONLCR(fd, true)
}
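
A hedged usage sketch for the pty helpers above (newRawPty is a hypothetical name; the imports assumed are os and the vendored github.com/containerd/console package, and applying ClearONLCR to the slave end is an illustrative choice, not something this file prescribes):

```go
// newRawPty allocates a pty pair and clears the default +onlcr flag on the
// slave end before it is handed to a child process (hypothetical helper).
func newRawPty() (console.Console, *os.File, error) {
	master, slavePath, err := console.NewPty()
	if err != nil {
		return nil, nil, err
	}
	slave, err := os.OpenFile(slavePath, os.O_RDWR, 0)
	if err != nil {
		master.Close()
		return nil, nil, err
	}
	// Linux unix98 ptys come up with +onlcr; clear it so the raw byte stream
	// is relayed without the extra \r translation (see ClearONLCR above).
	if err := console.ClearONLCR(slave.Fd()); err != nil {
		slave.Close()
		master.Close()
		return nil, nil, err
	}
	return master, slave, nil
}
```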

vendor/github.com/containerd/console/console_windows.go generated vendored Normal file

@ -0,0 +1,216 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"fmt"
"os"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
var (
vtInputSupported bool
ErrNotImplemented = errors.New("not implemented")
)
func (m *master) initStdios() {
m.in = windows.Handle(os.Stdin.Fd())
if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil {
vtInputSupported = true
}
// Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles.
windows.SetConsoleMode(m.in, m.inMode)
} else {
fmt.Printf("failed to get console mode for stdin: %v\n", err)
}
m.out = windows.Handle(os.Stdout.Fd())
if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil {
if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
} else {
windows.SetConsoleMode(m.out, m.outMode)
}
} else {
fmt.Printf("failed to get console mode for stdout: %v\n", err)
}
m.err = windows.Handle(os.Stderr.Fd())
if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil {
if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
} else {
windows.SetConsoleMode(m.err, m.errMode)
}
} else {
fmt.Printf("failed to get console mode for stderr: %v\n", err)
}
}
type master struct {
in windows.Handle
inMode uint32
out windows.Handle
outMode uint32
err windows.Handle
errMode uint32
}
func (m *master) SetRaw() error {
if err := makeInputRaw(m.in, m.inMode); err != nil {
return err
}
// Set StdOut and StdErr to raw mode, we ignore failures since
// windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of
// Windows.
windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
return nil
}
func (m *master) Reset() error {
for _, s := range []struct {
fd windows.Handle
mode uint32
}{
{m.in, m.inMode},
{m.out, m.outMode},
{m.err, m.errMode},
} {
if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
return errors.Wrap(err, "unable to restore console mode")
}
}
return nil
}
func (m *master) Size() (WinSize, error) {
var info windows.ConsoleScreenBufferInfo
err := windows.GetConsoleScreenBufferInfo(m.out, &info)
if err != nil {
return WinSize{}, errors.Wrap(err, "unable to get console info")
}
winsize := WinSize{
Width: uint16(info.Window.Right - info.Window.Left + 1),
Height: uint16(info.Window.Bottom - info.Window.Top + 1),
}
return winsize, nil
}
func (m *master) Resize(ws WinSize) error {
return ErrNotImplemented
}
func (m *master) ResizeFrom(c Console) error {
return ErrNotImplemented
}
func (m *master) DisableEcho() error {
mode := m.inMode &^ windows.ENABLE_ECHO_INPUT
mode |= windows.ENABLE_PROCESSED_INPUT
mode |= windows.ENABLE_LINE_INPUT
if err := windows.SetConsoleMode(m.in, mode); err != nil {
return errors.Wrap(err, "unable to set console to disable echo")
}
return nil
}
func (m *master) Close() error {
return nil
}
func (m *master) Read(b []byte) (int, error) {
panic("not implemented on windows")
}
func (m *master) Write(b []byte) (int, error) {
panic("not implemented on windows")
}
func (m *master) Fd() uintptr {
return uintptr(m.in)
}
// On Windows, a console can only be made from os.Std{in,out,err}, hence there
// isn't a single name here we can use. Returning a dummy "console" value in
// this case should be sufficient.
func (m *master) Name() string {
return "console"
}
// makeInputRaw puts the terminal (Windows Console) connected to the given
// file descriptor into raw mode
func makeInputRaw(fd windows.Handle, mode uint32) error {
// See
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
// Disable these modes
mode &^= windows.ENABLE_ECHO_INPUT
mode &^= windows.ENABLE_LINE_INPUT
mode &^= windows.ENABLE_MOUSE_INPUT
mode &^= windows.ENABLE_WINDOW_INPUT
mode &^= windows.ENABLE_PROCESSED_INPUT
// Enable these modes
mode |= windows.ENABLE_EXTENDED_FLAGS
mode |= windows.ENABLE_INSERT_MODE
mode |= windows.ENABLE_QUICK_EDIT_MODE
if vtInputSupported {
mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
}
if err := windows.SetConsoleMode(fd, mode); err != nil {
return errors.Wrap(err, "unable to set console to raw mode")
}
return nil
}
func checkConsole(f *os.File) error {
var mode uint32
if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil {
return err
}
return nil
}
func newMaster(f *os.File) (Console, error) {
if f != os.Stdin && f != os.Stdout && f != os.Stderr {
return nil, errors.New("creating a console from a file is not supported on windows")
}
m := &master{}
m.initStdios()
return m, nil
}

vendor/github.com/containerd/console/tc_darwin.go generated vendored Normal file

@ -0,0 +1,53 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"fmt"
"os"
"unsafe"
"golang.org/x/sys/unix"
)
const (
cmdTcGet = unix.TIOCGETA
cmdTcSet = unix.TIOCSETA
)
func ioctl(fd, flag, data uintptr) error {
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
return err
}
return nil
}
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
func unlockpt(f *os.File) error {
var u int32
return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u)))
}
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME)
if err != nil {
return "", err
}
return fmt.Sprintf("/dev/pts/%d", n), nil
}

vendor/github.com/containerd/console/tc_freebsd.go generated vendored Normal file

@ -0,0 +1,45 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"fmt"
"os"
"golang.org/x/sys/unix"
)
const (
cmdTcGet = unix.TIOCGETA
cmdTcSet = unix.TIOCSETA
)
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
// This does not exist on FreeBSD; FreeBSD does not allocate controlling terminals on open
func unlockpt(f *os.File) error {
return nil
}
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
if err != nil {
return "", err
}
return fmt.Sprintf("/dev/pts/%d", n), nil
}

vendor/github.com/containerd/console/tc_linux.go generated vendored Normal file

@ -0,0 +1,49 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"fmt"
"os"
"unsafe"
"golang.org/x/sys/unix"
)
const (
cmdTcGet = unix.TCGETS
cmdTcSet = unix.TCSETS
)
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
func unlockpt(f *os.File) error {
var u int32
if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 {
return err
}
return nil
}
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
var u uint32
if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 {
return "", err
}
return fmt.Sprintf("/dev/pts/%d", u), nil
}

vendor/github.com/containerd/console/tc_openbsd_cgo.go generated vendored Normal file

@ -0,0 +1,51 @@
// +build openbsd,cgo
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"os"
"golang.org/x/sys/unix"
)
//#include <stdlib.h>
import "C"
const (
cmdTcGet = unix.TIOCGETA
cmdTcSet = unix.TIOCSETA
)
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
ptspath, err := C.ptsname(C.int(f.Fd()))
if err != nil {
return "", err
}
return C.GoString(ptspath), nil
}
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
func unlockpt(f *os.File) error {
if _, err := C.grantpt(C.int(f.Fd())); err != nil {
return err
}
return nil
}


@ -0,0 +1,47 @@
// +build openbsd,!cgo
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// Implementing the functions below requires cgo support. Non-cgo stubs
// versions are defined below to enable cross-compilation of source code
// that depends on these functions, but the resultant cross-compiled
// binaries cannot actually be used. If the stub function(s) below are
// actually invoked they will display an error message and cause the
// calling process to exit.
//
package console
import (
"os"
"golang.org/x/sys/unix"
)
const (
cmdTcGet = unix.TIOCGETA
cmdTcSet = unix.TIOCSETA
)
func ptsname(f *os.File) (string, error) {
panic("ptsname() support requires cgo.")
}
func unlockpt(f *os.File) error {
panic("unlockpt() support requires cgo.")
}

vendor/github.com/containerd/console/tc_solaris_cgo.go generated vendored Normal file

@ -0,0 +1,51 @@
// +build solaris,cgo
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"os"
"golang.org/x/sys/unix"
)
//#include <stdlib.h>
import "C"
const (
cmdTcGet = unix.TCGETS
cmdTcSet = unix.TCSETS
)
// ptsname retrieves the name of the first available pts for the given master.
func ptsname(f *os.File) (string, error) {
ptspath, err := C.ptsname(C.int(f.Fd()))
if err != nil {
return "", err
}
return C.GoString(ptspath), nil
}
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
// unlockpt should be called before opening the slave side of a pty.
func unlockpt(f *os.File) error {
if _, err := C.grantpt(C.int(f.Fd())); err != nil {
return err
}
return nil
}


@ -0,0 +1,47 @@
// +build solaris,!cgo
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// Implementing the functions below requires cgo support. Non-cgo stubs
// versions are defined below to enable cross-compilation of source code
// that depends on these functions, but the resultant cross-compiled
// binaries cannot actually be used. If the stub function(s) below are
// actually invoked they will display an error message and cause the
// calling process to exit.
//
package console
import (
"os"
"golang.org/x/sys/unix"
)
const (
cmdTcGet = unix.TCGETS
cmdTcSet = unix.TCSETS
)
func ptsname(f *os.File) (string, error) {
panic("ptsname() support requires cgo.")
}
func unlockpt(f *os.File) error {
panic("unlockpt() support requires cgo.")
}

vendor/github.com/containerd/console/tc_unix.go generated vendored Normal file

@ -0,0 +1,91 @@
// +build darwin freebsd linux openbsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package console
import (
"golang.org/x/sys/unix"
)
func tcget(fd uintptr, p *unix.Termios) error {
termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet)
if err != nil {
return err
}
*p = *termios
return nil
}
func tcset(fd uintptr, p *unix.Termios) error {
return unix.IoctlSetTermios(int(fd), cmdTcSet, p)
}
func tcgwinsz(fd uintptr) (WinSize, error) {
var ws WinSize
uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
if err != nil {
return ws, err
}
// Translate from unix.Winsize to console.WinSize
ws.Height = uws.Row
ws.Width = uws.Col
ws.x = uws.Xpixel
ws.y = uws.Ypixel
return ws, nil
}
func tcswinsz(fd uintptr, ws WinSize) error {
// Translate from console.WinSize to unix.Winsize
var uws unix.Winsize
uws.Row = ws.Height
uws.Col = ws.Width
uws.Xpixel = ws.x
uws.Ypixel = ws.y
return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws)
}
func setONLCR(fd uintptr, enable bool) error {
var termios unix.Termios
if err := tcget(fd, &termios); err != nil {
return err
}
if enable {
// Set +onlcr so we can act like a real terminal
termios.Oflag |= unix.ONLCR
} else {
// Set -onlcr so we don't have to deal with \r.
termios.Oflag &^= unix.ONLCR
}
return tcset(fd, &termios)
}
func cfmakeraw(t unix.Termios) unix.Termios {
t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
t.Oflag &^= unix.OPOST
t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
t.Cflag &^= (unix.CSIZE | unix.PARENB)
t.Cflag &^= unix.CS8
t.Cc[unix.VMIN] = 1
t.Cc[unix.VTIME] = 0
return t
}


@ -181,8 +181,24 @@ type ImageBuildOptions struct {
Target string
SessionID string
Platform string
// Version specifies the version of the underlying builder to use
Version BuilderVersion
// BuildID is an optional identifier that can be passed together with the
// build request. The same identifier can be used to gracefully cancel the
// build with the cancel request.
BuildID string
}
// BuilderVersion sets the version of the underlying builder to use
type BuilderVersion string
const (
// BuilderV1 is the first generation builder in the docker daemon
BuilderV1 BuilderVersion = "1"
// BuilderBuildKit is the builder based on the moby/buildkit project
BuilderBuildKit = "2"
)
// ImageBuildResponse holds information
// returned by a server after building
// an image.
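
A hedged sketch (hypothetical helper, built only from APIs shown in this commit; imports assumed are context, io, the docker api types and client packages, and pkg/stringid) of how a client opts into the BuildKit builder and attaches a BuildID that can later be used to cancel the build:

```go
// buildWithBuildKit mirrors the CLI change in this commit: it selects the
// BuildKit builder and tags the request with a cancellable BuildID.
func buildWithBuildKit(ctx context.Context, apiClient client.ImageAPIClient, buildContext io.Reader, sessionID string) (types.ImageBuildResponse, string, error) {
	buildID := stringid.GenerateRandomID()
	options := types.ImageBuildOptions{
		SessionID: sessionID,
		Version:   types.BuilderBuildKit, // types.BuilderV1 selects the classic builder
		BuildID:   buildID,               // later passed to BuildCancel if needed
	}
	resp, err := apiClient.ImageBuild(ctx, buildContext, options)
	return resp, buildID, err
}
```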


@ -512,7 +512,8 @@ type DiskUsage struct {
Images []*ImageSummary
Containers []*Container
Volumes []*Volume
BuilderSize int64
BuildCache []*BuildCache
BuilderSize int64 // deprecated
}
// ContainersPruneReport contains the response for Engine API:
@ -585,3 +586,17 @@ type PushResult struct {
type BuildResult struct {
ID string
}
// BuildCache contains information about a build cache record
type BuildCache struct {
ID string
Mutable bool
InUse bool
Size int64
CreatedAt time.Time
LastUsedAt *time.Time
UsageCount int
Parent string
Description string
}
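
A small, hedged sketch of reading the new BuildCache slice through the client's existing DiskUsage call (apiClient, ctx, and the fmt import are assumed; the output format is illustrative only):

```go
du, err := apiClient.DiskUsage(ctx)
if err != nil {
	return err
}
for _, record := range du.BuildCache {
	fmt.Printf("%s\tin-use=%v\tmutable=%v\tsize=%d\tused %d times\n",
		record.ID, record.InUse, record.Mutable, record.Size, record.UsageCount)
}
```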

vendor/github.com/docker/docker/client/build_cancel.go generated vendored Normal file

@ -0,0 +1,21 @@
package client // import "github.com/docker/docker/client"
import (
"net/url"
"golang.org/x/net/context"
)
// BuildCancel requests the daemon to cancel an ongoing build request
func (cli *Client) BuildCancel(ctx context.Context, id string) error {
query := url.Values{}
query.Set("id", id)
serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
if err != nil {
return err
}
defer ensureReaderClosed(serverResp)
return nil
}
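
A hedged usage sketch pairing BuildCancel with the BuildID from ImageBuildOptions, mirroring the goroutine in the CLI hunk earlier in this commit (cancelOnInterrupt is a hypothetical helper; client here is github.com/docker/docker/client):

```go
// cancelOnInterrupt asks the daemon to cancel the build identified by buildID
// once ctx is cancelled; closing done first releases the goroutine without
// sending a cancel request.
func cancelOnInterrupt(ctx context.Context, apiClient client.ImageAPIClient, buildID string, done <-chan struct{}) error {
	select {
	case <-ctx.Done():
		// ctx is already cancelled at this point, so use a fresh context.
		return apiClient.BuildCancel(context.Background(), buildID)
	case <-done:
		return nil
	}
}
```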


@ -9,7 +9,6 @@ import (
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/docker/docker/api/types"
@ -17,21 +16,6 @@ import (
"github.com/pkg/errors"
)
// tlsClientCon holds tls information and a dialed connection.
type tlsClientCon struct {
*tls.Conn
rawConn net.Conn
}
func (c *tlsClientCon) CloseWrite() error {
// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
// on its underlying connection.
if conn, ok := c.rawConn.(types.CloseWriter); ok {
return conn.CloseWrite()
}
return nil
}
// postHijacked sends a POST request and hijacks the connection.
func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
bodyEncoded, err := encodeData(body)
@ -54,96 +38,9 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu
return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
}
func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
return tlsDialWithDialer(new(net.Dialer), network, addr, config)
}
// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in
// order to return our custom tlsClientCon struct which holds both the tls.Conn
// object _and_ its underlying raw connection. The rationale for this is that
// we need to be able to close the write end of the connection when attaching,
// which tls.Conn does not provide.
func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
// We want the Timeout and Deadline values from dialer to cover the
// whole process: TCP connection and TLS handshake. This means that we
// also need to start our own timers now.
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
}
var errChannel chan error
if timeout != 0 {
errChannel = make(chan error, 2)
time.AfterFunc(timeout, func() {
errChannel <- errors.New("")
})
}
proxyDialer, err := sockets.DialerFromEnvironment(dialer)
if err != nil {
return nil, err
}
rawConn, err := proxyDialer.Dial(network, addr)
if err != nil {
return nil, err
}
// When we set up a TCP connection for hijack, there could be long periods
// of inactivity (a long running command with no output) that in certain
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
// state. Setting TCP KeepAlive on the socket connection will prohibit
// ECONNTIMEOUT unless the socket connection truly is broken
if tcpConn, ok := rawConn.(*net.TCPConn); ok {
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(30 * time.Second)
}
colonPos := strings.LastIndex(addr, ":")
if colonPos == -1 {
colonPos = len(addr)
}
hostname := addr[:colonPos]
// If no ServerName is set, infer the ServerName
// from the hostname we're connecting to.
if config.ServerName == "" {
// Make a copy to avoid polluting argument or default.
config = tlsConfigClone(config)
config.ServerName = hostname
}
conn := tls.Client(rawConn, config)
if timeout == 0 {
err = conn.Handshake()
} else {
go func() {
errChannel <- conn.Handshake()
}()
err = <-errChannel
}
if err != nil {
rawConn.Close()
return nil, err
}
// This is Docker difference with standard's crypto/tls package: returned a
// wrapper which holds both the TLS and raw connections.
return &tlsClientCon{conn, rawConn}, nil
}
func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
if tlsConfig != nil && proto != "unix" && proto != "npipe" {
// Notice this isn't Go standard's tls.Dial function
return tlsDial(proto, addr, tlsConfig)
return tls.Dial(proto, addr, tlsConfig)
}
if proto == "npipe" {
return sockets.DialPipe(addr, 32*time.Second)


@ -133,5 +133,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur
if options.Platform != "" {
query.Set("platform", strings.ToLower(options.Platform))
}
if options.BuildID != "" {
query.Set("buildid", options.BuildID)
}
query.Set("version", string(options.Version))
return query, nil
}


@ -86,6 +86,7 @@ type DistributionAPIClient interface {
type ImageAPIClient interface {
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
BuildCancel(ctx context.Context, id string) error
ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)


@ -1,11 +0,0 @@
// +build go1.8
package client // import "github.com/docker/docker/client"
import "crypto/tls"
// tlsConfigClone returns a clone of tls.Config. This function is provided for
// compatibility for go1.7 that doesn't include this method in stdlib.
func tlsConfigClone(c *tls.Config) *tls.Config {
return c.Clone()
}


@ -1,33 +0,0 @@
// +build go1.7,!go1.8
package client // import "github.com/docker/docker/client"
import "crypto/tls"
// tlsConfigClone returns a clone of tls.Config. This function is provided for
// compatibility for go1.7 that doesn't include this method in stdlib.
func tlsConfigClone(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
}
}


@ -27,10 +27,13 @@ github.com/imdario/mergo 0.2.1
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
# buildkit
github.com/moby/buildkit 43e758232a0ac7d50c6a11413186e16684fc1e4f
github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
github.com/moby/buildkit 4b8dc5b08bdd1b9a29d0f767d94a1360e668da14
github.com/tonistiigi/fsutil 8839685ae8c3c8bd67d0ce28e9b3157b23c1c7a5
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
#get libnetwork packages
@ -72,8 +75,8 @@ github.com/pborman/uuid v1.0
google.golang.org/grpc v1.12.0
# When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly
github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
# This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal
github.com/opencontainers/runc 0e561642f81e84ebd0b3afd6ec510c75a2ccb71b
github.com/opencontainers/runtime-spec v1.0.1
github.com/opencontainers/image-spec v1.0.1
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
@ -112,11 +115,11 @@ github.com/googleapis/gax-go v2.0.0
google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
# containerd
github.com/containerd/containerd c7083eed5d8633d54c25fe81aa609010a4f2e495
github.com/containerd/containerd 63522d9eaa5a0443d225642c4b6f4f5fdedf932b
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577
@ -131,7 +134,7 @@ github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b65068
golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0

vendor/github.com/google/shlex/COPYING generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/google/shlex/README generated vendored Normal file

@ -0,0 +1,2 @@
go-shlex is a simple lexer for go that supports shell-style quoting,
commenting, and escaping.

vendor/github.com/google/shlex/shlex.go generated vendored Normal file

@ -0,0 +1,417 @@
/*
Copyright 2012 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package shlex implements a simple lexer which splits input in to tokens using
shell-style rules for quoting and commenting.
The basic use case uses the default ASCII lexer to split a string into sub-strings:
shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
To process a stream of strings:
l := NewLexer(os.Stdin)
for ; token, err := l.Next(); err != nil {
// process token
}
To access the raw token stream (which includes tokens for comments):
t := NewTokenizer(os.Stdin)
for ; token, err := t.Next(); err != nil {
// process token
}
*/
package shlex
import (
"bufio"
"fmt"
"io"
"strings"
)
// TokenType is a top-level token classification: A word, space, comment, unknown.
type TokenType int
// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
type runeTokenClass int
// the internal state used by the lexer state machine
type lexerState int
// Token is a (type, value) pair representing a lexographical token.
type Token struct {
tokenType TokenType
value string
}
// Equal reports whether tokens a, and b, are equal.
// Two tokens are equal if both their types and values are equal. A nil token can
// never be equal to another token.
func (a *Token) Equal(b *Token) bool {
if a == nil || b == nil {
return false
}
if a.tokenType != b.tokenType {
return false
}
return a.value == b.value
}
// Named classes of UTF-8 runes
const (
spaceRunes = " \t\r\n"
escapingQuoteRunes = `"`
nonEscapingQuoteRunes = "'"
escapeRunes = `\`
commentRunes = "#"
)
// Classes of rune token
const (
unknownRuneClass runeTokenClass = iota
spaceRuneClass
escapingQuoteRuneClass
nonEscapingQuoteRuneClass
escapeRuneClass
commentRuneClass
eofRuneClass
)
// Classes of lexographic token
const (
UnknownToken TokenType = iota
WordToken
SpaceToken
CommentToken
)
// Lexer state machine states
const (
startState lexerState = iota // no runes have been seen
inWordState // processing regular runes in a word
escapingState // we have just consumed an escape rune; the next rune is literal
escapingQuotedState // we have just consumed an escape rune within a quoted string
quotingEscapingState // we are within a quoted string that supports escaping ("...")
quotingState // we are within a string that does not support escaping ('...')
commentState // we are within a comment (everything following an unquoted or unescaped #
)
// tokenClassifier is used for classifying rune characters.
type tokenClassifier map[rune]runeTokenClass
func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
for _, runeChar := range runes {
typeMap[runeChar] = tokenType
}
}
// newDefaultClassifier creates a new classifier for ASCII characters.
func newDefaultClassifier() tokenClassifier {
t := tokenClassifier{}
t.addRuneClass(spaceRunes, spaceRuneClass)
t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
t.addRuneClass(escapeRunes, escapeRuneClass)
t.addRuneClass(commentRunes, commentRuneClass)
return t
}
// ClassifyRune classifies a rune
func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
return t[runeVal]
}
// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
type Lexer Tokenizer
// NewLexer creates a new lexer from an input stream.
func NewLexer(r io.Reader) *Lexer {
return (*Lexer)(NewTokenizer(r))
}
// Next returns the next word, or an error. If there are no more words,
// the error will be io.EOF.
func (l *Lexer) Next() (string, error) {
for {
token, err := (*Tokenizer)(l).Next()
if err != nil {
return "", err
}
switch token.tokenType {
case WordToken:
return token.value, nil
case CommentToken:
// skip comments
default:
return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
}
}
}
// Tokenizer turns an input stream into a sequence of typed tokens
type Tokenizer struct {
input bufio.Reader
classifier tokenClassifier
}
// NewTokenizer creates a new tokenizer from an input stream.
func NewTokenizer(r io.Reader) *Tokenizer {
input := bufio.NewReader(r)
classifier := newDefaultClassifier()
return &Tokenizer{
input: *input,
classifier: classifier}
}
// scanStream scans the stream for the next token using the internal state machine.
// It will panic if it encounters a rune which it does not know how to handle.
func (t *Tokenizer) scanStream() (*Token, error) {
state := startState
var tokenType TokenType
var value []rune
var nextRune rune
var nextRuneType runeTokenClass
var err error
for {
nextRune, _, err = t.input.ReadRune()
nextRuneType = t.classifier.ClassifyRune(nextRune)
if err == io.EOF {
nextRuneType = eofRuneClass
err = nil
} else if err != nil {
return nil, err
}
switch state {
case startState: // no runes read yet
{
switch nextRuneType {
case eofRuneClass:
{
return nil, io.EOF
}
case spaceRuneClass:
{
}
case escapingQuoteRuneClass:
{
tokenType = WordToken
state = quotingEscapingState
}
case nonEscapingQuoteRuneClass:
{
tokenType = WordToken
state = quotingState
}
case escapeRuneClass:
{
tokenType = WordToken
state = escapingState
}
case commentRuneClass:
{
tokenType = CommentToken
state = commentState
}
default:
{
tokenType = WordToken
value = append(value, nextRune)
state = inWordState
}
}
}
case inWordState: // in a regular word
{
switch nextRuneType {
case eofRuneClass:
{
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case spaceRuneClass:
{
t.input.UnreadRune()
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case escapingQuoteRuneClass:
{
state = quotingEscapingState
}
case nonEscapingQuoteRuneClass:
{
state = quotingState
}
case escapeRuneClass:
{
state = escapingState
}
default:
{
value = append(value, nextRune)
}
}
}
case escapingState: // the rune after an escape character
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found after escape character")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
default:
{
state = inWordState
value = append(value, nextRune)
}
}
}
case escapingQuotedState: // the next rune after an escape character, in double quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found after escape character")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
default:
{
state = quotingEscapingState
value = append(value, nextRune)
}
}
}
case quotingEscapingState: // in escaping double quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found when expecting closing quote")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case escapingQuoteRuneClass:
{
state = inWordState
}
case escapeRuneClass:
{
state = escapingQuotedState
}
default:
{
value = append(value, nextRune)
}
}
}
case quotingState: // in non-escaping single quotes
{
switch nextRuneType {
case eofRuneClass:
{
err = fmt.Errorf("EOF found when expecting closing quote")
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case nonEscapingQuoteRuneClass:
{
state = inWordState
}
default:
{
value = append(value, nextRune)
}
}
}
case commentState: // in a comment
{
switch nextRuneType {
case eofRuneClass:
{
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
}
case spaceRuneClass:
{
if nextRune == '\n' {
state = startState
token := &Token{
tokenType: tokenType,
value: string(value)}
return token, err
} else {
value = append(value, nextRune)
}
}
default:
{
value = append(value, nextRune)
}
}
}
default:
{
return nil, fmt.Errorf("Unexpected state: %v", state)
}
}
}
}
// Next returns the next token in the stream.
func (t *Tokenizer) Next() (*Token, error) {
return t.scanStream()
}
// Split partitions a string into a slice of strings.
func Split(s string) ([]string, error) {
l := NewLexer(strings.NewReader(s))
subStrings := make([]string, 0)
for {
word, err := l.Next()
if err != nil {
if err == io.EOF {
return subStrings, nil
}
return subStrings, err
}
subStrings = append(subStrings, word)
}
}
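
The shlex package vendored above is what the LLB Shlex/Shlexf run options (see llb/meta.go further down) rely on to split a shell-style command line into an argv slice. A minimal sketch of calling it directly; the command string is just an example:

package main

import (
	"fmt"

	"github.com/google/shlex"
)

func main() {
	// Quoted segments survive as single arguments; anything after an
	// unquoted '#' is treated as a comment and dropped.
	args, err := shlex.Split(`sh -c "echo hello world" # trailing comment`)
	if err != nil {
		panic(err)
	}
	fmt.Println(args) // [sh -c echo hello world]
}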

File diff suppressed because it is too large


@ -0,0 +1,121 @@
syntax = "proto3";
package moby.buildkit.v1;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
service Control {
rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse);
rpc Prune(PruneRequest) returns (stream UsageRecord);
rpc Solve(SolveRequest) returns (SolveResponse);
rpc Status(StatusRequest) returns (stream StatusResponse);
rpc Session(stream BytesMessage) returns (stream BytesMessage);
rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
}
message PruneRequest {
// TODO: filter
}
message DiskUsageRequest {
string filter = 1; // FIXME: this should be containerd-compatible repeated string?
}
message DiskUsageResponse {
repeated UsageRecord record = 1;
}
message UsageRecord {
string ID = 1;
bool Mutable = 2;
bool InUse = 3;
int64 Size = 4;
string Parent = 5;
google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true];
int64 UsageCount = 8;
string Description = 9;
}
message SolveRequest {
string Ref = 1;
pb.Definition Definition = 2;
string Exporter = 3;
map<string, string> ExporterAttrs = 4;
string Session = 5;
string Frontend = 6;
map<string, string> FrontendAttrs = 7;
CacheOptions Cache = 8 [(gogoproto.nullable) = false];
}
message CacheOptions {
string ExportRef = 1;
repeated string ImportRefs = 2;
map<string, string> ExportAttrs = 3;
}
message SolveResponse {
map<string, string> ExporterResponse = 1;
}
message StatusRequest {
string Ref = 1;
}
message StatusResponse {
repeated Vertex vertexes = 1;
repeated VertexStatus statuses = 2;
repeated VertexLog logs = 3;
}
message Vertex {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
bool cached = 4;
google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ];
google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ];
string error = 7; // typed errors?
}
message VertexStatus {
string ID = 1;
string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
int64 current = 4;
int64 total = 5;
// TODO: add started, completed
google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ];
google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ];
}
message VertexLog {
string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
int64 stream = 3;
bytes msg = 4;
}
message BytesMessage {
bytes data = 1;
}
message ListWorkersRequest {
repeated string filter = 1; // containerd style
}
message ListWorkersResponse {
repeated WorkerRecord record = 1;
}
message WorkerRecord {
string ID = 1;
map<string, string> Labels = 2;
}
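
The Status RPC above is the stream that carries Vertex, VertexStatus, and VertexLog records for the build referenced by Ref; it is what drives the client-side progress display. A hedged sketch of reading it with the generated controlapi client, assuming ctx, ref, and a controlapi.ControlClient named ctrl are already in scope:

stream, err := ctrl.Status(ctx, &controlapi.StatusRequest{Ref: ref})
if err != nil {
	return err
}
for {
	resp, err := stream.Recv()
	if err == io.EOF {
		return nil // build finished, stream closed
	}
	if err != nil {
		return err
	}
	for _, v := range resp.Vertexes {
		fmt.Printf("vertex %s cached=%v\n", v.Name, v.Cached)
	}
	for _, l := range resp.Logs {
		os.Stderr.Write(l.Msg)
	}
}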


@ -0,0 +1,3 @@
package moby_buildkit_v1
//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto

vendor/github.com/moby/buildkit/client/client.go generated vendored Normal file

@ -0,0 +1,132 @@
package client
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"time"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/util/appdefaults"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type Client struct {
conn *grpc.ClientConn
}
type ClientOpt interface{}
// New returns a new buildkit client. Address can be empty for the system-default address.
func New(address string, opts ...ClientOpt) (*Client, error) {
gopts := []grpc.DialOption{
grpc.WithTimeout(30 * time.Second),
grpc.WithDialer(dialer),
grpc.FailOnNonTempDialError(true),
}
needWithInsecure := true
for _, o := range opts {
if _, ok := o.(*withBlockOpt); ok {
gopts = append(gopts, grpc.WithBlock(), grpc.FailOnNonTempDialError(true))
}
if credInfo, ok := o.(*withCredentials); ok {
opt, err := loadCredentials(credInfo)
if err != nil {
return nil, err
}
gopts = append(gopts, opt)
needWithInsecure = false
}
if wt, ok := o.(*withTracer); ok {
gopts = append(gopts,
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())),
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)))
}
}
if needWithInsecure {
gopts = append(gopts, grpc.WithInsecure())
}
if address == "" {
address = appdefaults.Address
}
conn, err := grpc.Dial(address, gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
}
c := &Client{
conn: conn,
}
return c, nil
}
func (c *Client) controlClient() controlapi.ControlClient {
return controlapi.NewControlClient(c.conn)
}
func (c *Client) Close() error {
return c.conn.Close()
}
type withBlockOpt struct{}
func WithBlock() ClientOpt {
return &withBlockOpt{}
}
type withCredentials struct {
ServerName string
CACert string
Cert string
Key string
}
// WithCredentials configures the TLS parameters of the client.
// Arguments:
// * serverName: specifies the name of the target server
// * ca: specifies the filepath of the CA certificate to use for verification
// * cert: specifies the filepath of the client certificate
// * key: specifies the filepath of the client key
func WithCredentials(serverName, ca, cert, key string) ClientOpt {
return &withCredentials{serverName, ca, cert, key}
}
func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
ca, err := ioutil.ReadFile(opts.CACert)
if err != nil {
return nil, errors.Wrap(err, "could not read ca certificate")
}
certPool := x509.NewCertPool()
if ok := certPool.AppendCertsFromPEM(ca); !ok {
return nil, errors.New("failed to append ca certs")
}
cfg := &tls.Config{
ServerName: opts.ServerName,
RootCAs: certPool,
}
// we will produce an error if the user forgot about either cert or key if at least one is specified
if opts.Cert != "" || opts.Key != "" {
cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key)
if err != nil {
return nil, errors.Wrap(err, "could not read certificate/key")
}
cfg.Certificates = []tls.Certificate{cert}
cfg.BuildNameToCertificate()
}
return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
}
func WithTracer(t opentracing.Tracer) ClientOpt {
return &withTracer{t}
}
type withTracer struct {
tracer opentracing.Tracer
}
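
A minimal sketch of constructing the client defined above. The address and certificate paths are placeholders; passing an empty address falls back to appdefaults.Address, as documented on New:

c, err := client.New("tcp://127.0.0.1:1234",
	client.WithBlock(),
	client.WithCredentials("buildkitd.example.com",
		"/etc/buildkit/ca.pem", "/etc/buildkit/cert.pem", "/etc/buildkit/key.pem"),
)
if err != nil {
	return err
}
defer c.Close()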

vendor/github.com/moby/buildkit/client/client_unix.go generated vendored Normal file

@ -0,0 +1,19 @@
// +build !windows
package client
import (
"net"
"strings"
"time"
"github.com/pkg/errors"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
addrParts := strings.SplitN(address, "://", 2)
if len(addrParts) != 2 {
return nil, errors.Errorf("invalid address %s", address)
}
return net.DialTimeout(addrParts[0], addrParts[1], timeout)
}


@ -0,0 +1,24 @@
package client
import (
"net"
"strings"
"time"
"github.com/Microsoft/go-winio"
"github.com/pkg/errors"
)
func dialer(address string, timeout time.Duration) (net.Conn, error) {
addrParts := strings.SplitN(address, "://", 2)
if len(addrParts) != 2 {
return nil, errors.Errorf("invalid address %s", address)
}
switch addrParts[0] {
case "npipe":
address = strings.Replace(addrParts[1], "/", "\\", -1) // replace all separators; a count of 0 would replace nothing
return winio.DialPipe(address, &timeout)
default:
return net.DialTimeout(addrParts[0], addrParts[1], timeout)
}
}

vendor/github.com/moby/buildkit/client/diskusage.go generated vendored Normal file

@ -0,0 +1,73 @@
package client
import (
"context"
"sort"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
type UsageInfo struct {
ID string
Mutable bool
InUse bool
Size int64
CreatedAt time.Time
LastUsedAt *time.Time
UsageCount int
Parent string
Description string
}
func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
info := &DiskUsageInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.DiskUsageRequest{Filter: info.Filter}
resp, err := c.controlClient().DiskUsage(ctx, req)
if err != nil {
return nil, errors.Wrap(err, "failed to call diskusage")
}
var du []*UsageInfo
for _, d := range resp.Record {
du = append(du, &UsageInfo{
ID: d.ID,
Mutable: d.Mutable,
InUse: d.InUse,
Size: d.Size_,
Parent: d.Parent,
CreatedAt: d.CreatedAt,
Description: d.Description,
UsageCount: int(d.UsageCount),
LastUsedAt: d.LastUsedAt,
})
}
sort.Slice(du, func(i, j int) bool {
if du[i].Size == du[j].Size {
return du[i].ID > du[j].ID
}
return du[i].Size > du[j].Size
})
return du, nil
}
type DiskUsageOption func(*DiskUsageInfo)
type DiskUsageInfo struct {
Filter string
}
func WithFilter(f string) DiskUsageOption {
return func(di *DiskUsageInfo) {
di.Filter = f
}
}
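
A short sketch of calling DiskUsage on the client above; the records come back sorted largest first, and the filter string is forwarded to the daemon as-is (empty means no filter):

records, err := c.DiskUsage(ctx, client.WithFilter(""))
if err != nil {
	return err
}
for _, r := range records {
	fmt.Printf("%-24s %12d bytes  in use: %v\n", r.ID, r.Size, r.InUse)
}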

vendor/github.com/moby/buildkit/client/exporters.go generated vendored Normal file

@ -0,0 +1,8 @@
package client
const (
ExporterImage = "image"
ExporterLocal = "local"
ExporterOCI = "oci"
ExporterDocker = "docker"
)

vendor/github.com/moby/buildkit/client/graph.go generated vendored Normal file

@ -0,0 +1,45 @@
package client
import (
"time"
digest "github.com/opencontainers/go-digest"
)
type Vertex struct {
Digest digest.Digest
Inputs []digest.Digest
Name string
Started *time.Time
Completed *time.Time
Cached bool
Error string
}
type VertexStatus struct {
ID string
Vertex digest.Digest
Name string
Total int64
Current int64
Timestamp time.Time
Started *time.Time
Completed *time.Time
}
type VertexLog struct {
Vertex digest.Digest
Stream int
Data []byte
Timestamp time.Time
}
type SolveStatus struct {
Vertexes []*Vertex
Statuses []*VertexStatus
Logs []*VertexLog
}
type SolveResponse struct {
ExporterResponse map[string]string
}

vendor/github.com/moby/buildkit/client/llb/exec.go generated vendored Normal file

@ -0,0 +1,372 @@
package llb
import (
_ "crypto/sha256"
"sort"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type Meta struct {
Args []string
Env EnvList
Cwd string
User string
ProxyEnv *ProxyEnv
}
func NewExecOp(root Output, meta Meta, readOnly bool, md OpMetadata) *ExecOp {
e := &ExecOp{meta: meta, cachedOpMetadata: md}
rootMount := &mount{
target: pb.RootMount,
source: root,
readonly: readOnly,
}
e.mounts = append(e.mounts, rootMount)
if readOnly {
e.root = root
} else {
e.root = &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)}
}
rootMount.output = e.root
return e
}
type mount struct {
target string
readonly bool
source Output
output Output
selector string
cacheID string
// hasOutput bool
}
type ExecOp struct {
root Output
mounts []*mount
meta Meta
cachedPBDigest digest.Digest
cachedPB []byte
cachedOpMetadata OpMetadata
isValidated bool
}
func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output {
m := &mount{
target: target,
source: source,
}
for _, o := range opt {
o(m)
}
e.mounts = append(e.mounts, m)
if m.readonly {
m.output = source
} else {
m.output = &output{vertex: e, getIndex: e.getMountIndexFn(m)}
}
e.cachedPB = nil
e.isValidated = false
return m.output
}
func (e *ExecOp) GetMount(target string) Output {
for _, m := range e.mounts {
if m.target == target {
return m.output
}
}
return nil
}
func (e *ExecOp) Validate() error {
if e.isValidated {
return nil
}
if len(e.meta.Args) == 0 {
return errors.Errorf("arguments are required")
}
if e.meta.Cwd == "" {
return errors.Errorf("working directory is required")
}
for _, m := range e.mounts {
if m.source != nil {
if err := m.source.Vertex().Validate(); err != nil {
return err
}
}
}
e.isValidated = true
return nil
}
func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
if e.cachedPB != nil {
return e.cachedPBDigest, e.cachedPB, &e.cachedOpMetadata, nil
}
if err := e.Validate(); err != nil {
return "", nil, nil, err
}
// make sure mounts are sorted
sort.Slice(e.mounts, func(i, j int) bool {
return e.mounts[i].target < e.mounts[j].target
})
peo := &pb.ExecOp{
Meta: &pb.Meta{
Args: e.meta.Args,
Env: e.meta.Env.ToArray(),
Cwd: e.meta.Cwd,
User: e.meta.User,
},
}
if p := e.meta.ProxyEnv; p != nil {
peo.Meta.ProxyEnv = &pb.ProxyEnv{
HttpProxy: p.HttpProxy,
HttpsProxy: p.HttpsProxy,
FtpProxy: p.FtpProxy,
NoProxy: p.NoProxy,
}
}
pop := &pb.Op{
Op: &pb.Op_Exec{
Exec: peo,
},
}
outIndex := 0
for _, m := range e.mounts {
inputIndex := pb.InputIndex(len(pop.Inputs))
if m.source != nil {
inp, err := m.source.ToInput()
if err != nil {
return "", nil, nil, err
}
newInput := true
for i, inp2 := range pop.Inputs {
if *inp == *inp2 {
inputIndex = pb.InputIndex(i)
newInput = false
break
}
}
if newInput {
pop.Inputs = append(pop.Inputs, inp)
}
} else {
inputIndex = pb.Empty
}
outputIndex := pb.OutputIndex(-1)
if !m.readonly && m.cacheID == "" {
outputIndex = pb.OutputIndex(outIndex)
outIndex++
}
pm := &pb.Mount{
Input: inputIndex,
Dest: m.target,
Readonly: m.readonly,
Output: outputIndex,
Selector: m.selector,
}
if m.cacheID != "" {
pm.MountType = pb.MountType_CACHE
pm.CacheOpt = &pb.CacheOpt{
ID: m.cacheID,
}
}
peo.Mounts = append(peo.Mounts, pm)
}
dt, err := pop.Marshal()
if err != nil {
return "", nil, nil, err
}
e.cachedPBDigest = digest.FromBytes(dt)
e.cachedPB = dt
return e.cachedPBDigest, dt, &e.cachedOpMetadata, nil
}
func (e *ExecOp) Output() Output {
return e.root
}
func (e *ExecOp) Inputs() (inputs []Output) {
mm := map[Output]struct{}{}
for _, m := range e.mounts {
if m.source != nil {
mm[m.source] = struct{}{}
}
}
for o := range mm {
inputs = append(inputs, o)
}
return
}
func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
return func() (pb.OutputIndex, error) {
// make sure mounts are sorted
sort.Slice(e.mounts, func(i, j int) bool {
return e.mounts[i].target < e.mounts[j].target
})
i := 0
for _, m2 := range e.mounts {
if m2.readonly || m2.cacheID != "" {
continue
}
if m == m2 {
return pb.OutputIndex(i), nil
}
i++
}
return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target)
}
}
type ExecState struct {
State
exec *ExecOp
}
func (e ExecState) AddMount(target string, source State, opt ...MountOption) State {
return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...))
}
func (e ExecState) GetMount(target string) State {
return NewState(e.exec.GetMount(target))
}
func (e ExecState) Root() State {
return e.State
}
type MountOption func(*mount)
func Readonly(m *mount) {
m.readonly = true
}
func SourcePath(src string) MountOption {
return func(m *mount) {
m.selector = src
}
}
func AsPersistentCacheDir(id string) MountOption {
return func(m *mount) {
m.cacheID = id
}
}
type RunOption interface {
SetRunOption(es *ExecInfo)
}
type runOptionFunc func(*ExecInfo)
func (fn runOptionFunc) SetRunOption(ei *ExecInfo) {
fn(ei)
}
func Shlex(str string) RunOption {
return Shlexf(str)
}
func Shlexf(str string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = shlexf(str, v...)(ei.State)
})
}
func Args(a []string) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = args(a...)(ei.State)
})
}
func AddEnv(key, value string) RunOption {
return AddEnvf(key, value)
}
func AddEnvf(key, value string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.AddEnvf(key, value, v...)
})
}
func User(str string) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.User(str)
})
}
func Dir(str string) RunOption {
return Dirf(str)
}
func Dirf(str string, v ...interface{}) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.Dirf(str, v...)
})
}
func Reset(s State) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.Reset(s)
})
}
func With(so ...StateOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = ei.State.With(so...)
})
}
func AddMount(dest string, mountState State, opts ...MountOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts})
})
}
func ReadonlyRootFS() RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.ReadonlyRootFS = true
})
}
func WithProxy(ps ProxyEnv) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.ProxyEnv = &ps
})
}
type ExecInfo struct {
opMetaWrapper
State State
Mounts []MountInfo
ReadonlyRootFS bool
ProxyEnv *ProxyEnv
}
type MountInfo struct {
Target string
Source Output
Opts []MountOption
}
type ProxyEnv struct {
HttpProxy string
HttpsProxy string
FtpProxy string
NoProxy string
}

vendor/github.com/moby/buildkit/client/llb/marshal.go generated vendored Normal file

@ -0,0 +1,60 @@
package llb
import (
"io"
"io/ioutil"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
)
// Definition is the LLB definition structure with per-vertex metadata entries
// Corresponds to the Definition structure defined in solver/pb.Definition.
type Definition struct {
Def [][]byte
Metadata map[digest.Digest]OpMetadata
}
func (def *Definition) ToPB() *pb.Definition {
md := make(map[digest.Digest]OpMetadata)
for k, v := range def.Metadata {
md[k] = v
}
return &pb.Definition{
Def: def.Def,
Metadata: md,
}
}
func (def *Definition) FromPB(x *pb.Definition) {
def.Def = x.Def
def.Metadata = make(map[digest.Digest]OpMetadata)
for k, v := range x.Metadata {
def.Metadata[k] = v
}
}
type OpMetadata = pb.OpMetadata
func WriteTo(def *Definition, w io.Writer) error {
b, err := def.ToPB().Marshal()
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
func ReadFrom(r io.Reader) (*Definition, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
var pbDef pb.Definition
if err := pbDef.Unmarshal(b); err != nil {
return nil, err
}
var def Definition
def.FromPB(&pbDef)
return &def, nil
}

vendor/github.com/moby/buildkit/client/llb/meta.go generated vendored Normal file

@ -0,0 +1,152 @@
package llb
import (
"fmt"
"path"
"github.com/google/shlex"
)
type contextKeyT string
var (
keyArgs = contextKeyT("llb.exec.args")
keyDir = contextKeyT("llb.exec.dir")
keyEnv = contextKeyT("llb.exec.env")
keyUser = contextKeyT("llb.exec.user")
)
func addEnv(key, value string) StateOption {
return addEnvf(key, value)
}
func addEnvf(key, value string, v ...interface{}) StateOption {
return func(s State) State {
return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...)))
}
}
func dir(str string) StateOption {
return dirf(str)
}
func dirf(str string, v ...interface{}) StateOption {
return func(s State) State {
value := fmt.Sprintf(str, v...)
if !path.IsAbs(value) {
prev := getDir(s)
if prev == "" {
prev = "/"
}
value = path.Join(prev, value)
}
return s.WithValue(keyDir, value)
}
}
func user(str string) StateOption {
return func(s State) State {
return s.WithValue(keyUser, str)
}
}
func reset(s_ State) StateOption {
return func(s State) State {
s = NewState(s.Output())
s.ctx = s_.ctx
return s
}
}
func getEnv(s State) EnvList {
v := s.Value(keyEnv)
if v != nil {
return v.(EnvList)
}
return EnvList{}
}
func getDir(s State) string {
v := s.Value(keyDir)
if v != nil {
return v.(string)
}
return ""
}
func getArgs(s State) []string {
v := s.Value(keyArgs)
if v != nil {
return v.([]string)
}
return nil
}
func getUser(s State) string {
v := s.Value(keyUser)
if v != nil {
return v.(string)
}
return ""
}
func args(args ...string) StateOption {
return func(s State) State {
return s.WithValue(keyArgs, args)
}
}
func shlexf(str string, v ...interface{}) StateOption {
return func(s State) State {
arg, err := shlex.Split(fmt.Sprintf(str, v...))
if err != nil {
// TODO: handle error
}
return args(arg...)(s)
}
}
type EnvList []KeyValue
type KeyValue struct {
key string
value string
}
func (e EnvList) AddOrReplace(k, v string) EnvList {
e = e.Delete(k)
e = append(e, KeyValue{key: k, value: v})
return e
}
func (e EnvList) Delete(k string) EnvList {
e = append([]KeyValue(nil), e...)
if i, ok := e.Index(k); ok {
return append(e[:i], e[i+1:]...)
}
return e
}
func (e EnvList) Get(k string) (string, bool) {
if index, ok := e.Index(k); ok {
return e[index].value, true
}
return "", false
}
func (e EnvList) Index(k string) (int, bool) {
for i, kv := range e {
if kv.key == k {
return i, true
}
}
return -1, false
}
func (e EnvList) ToArray() []string {
out := make([]string, 0, len(e))
for _, kv := range e {
out = append(out, kv.key+"="+kv.value)
}
return out
}

vendor/github.com/moby/buildkit/client/llb/resolver.go generated vendored Normal file

@ -0,0 +1,17 @@
package llb
import (
"context"
digest "github.com/opencontainers/go-digest"
)
func WithMetaResolver(mr ImageMetaResolver) ImageOption {
return ImageOptionFunc(func(ii *ImageInfo) {
ii.metaResolver = mr
})
}
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
}

vendor/github.com/moby/buildkit/client/llb/source.go generated vendored Normal file

@ -0,0 +1,344 @@
package llb
import (
"context"
_ "crypto/sha256"
"encoding/json"
"os"
"strconv"
"strings"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type SourceOp struct {
id string
attrs map[string]string
output Output
cachedPBDigest digest.Digest
cachedPB []byte
cachedOpMetadata OpMetadata
err error
}
func NewSource(id string, attrs map[string]string, md OpMetadata) *SourceOp {
s := &SourceOp{
id: id,
attrs: attrs,
cachedOpMetadata: md,
}
s.output = &output{vertex: s}
return s
}
func (s *SourceOp) Validate() error {
if s.err != nil {
return s.err
}
if s.id == "" {
return errors.Errorf("source identifier can't be empty")
}
return nil
}
func (s *SourceOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
if s.cachedPB != nil {
return s.cachedPBDigest, s.cachedPB, &s.cachedOpMetadata, nil
}
if err := s.Validate(); err != nil {
return "", nil, nil, err
}
proto := &pb.Op{
Op: &pb.Op_Source{
Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs},
},
}
dt, err := proto.Marshal()
if err != nil {
return "", nil, nil, err
}
s.cachedPB = dt
s.cachedPBDigest = digest.FromBytes(dt)
return s.cachedPBDigest, dt, &s.cachedOpMetadata, nil
}
func (s *SourceOp) Output() Output {
return s.output
}
func (s *SourceOp) Inputs() []Output {
return nil
}
func Source(id string) State {
return NewState(NewSource(id, nil, OpMetadata{}).Output())
}
func Image(ref string, opts ...ImageOption) State {
r, err := reference.ParseNormalizedNamed(ref)
if err == nil {
ref = reference.TagNameOnly(r).String()
}
var info ImageInfo
for _, opt := range opts {
opt.SetImageOption(&info)
}
src := NewSource("docker-image://"+ref, nil, info.Metadata()) // controversial
if err != nil {
src.err = err
}
if info.metaResolver != nil {
_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref)
if err != nil {
src.err = err
} else {
var img struct {
Config struct {
Env []string `json:"Env,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty"`
User string `json:"User,omitempty"`
} `json:"config,omitempty"`
}
if err := json.Unmarshal(dt, &img); err != nil {
src.err = err
} else {
st := NewState(src.Output())
for _, env := range img.Config.Env {
parts := strings.SplitN(env, "=", 2)
if len(parts[0]) > 0 {
var v string
if len(parts) > 1 {
v = parts[1]
}
st = st.AddEnv(parts[0], v)
}
}
st = st.Dir(img.Config.WorkingDir)
return st
}
}
}
return NewState(src.Output())
}
type ImageOption interface {
SetImageOption(*ImageInfo)
}
type ImageOptionFunc func(*ImageInfo)
func (fn ImageOptionFunc) SetImageOption(ii *ImageInfo) {
fn(ii)
}
type ImageInfo struct {
opMetaWrapper
metaResolver ImageMetaResolver
}
func Git(remote, ref string, opts ...GitOption) State {
url := ""
for _, prefix := range []string{
"http://", "https://", "git://", "git@",
} {
if strings.HasPrefix(remote, prefix) {
url = strings.Split(remote, "#")[0]
remote = strings.TrimPrefix(remote, prefix)
}
}
id := remote
if ref != "" {
id += "#" + ref
}
gi := &GitInfo{}
for _, o := range opts {
o.SetGitOption(gi)
}
attrs := map[string]string{}
if gi.KeepGitDir {
attrs[pb.AttrKeepGitDir] = "true"
}
if url != "" {
attrs[pb.AttrFullRemoteURL] = url
}
source := NewSource("git://"+id, attrs, gi.Metadata())
return NewState(source.Output())
}
type GitOption interface {
SetGitOption(*GitInfo)
}
type gitOptionFunc func(*GitInfo)
func (fn gitOptionFunc) SetGitOption(gi *GitInfo) {
fn(gi)
}
type GitInfo struct {
opMetaWrapper
KeepGitDir bool
}
func KeepGitDir() GitOption {
return gitOptionFunc(func(gi *GitInfo) {
gi.KeepGitDir = true
})
}
func Scratch() State {
return NewState(nil)
}
func Local(name string, opts ...LocalOption) State {
gi := &LocalInfo{}
for _, o := range opts {
o.SetLocalOption(gi)
}
attrs := map[string]string{}
if gi.SessionID != "" {
attrs[pb.AttrLocalSessionID] = gi.SessionID
}
if gi.IncludePatterns != "" {
attrs[pb.AttrIncludePatterns] = gi.IncludePatterns
}
if gi.ExcludePatterns != "" {
attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns
}
if gi.SharedKeyHint != "" {
attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint
}
source := NewSource("local://"+name, attrs, gi.Metadata())
return NewState(source.Output())
}
type LocalOption interface {
SetLocalOption(*LocalInfo)
}
type localOptionFunc func(*LocalInfo)
func (fn localOptionFunc) SetLocalOption(li *LocalInfo) {
fn(li)
}
func SessionID(id string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
li.SessionID = id
})
}
func IncludePatterns(p []string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
if len(p) == 0 {
li.IncludePatterns = ""
return
}
dt, _ := json.Marshal(p) // empty on error
li.IncludePatterns = string(dt)
})
}
func ExcludePatterns(p []string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
if len(p) == 0 {
li.ExcludePatterns = ""
return
}
dt, _ := json.Marshal(p) // empty on error
li.ExcludePatterns = string(dt)
})
}
func SharedKeyHint(h string) LocalOption {
return localOptionFunc(func(li *LocalInfo) {
li.SharedKeyHint = h
})
}
type LocalInfo struct {
opMetaWrapper
SessionID string
IncludePatterns string
ExcludePatterns string
SharedKeyHint string
}
func HTTP(url string, opts ...HTTPOption) State {
hi := &HTTPInfo{}
for _, o := range opts {
o.SetHTTPOption(hi)
}
attrs := map[string]string{}
if hi.Checksum != "" {
attrs[pb.AttrHTTPChecksum] = hi.Checksum.String()
}
if hi.Filename != "" {
attrs[pb.AttrHTTPFilename] = hi.Filename
}
if hi.Perm != 0 {
attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8)
}
if hi.UID != 0 {
attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID)
}
if hi.GID != 0 {
attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID)
}
source := NewSource(url, attrs, hi.Metadata())
return NewState(source.Output())
}
type HTTPInfo struct {
opMetaWrapper
Checksum digest.Digest
Filename string
Perm int
UID int
GID int
}
type HTTPOption interface {
SetHTTPOption(*HTTPInfo)
}
type httpOptionFunc func(*HTTPInfo)
func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) {
fn(hi)
}
func Checksum(dgst digest.Digest) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Checksum = dgst
})
}
func Chmod(perm os.FileMode) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Perm = int(perm) & 0777
})
}
func Filename(name string) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Filename = name
})
}
func Chown(uid, gid int) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.UID = uid
hi.GID = gid
})
}
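
A hedged sketch of the Local source above as a CLI-side build context would use it: the name matches the SyncedDir registered on the session, and the session ID ties the source to that session (sessionID here stands in for s.ID() of an attached session):

src := llb.Local("context",
	llb.SessionID(sessionID), // assumed: ID of an already-attached session
	llb.IncludePatterns([]string{"**/*.go", "Dockerfile"}),
	llb.ExcludePatterns([]string{"vendor/**"}),
	llb.SharedKeyHint("my-project"),
)
_ = src // typically combined with other states via exec mounts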

vendor/github.com/moby/buildkit/client/llb/state.go generated vendored Normal file

@ -0,0 +1,312 @@
package llb
import (
"context"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
)
type StateOption func(State) State
type Output interface {
ToInput() (*pb.Input, error)
Vertex() Vertex
}
type Vertex interface {
Validate() error
Marshal() (digest.Digest, []byte, *OpMetadata, error)
Output() Output
Inputs() []Output
}
func NewState(o Output) State {
s := State{
out: o,
ctx: context.Background(),
}
s = dir("/")(s)
s = addEnv("PATH", system.DefaultPathEnv)(s)
return s
}
type State struct {
out Output
ctx context.Context
}
func (s State) WithValue(k, v interface{}) State {
return State{
out: s.out,
ctx: context.WithValue(s.ctx, k, v),
}
}
func (s State) Value(k interface{}) interface{} {
return s.ctx.Value(k)
}
func (s State) Marshal(md ...MetadataOpt) (*Definition, error) {
def := &Definition{
Metadata: make(map[digest.Digest]OpMetadata, 0),
}
if s.Output() == nil {
return def, nil
}
def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, md)
if err != nil {
return def, err
}
inp, err := s.Output().ToInput()
if err != nil {
return def, err
}
proto := &pb.Op{Inputs: []*pb.Input{inp}}
dt, err := proto.Marshal()
if err != nil {
return def, err
}
def.Def = append(def.Def, dt)
return def, nil
}
func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, md []MetadataOpt) (*Definition, error) {
if _, ok := vertexCache[v]; ok {
return def, nil
}
for _, inp := range v.Inputs() {
var err error
def, err = marshal(inp.Vertex(), def, cache, vertexCache, md)
if err != nil {
return def, err
}
}
dgst, dt, opMeta, err := v.Marshal()
if err != nil {
return def, err
}
vertexCache[v] = struct{}{}
if opMeta != nil {
m := mergeMetadata(def.Metadata[dgst], *opMeta)
for _, f := range md {
f.SetMetadataOption(&m)
}
def.Metadata[dgst] = m
}
if _, ok := cache[dgst]; ok {
return def, nil
}
def.Def = append(def.Def, dt)
cache[dgst] = struct{}{}
return def, nil
}
func (s State) Validate() error {
return s.Output().Vertex().Validate()
}
func (s State) Output() Output {
return s.out
}
func (s State) WithOutput(o Output) State {
return State{
out: o,
ctx: s.ctx,
}
}
func (s State) Run(ro ...RunOption) ExecState {
ei := &ExecInfo{State: s}
for _, o := range ro {
o.SetRunOption(ei)
}
meta := Meta{
Args: getArgs(ei.State),
Cwd: getDir(ei.State),
Env: getEnv(ei.State),
User: getUser(ei.State),
ProxyEnv: ei.ProxyEnv,
}
exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Metadata())
for _, m := range ei.Mounts {
exec.AddMount(m.Target, m.Source, m.Opts...)
}
return ExecState{
State: s.WithOutput(exec.Output()),
exec: exec,
}
}
func (s State) AddEnv(key, value string) State {
return s.AddEnvf(key, value)
}
func (s State) AddEnvf(key, value string, v ...interface{}) State {
return addEnvf(key, value, v...)(s)
}
func (s State) Dir(str string) State {
return s.Dirf(str)
}
func (s State) Dirf(str string, v ...interface{}) State {
return dirf(str, v...)(s)
}
func (s State) GetEnv(key string) (string, bool) {
return getEnv(s).Get(key)
}
func (s State) GetDir() string {
return getDir(s)
}
func (s State) GetArgs() []string {
return getArgs(s)
}
func (s State) Reset(s2 State) State {
return reset(s2)(s)
}
func (s State) User(v string) State {
return user(v)(s)
}
func (s State) With(so ...StateOption) State {
for _, o := range so {
s = o(s)
}
return s
}
type output struct {
vertex Vertex
getIndex func() (pb.OutputIndex, error)
}
func (o *output) ToInput() (*pb.Input, error) {
var index pb.OutputIndex
if o.getIndex != nil {
var err error
index, err = o.getIndex()
if err != nil {
return nil, err
}
}
dgst, _, _, err := o.vertex.Marshal()
if err != nil {
return nil, err
}
return &pb.Input{Digest: dgst, Index: index}, nil
}
func (o *output) Vertex() Vertex {
return o.vertex
}
type MetadataOpt interface {
SetMetadataOption(*OpMetadata)
RunOption
LocalOption
HTTPOption
ImageOption
GitOption
}
type metadataOptFunc func(m *OpMetadata)
func (fn metadataOptFunc) SetMetadataOption(m *OpMetadata) {
fn(m)
}
func (fn metadataOptFunc) SetRunOption(ei *ExecInfo) {
ei.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetLocalOption(li *LocalInfo) {
li.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetHTTPOption(hi *HTTPInfo) {
hi.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetImageOption(ii *ImageInfo) {
ii.ApplyMetadata(fn)
}
func (fn metadataOptFunc) SetGitOption(gi *GitInfo) {
gi.ApplyMetadata(fn)
}
func mergeMetadata(m1, m2 OpMetadata) OpMetadata {
if m2.IgnoreCache {
m1.IgnoreCache = true
}
if len(m2.Description) > 0 {
if m1.Description == nil {
m1.Description = make(map[string]string)
}
for k, v := range m2.Description {
m1.Description[k] = v
}
}
if m2.ExportCache != nil {
m1.ExportCache = m2.ExportCache
}
return m1
}
var IgnoreCache = metadataOptFunc(func(md *OpMetadata) {
md.IgnoreCache = true
})
func WithDescription(m map[string]string) MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
md.Description = m
})
}
// WithExportCache forces results for this vertex to be exported with the cache
func WithExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
md.ExportCache = &pb.ExportCache{Value: true}
})
}
// WithoutExportCache sets results for this vertex to be not exported with
// the cache
func WithoutExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
// ExportCache with value false means to disable exporting
md.ExportCache = &pb.ExportCache{Value: false}
})
}
// WithoutDefaultExportCache resets the cache export for the vertex to use
// the default defined by the build configuration.
func WithoutDefaultExportCache() MetadataOpt {
return metadataOptFunc(func(md *OpMetadata) {
// nil means no vertex based config has been set
md.ExportCache = nil
})
}
type opMetaWrapper struct {
OpMetadata
}
func (mw *opMetaWrapper) ApplyMetadata(f func(m *OpMetadata)) {
f(&mw.OpMetadata)
}
func (mw *opMetaWrapper) Metadata() OpMetadata {
return mw.OpMetadata
}
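
Putting the LLB pieces together, a minimal sketch: start from an image source, run a command on top of it, and marshal the resulting graph into the Definition that a SolveRequest carries. The image name and command are arbitrary examples:

st := llb.Image("docker.io/library/alpine:latest").
	Dir("/src").
	Run(llb.Shlex("apk add --no-cache git")).
	Root()

def, err := st.Marshal()
if err != nil {
	return err
}
// def.ToPB() is what ends up in controlapi.SolveRequest.Definition.
_ = def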

vendor/github.com/moby/buildkit/client/prune.go generated vendored Normal file

@ -0,0 +1,50 @@
package client
import (
"context"
"io"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error {
info := &PruneInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.PruneRequest{}
cl, err := c.controlClient().Prune(ctx, req)
if err != nil {
return errors.Wrap(err, "failed to call prune")
}
for {
d, err := cl.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if ch != nil {
ch <- UsageInfo{
ID: d.ID,
Mutable: d.Mutable,
InUse: d.InUse,
Size: d.Size_,
Parent: d.Parent,
CreatedAt: d.CreatedAt,
Description: d.Description,
UsageCount: int(d.UsageCount),
LastUsedAt: d.LastUsedAt,
}
}
}
}
type PruneOption func(*PruneInfo)
type PruneInfo struct {
}
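A minimal sketch (not part of this commit) of driving this API; it assumes an already-connected *client.Client. Prune streams one UsageInfo per pruned record and returns once the server ends the stream:

```go
package example

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/client"
)

// pruneAll runs Prune in a goroutine and prints every record it reports.
// Prune does not close the channel itself, so the caller does after it returns.
func pruneAll(ctx context.Context, c *client.Client) error {
	ch := make(chan client.UsageInfo)
	done := make(chan error, 1)
	go func() {
		done <- c.Prune(ctx, ch)
		close(ch)
	}()
	for info := range ch {
		fmt.Printf("pruned %s (%d bytes)\n", info.ID, info.Size)
	}
	return <-done
}
```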

vendor/github.com/moby/buildkit/client/solve.go generated vendored Normal file

@ -0,0 +1,251 @@
package client
import (
"context"
"io"
"os"
"path/filepath"
"strings"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/solver/pb"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type SolveOpt struct {
Exporter string
ExporterAttrs map[string]string
ExporterOutput io.WriteCloser // for ExporterOCI and ExporterDocker
ExporterOutputDir string // for ExporterLocal
LocalDirs map[string]string
SharedKey string
Frontend string
FrontendAttrs map[string]string
ExportCache string
ExportCacheAttrs map[string]string
ImportCache []string
Session []session.Attachable
}
// Solve calls Solve on the controller.
// def must be nil if (and only if) opt.Frontend is set.
func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) {
defer func() {
if statusChan != nil {
close(statusChan)
}
}()
if opt.Frontend == "" && def == nil {
return nil, errors.New("invalid empty definition")
}
if opt.Frontend != "" && def != nil {
return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend)
}
syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs)
if err != nil {
return nil, err
}
ref := identity.NewID()
eg, ctx := errgroup.WithContext(ctx)
statusContext, cancelStatus := context.WithCancel(context.Background())
defer cancelStatus()
if span := opentracing.SpanFromContext(ctx); span != nil {
statusContext = opentracing.ContextWithSpan(statusContext, span)
}
s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey)
if err != nil {
return nil, errors.Wrap(err, "failed to create session")
}
if len(syncedDirs) > 0 {
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
}
for _, a := range opt.Session {
s.Allow(a)
}
switch opt.Exporter {
case ExporterLocal:
if opt.ExporterOutput != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if opt.ExporterOutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTargetDir(opt.ExporterOutputDir))
case ExporterOCI, ExporterDocker:
if opt.ExporterOutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
}
if opt.ExporterOutput == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", opt.Exporter)
}
s.Allow(filesync.NewFSSyncTarget(opt.ExporterOutput))
default:
if opt.ExporterOutput != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", opt.Exporter)
}
if opt.ExporterOutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
}
}
eg.Go(func() error {
return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
})
var res *SolveResponse
eg.Go(func() error {
defer func() { // make sure the Status ends cleanly on build errors
go func() {
<-time.After(3 * time.Second)
cancelStatus()
}()
logrus.Debugf("stopping session")
s.Close()
}()
var pbd *pb.Definition
if def != nil {
pbd = def.ToPB()
}
resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
Ref: ref,
Definition: pbd,
Exporter: opt.Exporter,
ExporterAttrs: opt.ExporterAttrs,
Session: s.ID(),
Frontend: opt.Frontend,
FrontendAttrs: opt.FrontendAttrs,
Cache: controlapi.CacheOptions{
ExportRef: opt.ExportCache,
ImportRefs: opt.ImportCache,
ExportAttrs: opt.ExportCacheAttrs,
},
})
if err != nil {
return errors.Wrap(err, "failed to solve")
}
res = &SolveResponse{
ExporterResponse: resp.ExporterResponse,
}
return nil
})
eg.Go(func() error {
stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{
Ref: ref,
})
if err != nil {
return errors.Wrap(err, "failed to get status")
}
for {
resp, err := stream.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return errors.Wrap(err, "failed to receive status")
}
s := SolveStatus{}
for _, v := range resp.Vertexes {
s.Vertexes = append(s.Vertexes, &Vertex{
Digest: v.Digest,
Inputs: v.Inputs,
Name: v.Name,
Started: v.Started,
Completed: v.Completed,
Error: v.Error,
Cached: v.Cached,
})
}
for _, v := range resp.Statuses {
s.Statuses = append(s.Statuses, &VertexStatus{
ID: v.ID,
Vertex: v.Vertex,
Name: v.Name,
Total: v.Total,
Current: v.Current,
Timestamp: v.Timestamp,
Started: v.Started,
Completed: v.Completed,
})
}
for _, v := range resp.Logs {
s.Logs = append(s.Logs, &VertexLog{
Vertex: v.Vertex,
Stream: int(v.Stream),
Data: v.Msg,
Timestamp: v.Timestamp,
})
}
if statusChan != nil {
statusChan <- &s
}
}
})
if err := eg.Wait(); err != nil {
return nil, err
}
return res, nil
}
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
for _, d := range localDirs {
fi, err := os.Stat(d)
if err != nil {
return nil, errors.Wrapf(err, "could not find %s", d)
}
if !fi.IsDir() {
return nil, errors.Errorf("%s not a directory", d)
}
}
dirs := make([]filesync.SyncedDir, 0, len(localDirs))
if def == nil {
for name, d := range localDirs {
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d})
}
} else {
for _, dt := range def.Def {
var op pb.Op
if err := (&op).Unmarshal(dt); err != nil {
return nil, errors.Wrap(err, "failed to parse llb proto op")
}
if src := op.GetSource(); src != nil {
if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property
name := strings.TrimPrefix(src.Identifier, "local://")
d, ok := localDirs[name]
if !ok {
return nil, errors.Errorf("local directory %s not enabled", name)
}
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d}) // TODO: excludes
}
}
}
}
return dirs, nil
}
func defaultSessionName() string {
wd, err := os.Getwd()
if err != nil {
return "unknown"
}
return filepath.Base(wd)
}
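A minimal sketch (not part of this commit) of how Solve is typically paired with the progress UI added by this commit: Solve fills (and closes) the status channel, and progressui.DisplaySolveStatus drains it. console.ConsoleFromFile comes from github.com/containerd/console; the *client.Client and *llb.Definition are assumed to be created elsewhere:

```go
package example

import (
	"context"
	"os"

	"github.com/containerd/console"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/util/progress/progressui"
	"golang.org/x/sync/errgroup"
)

// solveWithProgress solves def while rendering live progress to stderr.
func solveWithProgress(ctx context.Context, c *client.Client, def *llb.Definition, localDirs map[string]string) error {
	ch := make(chan *client.SolveStatus)
	eg, ctx := errgroup.WithContext(ctx)

	eg.Go(func() error {
		// Solve closes ch when it returns (see the defer at the top of Solve).
		_, err := c.Solve(ctx, def, client.SolveOpt{LocalDirs: localDirs}, ch)
		return err
	})

	eg.Go(func() error {
		cons, err := console.ConsoleFromFile(os.Stderr)
		if err != nil {
			return err
		}
		// Use a separate context so the final status is still rendered
		// even if the solve context is canceled on error.
		return progressui.DisplaySolveStatus(context.TODO(), cons, ch)
	})

	return eg.Wait()
}
```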

vendor/github.com/moby/buildkit/client/workers.go generated vendored Normal file

@ -0,0 +1,49 @@
package client
import (
"context"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
)
type WorkerInfo struct {
ID string
Labels map[string]string
}
func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) {
info := &ListWorkersInfo{}
for _, o := range opts {
o(info)
}
req := &controlapi.ListWorkersRequest{Filter: info.Filter}
resp, err := c.controlClient().ListWorkers(ctx, req)
if err != nil {
return nil, errors.Wrap(err, "failed to list workers")
}
var wi []*WorkerInfo
for _, w := range resp.Record {
wi = append(wi, &WorkerInfo{
ID: w.ID,
Labels: w.Labels,
})
}
return wi, nil
}
type ListWorkersOption func(*ListWorkersInfo)
type ListWorkersInfo struct {
Filter []string
}
func WithWorkerFilter(f []string) ListWorkersOption {
return func(wi *ListWorkersInfo) {
wi.Filter = f
}
}
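A minimal sketch (not part of this commit) of this API; the filter string is illustrative only (a containerd-style filter over worker labels, whose actual keys depend on the worker):

```go
package example

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/client"
)

// printWorkers lists the daemon's workers, optionally narrowed by a filter.
func printWorkers(ctx context.Context, c *client.Client) error {
	workers, err := c.ListWorkers(ctx, client.WithWorkerFilter([]string{"labels.os==linux"}))
	if err != nil {
		return err
	}
	for _, w := range workers {
		fmt.Println(w.ID, w.Labels)
	}
	return nil
}
```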


@ -0,0 +1,156 @@
package grpchijack
import (
"context"
"io"
"net"
"strings"
"sync"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/session"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
func Dialer(api controlapi.ControlClient) session.Dialer {
return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
meta = lowerHeaders(meta)
md := metadata.MD(meta)
ctx = metadata.NewOutgoingContext(ctx, md)
stream, err := api.Session(ctx)
if err != nil {
return nil, err
}
c, _ := streamToConn(stream)
return c, nil
}
}
func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) {
closeCh := make(chan struct{})
c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh}
return c, closeCh
}
type conn struct {
stream grpc.Stream
buf []byte
lastBuf []byte
closedOnce sync.Once
readMu sync.Mutex
err error
closeCh chan struct{}
}
func (c *conn) Read(b []byte) (n int, err error) {
c.readMu.Lock()
defer c.readMu.Unlock()
if c.lastBuf != nil {
n := copy(b, c.lastBuf)
c.lastBuf = c.lastBuf[n:]
if len(c.lastBuf) == 0 {
c.lastBuf = nil
}
return n, nil
}
m := new(controlapi.BytesMessage)
m.Data = c.buf
if err := c.stream.RecvMsg(m); err != nil {
return 0, err
}
c.buf = m.Data[:cap(m.Data)]
n = copy(b, m.Data)
if n < len(m.Data) {
c.lastBuf = m.Data[n:]
}
return n, nil
}
func (c *conn) Write(b []byte) (int, error) {
m := &controlapi.BytesMessage{Data: b}
if err := c.stream.SendMsg(m); err != nil {
return 0, err
}
return len(b), nil
}
func (c *conn) Close() (err error) {
c.closedOnce.Do(func() {
defer func() {
close(c.closeCh)
}()
if cs, ok := c.stream.(grpc.ClientStream); ok {
err = cs.CloseSend()
if err != nil {
return
}
}
c.readMu.Lock()
for {
m := new(controlapi.BytesMessage)
m.Data = c.buf
err = c.stream.RecvMsg(m)
if err != nil {
if err != io.EOF {
return
}
err = nil
break
}
c.buf = m.Data[:cap(m.Data)]
c.lastBuf = append(c.lastBuf, c.buf...)
}
c.readMu.Unlock()
})
return nil
}
func (c *conn) LocalAddr() net.Addr {
return dummyAddr{}
}
func (c *conn) RemoteAddr() net.Addr {
return dummyAddr{}
}
func (c *conn) SetDeadline(t time.Time) error {
return nil
}
func (c *conn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *conn) SetWriteDeadline(t time.Time) error {
return nil
}
type dummyAddr struct {
}
func (d dummyAddr) Network() string {
return "tcp"
}
func (d dummyAddr) String() string {
return "localhost"
}
func lowerHeaders(in map[string][]string) map[string][]string {
out := map[string][]string{}
for k := range in {
out[strings.ToLower(k)] = in[k]
}
return out
}
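A minimal sketch (not part of this commit) of the client side: solve.go above uses this dialer so the session transport rides on the control API's Session RPC instead of opening a separate network connection. The controlapi.ControlClient and *session.Session are assumed to be set up elsewhere:

```go
package example

import (
	"context"

	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/grpchijack"
)

// attachSession runs the session over a hijacked gRPC stream to the daemon.
func attachSession(ctx context.Context, s *session.Session, ctrl controlapi.ControlClient) error {
	return s.Run(ctx, grpchijack.Dialer(ctrl))
}
```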


@ -0,0 +1,14 @@
package grpchijack
import (
"net"
controlapi "github.com/moby/buildkit/api/services/control"
"google.golang.org/grpc/metadata"
)
func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) {
md, _ := metadata.FromIncomingContext(stream.Context())
c, closeCh := streamToConn(stream)
return c, closeCh, md
}

vendor/github.com/moby/buildkit/solver/pb/attr.go generated vendored Normal file

@ -0,0 +1,15 @@
package pb
const AttrKeepGitDir = "git.keepgitdir"
const AttrFullRemoteURL = "git.fullurl"
const AttrLocalSessionID = "local.session"
const AttrIncludePatterns = "local.includepattern"
const AttrExcludePatterns = "local.excludepatterns"
const AttrSharedKeyHint = "local.sharedkeyhint"
const AttrLLBDefinitionFilename = "llbbuild.filename"
const AttrHTTPChecksum = "http.checksum"
const AttrHTTPFilename = "http.filename"
const AttrHTTPPerm = "http.perm"
const AttrHTTPUID = "http.uid"
const AttrHTTPGID = "http.gid"

vendor/github.com/moby/buildkit/solver/pb/const.go generated vendored Normal file

@ -0,0 +1,12 @@
package pb
type InputIndex int64
type OutputIndex int64
const RootMount = "/"
const SkipOutput OutputIndex = -1
const Empty InputIndex = -1
const LLBBuilder InputIndex = -1
const LLBDefinitionInput = "buildkit.llb.definition"
const LLBDefaultDefinitionFile = LLBDefinitionInput


@ -0,0 +1,3 @@
package pb
//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto

vendor/github.com/moby/buildkit/solver/pb/ops.pb.go generated vendored Normal file

File diff suppressed because it is too large

vendor/github.com/moby/buildkit/solver/pb/ops.proto generated vendored Normal file

@ -0,0 +1,136 @@
syntax = "proto3";
// Package pb provides the protobuf definition of LLB: low-level builder instruction.
// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph.
package pb;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
// Op represents a vertex of the LLB DAG.
message Op {
// inputs is a set of input edges.
repeated Input inputs = 1;
oneof op {
ExecOp exec = 2;
SourceOp source = 3;
CopyOp copy = 4;
BuildOp build = 5;
}
}
// Input represents an input edge for an Op.
message Input {
// digest of the marshaled input Op
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// output index of the input Op
int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
}
// ExecOp executes a command in a container.
message ExecOp {
Meta meta = 1;
repeated Mount mounts = 2;
}
// Meta is a set of arguments for ExecOp.
// Meta is unrelated to LLB metadata.
// FIXME: rename (ExecContext? ExecArgs?)
message Meta {
repeated string args = 1;
repeated string env = 2;
string cwd = 3;
string user = 4;
ProxyEnv proxy_env = 5;
}
// Mount specifies how to mount an input Op as a filesystem.
message Mount {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
string selector = 2;
string dest = 3;
int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
bool readonly = 5;
MountType mountType = 6;
CacheOpt cacheOpt = 20;
}
enum MountType {
BIND = 0;
SECRET = 1;
SSH = 2;
CACHE = 3;
}
message CacheOpt {
string ID = 1;
}
// CopyOp copies files across Ops.
message CopyOp {
repeated CopySource src = 1;
string dest = 2;
}
// CopySource specifies a source for CopyOp.
message CopySource {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
string selector = 2;
}
// SourceOp specifies a source such as build contexts and images.
message SourceOp {
// TODO: use source type or any type instead of URL protocol.
// identifier e.g. local://, docker-image://, git://, https://...
string identifier = 1;
// attrs are defined in attr.go
map<string, string> attrs = 2;
}
// BuildOp is used for nested build invocation.
message BuildOp {
int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
map<string, BuildInput> inputs = 2;
Definition def = 3;
map<string, string> attrs = 4;
// outputs
}
// BuildInput is used for BuildOp.
message BuildInput {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
}
// OpMetadata is a per-vertex metadata entry, which can be defined for an arbitrary Op vertex and is overridable at run time.
message OpMetadata {
// ignore_cache specifies to ignore the cache for this Op.
bool ignore_cache = 1;
// Description can be used for keeping any text fields that the builder doesn't parse
map<string, string> description = 2;
WorkerConstraint worker_constraint = 3;
ExportCache export_cache = 4;
}
message ExportCache {
bool Value = 1;
}
message ProxyEnv {
string http_proxy = 1;
string https_proxy = 2;
string ftp_proxy = 3;
string no_proxy = 4;
}
// WorkerConstraint is experimental and likely to be changed.
message WorkerConstraint {
repeated string filter = 1; // containerd-style filter
}
// Definition is the LLB definition structure with per-vertex metadata entries
message Definition {
// def is a list of marshaled Op messages
repeated bytes def = 1;
// metadata contains metadata for each of the Op messages.
// A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future.
map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
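For illustration (not part of this commit), this is roughly what a single local:// source vertex looks like when built with the generated Go types from ops.pb.go; it is the inverse of the unmarshal done in prepareSyncedDirs above. The pb.Op_Source wrapper name is the gogo-generated oneof wrapper and is an assumption here:

```go
package example

import (
	"github.com/moby/buildkit/solver/pb"
	digest "github.com/opencontainers/go-digest"
)

// marshalLocalSource builds and marshals a SourceOp vertex for the "context"
// local directory and returns its content-addressed digest.
func marshalLocalSource() ([]byte, digest.Digest, error) {
	op := pb.Op{
		Op: &pb.Op_Source{
			Source: &pb.SourceOp{
				Identifier: "local://context",
				Attrs:      map[string]string{pb.AttrSharedKeyHint: "context"},
			},
		},
	}
	dt, err := op.Marshal()
	if err != nil {
		return nil, "", err
	}
	return dt, digest.FromBytes(dt), nil
}
```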


@ -0,0 +1,55 @@
// +build !windows
package appdefaults
import (
"os"
"path/filepath"
"strings"
)
const (
Address = "unix:///run/buildkit/buildkitd.sock"
Root = "/var/lib/buildkit"
)
// UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock
func UserAddress() string {
// pam_systemd sets XDG_RUNTIME_DIR but not other dirs.
xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
if xdgRuntimeDir != "" {
dirs := strings.Split(xdgRuntimeDir, ":")
return "unix://" + filepath.Join(dirs[0], "buildkit", "buildkitd.sock")
}
return Address
}
// EnsureUserAddressDir creates $XDG_RUNTIME_DIR/buildkit and sets the sticky bit on it if XDG_RUNTIME_DIR is set.
// See https://github.com/opencontainers/runc/issues/1694
func EnsureUserAddressDir() error {
xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
if xdgRuntimeDir != "" {
dirs := strings.Split(xdgRuntimeDir, ":")
dir := filepath.Join(dirs[0], "buildkit")
if err := os.MkdirAll(dir, 0700); err != nil {
return err
}
return os.Chmod(dir, 0700|os.ModeSticky)
}
return nil
}
// UserRoot typically returns /home/$USER/.local/share/buildkit
func UserRoot() string {
// pam_systemd sets XDG_RUNTIME_DIR but not other dirs.
xdgDataHome := os.Getenv("XDG_DATA_HOME")
if xdgDataHome != "" {
dirs := strings.Split(xdgDataHome, ":")
return filepath.Join(dirs[0], "buildkit")
}
home := os.Getenv("HOME")
if home != "" {
return filepath.Join(home, ".local", "share", "buildkit")
}
return Root
}
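A minimal sketch (not part of this commit) of how these defaults are usually combined when picking a daemon address; the BUILDKIT_HOST override mirrors what buildctl does, and the euid check is an assumption about when the per-user socket applies:

```go
package example

import (
	"os"

	"github.com/moby/buildkit/util/appdefaults"
)

// resolveAddress picks the buildkitd address: explicit env var first,
// then the per-user socket for non-root users, then the system default.
func resolveAddress() string {
	if addr := os.Getenv("BUILDKIT_HOST"); addr != "" {
		return addr
	}
	if os.Geteuid() != 0 {
		return appdefaults.UserAddress()
	}
	return appdefaults.Address
}
```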


@ -0,0 +1,18 @@
package appdefaults
const (
Address = "npipe:////./pipe/buildkitd"
Root = ".buildstate"
)
func UserAddress() string {
return Address
}
func EnsureUserAddressDir() error {
return nil
}
func UserRoot() string {
return Root
}


@ -0,0 +1,310 @@
package progressui
import (
"context"
"fmt"
"io"
"strings"
"time"
"github.com/containerd/console"
"github.com/moby/buildkit/client"
"github.com/morikuni/aec"
digest "github.com/opencontainers/go-digest"
"github.com/tonistiigi/units"
"golang.org/x/time/rate"
)
func DisplaySolveStatus(ctx context.Context, c console.Console, ch chan *client.SolveStatus) error {
disp := &display{c: c}
t := newTrace()
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
displayLimiter := rate.NewLimiter(rate.Every(70*time.Millisecond), 1)
var done bool
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
case ss, ok := <-ch:
if ok {
t.update(ss)
} else {
done = true
}
}
if done {
disp.print(t.displayInfo(), true)
t.printErrorLogs(c)
return nil
} else if displayLimiter.Allow() {
disp.print(t.displayInfo(), false)
}
}
}
type displayInfo struct {
startTime time.Time
jobs []job
countTotal int
countCompleted int
}
type job struct {
startTime *time.Time
completedTime *time.Time
name string
status string
hasError bool
isCanceled bool
}
type trace struct {
localTimeDiff time.Duration
vertexes []*vertex
byDigest map[digest.Digest]*vertex
}
type vertex struct {
*client.Vertex
statuses []*status
byID map[string]*status
logs []*client.VertexLog
indent string
}
type status struct {
*client.VertexStatus
}
func newTrace() *trace {
return &trace{
byDigest: make(map[digest.Digest]*vertex),
}
}
func (t *trace) update(s *client.SolveStatus) {
for _, v := range s.Vertexes {
prev, ok := t.byDigest[v.Digest]
if !ok {
t.byDigest[v.Digest] = &vertex{
byID: make(map[string]*status),
}
}
if v.Started != nil && (prev == nil || prev.Started == nil) {
if t.localTimeDiff == 0 {
t.localTimeDiff = time.Since(*v.Started)
}
t.vertexes = append(t.vertexes, t.byDigest[v.Digest])
}
t.byDigest[v.Digest].Vertex = v
}
for _, s := range s.Statuses {
v, ok := t.byDigest[s.Vertex]
if !ok {
continue // shouldn't happen
}
prev, ok := v.byID[s.ID]
if !ok {
v.byID[s.ID] = &status{VertexStatus: s}
}
if s.Started != nil && (prev == nil || prev.Started == nil) {
v.statuses = append(v.statuses, v.byID[s.ID])
}
v.byID[s.ID].VertexStatus = s
}
for _, l := range s.Logs {
v, ok := t.byDigest[l.Vertex]
if !ok {
continue // shouldn't happen
}
v.logs = append(v.logs, l)
}
}
func (t *trace) printErrorLogs(f io.Writer) {
for _, v := range t.vertexes {
if v.Error != "" && !strings.HasSuffix(v.Error, context.Canceled.Error()) {
fmt.Fprintln(f, "------")
fmt.Fprintf(f, " > %s:\n", v.Name)
for _, l := range v.logs {
switch l.Stream {
case 1:
f.Write(l.Data)
case 2:
f.Write(l.Data)
}
}
fmt.Fprintln(f, "------")
}
}
}
func (t *trace) displayInfo() (d displayInfo) {
d.startTime = time.Now()
if t.localTimeDiff != 0 {
d.startTime = (*t.vertexes[0].Started).Add(t.localTimeDiff)
}
d.countTotal = len(t.byDigest)
for _, v := range t.byDigest {
if v.Completed != nil {
d.countCompleted++
}
}
for _, v := range t.vertexes {
j := job{
startTime: addTime(v.Started, t.localTimeDiff),
completedTime: addTime(v.Completed, t.localTimeDiff),
name: strings.Replace(v.Name, "\t", " ", -1),
}
if v.Error != "" {
if strings.HasSuffix(v.Error, context.Canceled.Error()) {
j.isCanceled = true
j.name = "CANCELED " + j.name
} else {
j.hasError = true
j.name = "ERROR " + j.name
}
}
if v.Cached {
j.name = "CACHED " + j.name
}
j.name = v.indent + j.name
d.jobs = append(d.jobs, j)
for _, s := range v.statuses {
j := job{
startTime: addTime(s.Started, t.localTimeDiff),
completedTime: addTime(s.Completed, t.localTimeDiff),
name: v.indent + "=> " + s.ID,
}
if s.Total != 0 {
j.status = fmt.Sprintf("%.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total))
} else if s.Current != 0 {
j.status = fmt.Sprintf("%.2f", units.Bytes(s.Current))
}
d.jobs = append(d.jobs, j)
}
}
return d
}
func addTime(tm *time.Time, d time.Duration) *time.Time {
if tm == nil {
return nil
}
t := (*tm).Add(d)
return &t
}
type display struct {
c console.Console
lineCount int
repeated bool
}
func (disp *display) print(d displayInfo, all bool) {
// this output is inspired by Buck
width := 80
height := 10
size, err := disp.c.Size()
if err == nil && size.Width > 0 && size.Height > 0 {
width = int(size.Width)
height = int(size.Height)
}
if !all {
d.jobs = wrapHeight(d.jobs, height-2)
}
b := aec.EmptyBuilder
for i := 0; i <= disp.lineCount; i++ {
b = b.Up(1)
}
if !disp.repeated {
b = b.Down(1)
}
disp.repeated = true
fmt.Fprint(disp.c, b.Column(0).ANSI)
statusStr := ""
if d.countCompleted > 0 && d.countCompleted == d.countTotal && all {
statusStr = "FINISHED"
}
fmt.Fprint(disp.c, aec.Hide)
defer fmt.Fprint(disp.c, aec.Show)
out := fmt.Sprintf("[+] Building %.1fs (%d/%d) %s", time.Since(d.startTime).Seconds(), d.countCompleted, d.countTotal, statusStr)
out = align(out, "", width)
fmt.Fprintln(disp.c, out)
lineCount := 0
for _, j := range d.jobs {
endTime := time.Now()
if j.completedTime != nil {
endTime = *j.completedTime
}
if j.startTime == nil {
continue
}
dt := endTime.Sub(*j.startTime).Seconds()
if dt < 0.05 {
dt = 0
}
pfx := " => "
timer := fmt.Sprintf(" %3.1fs\n", dt)
status := j.status
showStatus := false
left := width - len(pfx) - len(timer) - 1
if status != "" {
if left+len(status) > 20 {
showStatus = true
left -= len(status) + 1
}
}
if left < 12 { // too small screen to show progress
continue
}
if len(j.name) > left {
j.name = j.name[:left]
}
out := pfx + j.name
if showStatus {
out += " " + status
}
out = align(out, timer, width)
if j.completedTime != nil {
color := aec.BlueF
if j.isCanceled {
color = aec.YellowF
} else if j.hasError {
color = aec.RedF
}
out = aec.Apply(out, color)
}
fmt.Fprint(disp.c, out)
lineCount++
}
disp.lineCount = lineCount
}
func align(l, r string, w int) string {
return fmt.Sprintf("%-[2]*[1]s %[3]s", l, w-len(r)-1, r)
}
func wrapHeight(j []job, limit int) []job {
if len(j) > limit {
j = j[len(j)-limit:]
}
return j
}


@ -0,0 +1,14 @@
// +build !windows
package system
// DefaultPathEnv is a unix-style list of directories to search for
// executables. Each directory is separated from the next by a colon
// ':' character.
const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
// is the system drive. This is a no-op on Linux.
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
return path, nil
}


@ -0,0 +1,37 @@
// +build windows
package system
import (
"fmt"
"path/filepath"
"strings"
)
// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
// the container. Docker has no context of what the default path should be.
const DefaultPathEnv = ""
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
// This is used, for example, when validating a user provided path in docker cp.
// If a drive letter is supplied, it must be the system drive. The drive letter
// is always removed. Also, it translates it to OS semantics (IOW / to \). We
// need the path in this syntax so that it can ultimately be concatenated with
// a Windows long-path which doesn't support drive-letters. Examples:
// C: --> Fail
// C:\ --> \
// a --> a
// /a --> \a
// d:\ --> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
if len(path) == 2 && string(path[1]) == ":" {
return "", fmt.Errorf("No relative path specified in %q", path)
}
if !filepath.IsAbs(path) || len(path) < 2 {
return filepath.FromSlash(path), nil
}
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
return "", fmt.Errorf("The specified path is not on the system drive (C:)")
}
return filepath.FromSlash(path[2:]), nil
}


@ -0,0 +1,29 @@
// +build linux
package system
import (
"sync"
"golang.org/x/sys/unix"
)
var seccompSupported bool
var seccompOnce sync.Once
func SeccompSupported() bool {
seccompOnce.Do(func() {
seccompSupported = getSeccompSupported()
})
return seccompSupported
}
func getSeccompSupported() bool {
if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
// Make sure the kernel has CONFIG_SECCOMP_FILTER.
if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
return true
}
}
return false
}


@ -0,0 +1,7 @@
// +build !linux
package system
func SeccompSupported() bool {
return false
}

vendor/github.com/tonistiigi/units/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Tõnis Tiigi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/tonistiigi/units/bytes.go generated vendored Normal file

@ -0,0 +1,117 @@
/*
Simple byte size formatting.
This package implements types that can be used in stdlib formatting functions
like `fmt.Printf` to control the output of the expected printed string.
Floating point flags %f and %g print the value using the correct unit
suffix. Decimal units are default, # switches to binary units. If a value is
best represented as full bytes, integer bytes are printed instead.
Examples:
fmt.Printf("%.2f", 123 * B) => "123B"
fmt.Printf("%.2f", 1234 * B) => "1.23kB"
fmt.Printf("%g", 1200 * B) => "1.2kB"
fmt.Printf("%#g", 1024 * B) => "1KiB"
Integer flag %d always prints the value in bytes. # flag adds a unit prefix.
Examples:
fmt.Printf("%d", 1234 * B) => "1234"
fmt.Printf("%#d", 1234 * B) => "1234B"
%v is equal to %g
*/
package units
import (
"fmt"
"io"
"math"
"math/big"
)
type Bytes int64
const (
B Bytes = 1 << (10 * iota)
KiB
MiB
GiB
TiB
PiB
EiB
KB = 1e3 * B
MB = 1e3 * KB
GB = 1e3 * MB
TB = 1e3 * GB
PB = 1e3 * TB
EB = 1e3 * PB
)
var units = map[bool][]string{
false: []string{
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB",
},
true: []string{
"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
},
}
func (b Bytes) Format(f fmt.State, c rune) {
switch c {
case 'f', 'g':
fv, unit, ok := b.floatValue(f.Flag('#'))
if !ok {
b.formatInt(f, 'd', true)
return
}
big.NewFloat(fv).Format(f, c)
io.WriteString(f, unit)
case 'd':
b.formatInt(f, c, f.Flag('#'))
default:
if f.Flag('#') {
fmt.Fprintf(f, "bytes(%d)", int64(b))
} else {
fmt.Fprintf(f, "%g", b)
}
}
}
func (b Bytes) formatInt(f fmt.State, c rune, withUnit bool) {
big.NewInt(int64(b)).Format(f, c)
if withUnit {
io.WriteString(f, "B")
}
}
func (b Bytes) floatValue(binary bool) (float64, string, bool) {
i := 0
var baseUnit Bytes = 1
if b < 0 {
baseUnit *= -1
}
for {
next := baseUnit
if binary {
next *= 1 << 10
} else {
next *= 1e3
}
if (baseUnit > 0 && b >= next) || (baseUnit < 0 && b <= next) {
i++
baseUnit = next
continue
}
if i == 0 {
return 0, "", false
}
return float64(b) / math.Abs(float64(baseUnit)), units[binary][i], true
}
}

vendor/github.com/tonistiigi/units/readme.md generated vendored Normal file

@ -0,0 +1,29 @@
#### Simple byte size formatting.
This package implements types that can be used in stdlib formatting functions
like `fmt.Printf` to control the output of the expected printed string.
Floating point flags `%f` and `%g` print the value using the correct unit
suffix. Decimal units are default, `#` switches to binary units. If a value is
best represented as full bytes, integer bytes are printed instead.
##### Examples:
```
fmt.Printf("%.2f", 123 * B) => "123B"
fmt.Printf("%.2f", 1234 * B) => "1.23kB"
fmt.Printf("%g", 1200 * B) => "1.2kB"
fmt.Printf("%#g", 1024 * B) => "1KiB"
```
Integer flag `%d` always prints the value in bytes. `#` flag adds a unit prefix.
##### Examples:
```
fmt.Printf("%d", 1234 * B) => "1234"
fmt.Printf("%#d", 1234 * B) => "1234B"
```
`%v` is equal to `%g`
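
The snippets above as a compilable program (not part of this commit); the expected outputs are the ones documented above, and this is the formatting progressui relies on for its "current / total" byte counters:

```go
package main

import (
	"fmt"

	"github.com/tonistiigi/units"
)

func main() {
	fmt.Printf("%.2f\n", 123*units.B)  // 123B  (full bytes are printed as an integer)
	fmt.Printf("%.2f\n", 1234*units.B) // 1.23kB
	fmt.Printf("%g\n", 1200*units.B)   // 1.2kB
	fmt.Printf("%#g\n", 1024*units.B)  // 1KiB  (binary units with the # flag)
	fmt.Printf("%#d\n", 1234*units.B)  // 1234B
}
```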