mirror of https://github.com/docker/cli.git
Merge pull request #1111 from tiborvass/experimental-buildkit
Support for experimental BuildKit
This commit is contained in:
commit
2daec78609
|
@ -16,6 +16,15 @@ const (
|
||||||
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}"
|
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}"
|
||||||
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
|
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
|
||||||
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
|
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
|
||||||
|
defaultBuildCacheVerboseFormat = `
|
||||||
|
ID: {{.ID}}
|
||||||
|
Description: {{.Description}}
|
||||||
|
Mutable: {{.Mutable}}
|
||||||
|
Size: {{.Size}}
|
||||||
|
CreatedAt: {{.CreatedAt}}
|
||||||
|
LastUsedAt: {{.LastUsedAt}}
|
||||||
|
UsageCount: {{.UsageCount}}
|
||||||
|
`
|
||||||
|
|
||||||
typeHeader = "TYPE"
|
typeHeader = "TYPE"
|
||||||
totalHeader = "TOTAL"
|
totalHeader = "TOTAL"
|
||||||
|
@ -34,6 +43,7 @@ type DiskUsageContext struct {
|
||||||
Images []*types.ImageSummary
|
Images []*types.ImageSummary
|
||||||
Containers []*types.Container
|
Containers []*types.Container
|
||||||
Volumes []*types.Volume
|
Volumes []*types.Volume
|
||||||
|
BuildCache []*types.BuildCache
|
||||||
BuilderSize int64
|
BuilderSize int64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -100,6 +110,7 @@ func (ctx *DiskUsageContext) Write() (err error) {
|
||||||
|
|
||||||
err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{
|
err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{
|
||||||
builderSize: ctx.BuilderSize,
|
builderSize: ctx.BuilderSize,
|
||||||
|
buildCache: ctx.BuildCache,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -184,6 +195,13 @@ func (ctx *DiskUsageContext) verboseWrite() error {
|
||||||
|
|
||||||
// And build cache
|
// And build cache
|
||||||
fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
|
fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
|
||||||
|
|
||||||
|
t := template.Must(template.New("buildcache").Parse(defaultBuildCacheVerboseFormat))
|
||||||
|
|
||||||
|
for _, v := range ctx.BuildCache {
|
||||||
|
t.Execute(ctx.Output, *v)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -366,6 +384,7 @@ func (c *diskUsageVolumesContext) Reclaimable() string {
|
||||||
type diskUsageBuilderContext struct {
|
type diskUsageBuilderContext struct {
|
||||||
HeaderContext
|
HeaderContext
|
||||||
builderSize int64
|
builderSize int64
|
||||||
|
buildCache []*types.BuildCache
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) {
|
func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) {
|
||||||
|
@ -377,11 +396,17 @@ func (c *diskUsageBuilderContext) Type() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *diskUsageBuilderContext) TotalCount() string {
|
func (c *diskUsageBuilderContext) TotalCount() string {
|
||||||
return ""
|
return fmt.Sprintf("%d", len(c.buildCache))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *diskUsageBuilderContext) Active() string {
|
func (c *diskUsageBuilderContext) Active() string {
|
||||||
return ""
|
numActive := 0
|
||||||
|
for _, bc := range c.buildCache {
|
||||||
|
if bc.InUse {
|
||||||
|
numActive++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d", numActive)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *diskUsageBuilderContext) Size() string {
|
func (c *diskUsageBuilderContext) Size() string {
|
||||||
|
@ -389,5 +414,12 @@ func (c *diskUsageBuilderContext) Size() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *diskUsageBuilderContext) Reclaimable() string {
|
func (c *diskUsageBuilderContext) Reclaimable() string {
|
||||||
return c.Size()
|
var inUseBytes int64
|
||||||
|
for _, bc := range c.buildCache {
|
||||||
|
if bc.InUse {
|
||||||
|
inUseBytes += bc.Size
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return units.HumanSize(float64(c.builderSize - inUseBytes))
|
||||||
}
|
}
|
||||||
|
|
|
@ -25,7 +25,7 @@ func TestDiskUsageContextFormatWrite(t *testing.T) {
|
||||||
Images 0 0 0B 0B
|
Images 0 0 0B 0B
|
||||||
Containers 0 0 0B 0B
|
Containers 0 0 0B 0B
|
||||||
Local Volumes 0 0 0B 0B
|
Local Volumes 0 0 0B 0B
|
||||||
Build Cache 0B 0B
|
Build Cache 0 0 0B 0B
|
||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -76,7 +76,7 @@ Build cache usage: 0B
|
||||||
Images 0 0 0B 0B
|
Images 0 0 0B 0B
|
||||||
Containers 0 0 0B 0B
|
Containers 0 0 0B 0B
|
||||||
Local Volumes 0 0 0B 0B
|
Local Volumes 0 0 0B 0B
|
||||||
Build Cache 0B 0B
|
Build Cache 0 0 0B 0B
|
||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
|
@ -2,4 +2,4 @@ TYPE ACTIVE
|
||||||
Images 0
|
Images 0
|
||||||
Containers 0
|
Containers 0
|
||||||
Local Volumes 0
|
Local Volumes 0
|
||||||
Build Cache
|
Build Cache 0
|
||||||
|
|
|
@ -17,8 +17,8 @@ size: 0B
|
||||||
reclaimable: 0B
|
reclaimable: 0B
|
||||||
|
|
||||||
type: Build Cache
|
type: Build Cache
|
||||||
total:
|
total: 0
|
||||||
active:
|
active: 0
|
||||||
size: 0B
|
size: 0B
|
||||||
reclaimable: 0B
|
reclaimable: 0B
|
||||||
|
|
||||||
|
|
|
@ -35,6 +35,8 @@ import (
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var errStdinConflict = errors.New("invalid argument: can't use stdin for both build context and dockerfile")
|
||||||
|
|
||||||
type buildOptions struct {
|
type buildOptions struct {
|
||||||
context string
|
context string
|
||||||
dockerfileName string
|
dockerfileName string
|
||||||
|
@ -55,6 +57,7 @@ type buildOptions struct {
|
||||||
isolation string
|
isolation string
|
||||||
quiet bool
|
quiet bool
|
||||||
noCache bool
|
noCache bool
|
||||||
|
console opts.NullableBool
|
||||||
rm bool
|
rm bool
|
||||||
forceRm bool
|
forceRm bool
|
||||||
pull bool
|
pull bool
|
||||||
|
@ -149,6 +152,9 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
|
||||||
flags.SetAnnotation("stream", "experimental", nil)
|
flags.SetAnnotation("stream", "experimental", nil)
|
||||||
flags.SetAnnotation("stream", "version", []string{"1.31"})
|
flags.SetAnnotation("stream", "version", []string{"1.31"})
|
||||||
|
|
||||||
|
flags.Var(&options.console, "console", "Show console output (with buildkit only) (true, false, auto)")
|
||||||
|
flags.SetAnnotation("console", "experimental", nil)
|
||||||
|
flags.SetAnnotation("console", "version", []string{"1.38"})
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -170,6 +176,10 @@ func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
|
||||||
|
|
||||||
// nolint: gocyclo
|
// nolint: gocyclo
|
||||||
func runBuild(dockerCli command.Cli, options buildOptions) error {
|
func runBuild(dockerCli command.Cli, options buildOptions) error {
|
||||||
|
if os.Getenv("DOCKER_BUILDKIT") != "" {
|
||||||
|
return runBuildBuildKit(dockerCli, options)
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
buildCtx io.ReadCloser
|
buildCtx io.ReadCloser
|
||||||
dockerfileCtx io.ReadCloser
|
dockerfileCtx io.ReadCloser
|
||||||
|
@ -188,7 +198,7 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
|
||||||
|
|
||||||
if options.dockerfileFromStdin() {
|
if options.dockerfileFromStdin() {
|
||||||
if options.contextFromStdin() {
|
if options.contextFromStdin() {
|
||||||
return errors.New("invalid argument: can't use stdin for both build context and dockerfile")
|
return errStdinConflict
|
||||||
}
|
}
|
||||||
dockerfileCtx = dockerCli.In()
|
dockerfileCtx = dockerCli.In()
|
||||||
}
|
}
|
||||||
|
@ -362,37 +372,11 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
|
||||||
|
|
||||||
configFile := dockerCli.ConfigFile()
|
configFile := dockerCli.ConfigFile()
|
||||||
authConfigs, _ := configFile.GetAllCredentials()
|
authConfigs, _ := configFile.GetAllCredentials()
|
||||||
buildOptions := types.ImageBuildOptions{
|
buildOptions := imageBuildOptions(dockerCli, options)
|
||||||
Memory: options.memory.Value(),
|
buildOptions.Version = types.BuilderV1
|
||||||
MemorySwap: options.memorySwap.Value(),
|
buildOptions.Dockerfile = relDockerfile
|
||||||
Tags: options.tags.GetAll(),
|
buildOptions.AuthConfigs = authConfigs
|
||||||
SuppressOutput: options.quiet,
|
buildOptions.RemoteContext = remote
|
||||||
NoCache: options.noCache,
|
|
||||||
Remove: options.rm,
|
|
||||||
ForceRemove: options.forceRm,
|
|
||||||
PullParent: options.pull,
|
|
||||||
Isolation: container.Isolation(options.isolation),
|
|
||||||
CPUSetCPUs: options.cpuSetCpus,
|
|
||||||
CPUSetMems: options.cpuSetMems,
|
|
||||||
CPUShares: options.cpuShares,
|
|
||||||
CPUQuota: options.cpuQuota,
|
|
||||||
CPUPeriod: options.cpuPeriod,
|
|
||||||
CgroupParent: options.cgroupParent,
|
|
||||||
Dockerfile: relDockerfile,
|
|
||||||
ShmSize: options.shmSize.Value(),
|
|
||||||
Ulimits: options.ulimits.GetList(),
|
|
||||||
BuildArgs: configFile.ParseProxyConfig(dockerCli.Client().DaemonHost(), options.buildArgs.GetAll()),
|
|
||||||
AuthConfigs: authConfigs,
|
|
||||||
Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()),
|
|
||||||
CacheFrom: options.cacheFrom,
|
|
||||||
SecurityOpt: options.securityOpt,
|
|
||||||
NetworkMode: options.networkMode,
|
|
||||||
Squash: options.squash,
|
|
||||||
ExtraHosts: options.extraHosts.GetAll(),
|
|
||||||
Target: options.target,
|
|
||||||
RemoteContext: remote,
|
|
||||||
Platform: options.platform,
|
|
||||||
}
|
|
||||||
|
|
||||||
if s != nil {
|
if s != nil {
|
||||||
go func() {
|
go func() {
|
||||||
|
@ -416,9 +400,9 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
|
||||||
defer response.Body.Close()
|
defer response.Body.Close()
|
||||||
|
|
||||||
imageID := ""
|
imageID := ""
|
||||||
aux := func(m jsonmessage.JSONMessage) {
|
aux := func(msg jsonmessage.JSONMessage) {
|
||||||
var result types.BuildResult
|
var result types.BuildResult
|
||||||
if err := json.Unmarshal(*m.Aux, &result); err != nil {
|
if err := json.Unmarshal(*msg.Aux, &result); err != nil {
|
||||||
fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err)
|
fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err)
|
||||||
} else {
|
} else {
|
||||||
imageID = result.ID
|
imageID = result.ID
|
||||||
|
@ -602,3 +586,35 @@ func replaceDockerfileForContentTrust(ctx context.Context, inputTarStream io.Rea
|
||||||
|
|
||||||
return pipeReader
|
return pipeReader
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func imageBuildOptions(dockerCli command.Cli, options buildOptions) types.ImageBuildOptions {
|
||||||
|
configFile := dockerCli.ConfigFile()
|
||||||
|
return types.ImageBuildOptions{
|
||||||
|
Memory: options.memory.Value(),
|
||||||
|
MemorySwap: options.memorySwap.Value(),
|
||||||
|
Tags: options.tags.GetAll(),
|
||||||
|
SuppressOutput: options.quiet,
|
||||||
|
NoCache: options.noCache,
|
||||||
|
Remove: options.rm,
|
||||||
|
ForceRemove: options.forceRm,
|
||||||
|
PullParent: options.pull,
|
||||||
|
Isolation: container.Isolation(options.isolation),
|
||||||
|
CPUSetCPUs: options.cpuSetCpus,
|
||||||
|
CPUSetMems: options.cpuSetMems,
|
||||||
|
CPUShares: options.cpuShares,
|
||||||
|
CPUQuota: options.cpuQuota,
|
||||||
|
CPUPeriod: options.cpuPeriod,
|
||||||
|
CgroupParent: options.cgroupParent,
|
||||||
|
ShmSize: options.shmSize.Value(),
|
||||||
|
Ulimits: options.ulimits.GetList(),
|
||||||
|
BuildArgs: configFile.ParseProxyConfig(dockerCli.Client().DaemonHost(), options.buildArgs.GetAll()),
|
||||||
|
Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()),
|
||||||
|
CacheFrom: options.cacheFrom,
|
||||||
|
SecurityOpt: options.securityOpt,
|
||||||
|
NetworkMode: options.networkMode,
|
||||||
|
Squash: options.squash,
|
||||||
|
ExtraHosts: options.extraHosts.GetAll(),
|
||||||
|
Target: options.target,
|
||||||
|
Platform: options.platform,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -81,59 +81,82 @@ func ValidateContextDirectory(srcPath string, excludes []string) error {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetContextFromReader will read the contents of the given reader as either a
|
// DetectArchiveReader detects whether the input stream is an archive or a
|
||||||
// Dockerfile or tar archive. Returns a tar archive used as a context and a
|
// Dockerfile and returns a buffered version of input, safe to consume in lieu
|
||||||
// path to the Dockerfile inside the tar.
|
// of input. If an archive is detected, isArchive is set to true, and to false
|
||||||
func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
|
// otherwise, in which case it is safe to assume input represents the contents
|
||||||
buf := bufio.NewReader(r)
|
// of a Dockerfile.
|
||||||
|
func DetectArchiveReader(input io.ReadCloser) (rc io.ReadCloser, isArchive bool, err error) {
|
||||||
|
buf := bufio.NewReader(input)
|
||||||
|
|
||||||
magic, err := buf.Peek(archiveHeaderSize)
|
magic, err := buf.Peek(archiveHeaderSize)
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
return nil, "", errors.Errorf("failed to peek context header from STDIN: %v", err)
|
return nil, false, errors.Errorf("failed to peek context header from STDIN: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if IsArchive(magic) {
|
return ioutils.NewReadCloserWrapper(buf, func() error { return input.Close() }), IsArchive(magic), nil
|
||||||
return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil
|
}
|
||||||
|
|
||||||
|
// WriteTempDockerfile writes a Dockerfile stream to a temporary file with a
|
||||||
|
// name specified by DefaultDockerfileName and returns the path to the
|
||||||
|
// temporary directory containing the Dockerfile.
|
||||||
|
func WriteTempDockerfile(rc io.ReadCloser) (dockerfileDir string, err error) {
|
||||||
|
// err is a named return value, due to the defer call below.
|
||||||
|
dockerfileDir, err = ioutil.TempDir("", "docker-build-tempdockerfile-")
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Errorf("unable to create temporary context directory: %v", err)
|
||||||
}
|
}
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
os.RemoveAll(dockerfileDir)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
f, err := os.Create(filepath.Join(dockerfileDir, DefaultDockerfileName))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
if _, err := io.Copy(f, rc); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return dockerfileDir, rc.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetContextFromReader will read the contents of the given reader as either a
|
||||||
|
// Dockerfile or tar archive. Returns a tar archive used as a context and a
|
||||||
|
// path to the Dockerfile inside the tar.
|
||||||
|
func GetContextFromReader(rc io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
|
||||||
|
rc, isArchive, err := DetectArchiveReader(rc)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if isArchive {
|
||||||
|
return rc, dockerfileName, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Input should be read as a Dockerfile.
|
||||||
|
|
||||||
if dockerfileName == "-" {
|
if dockerfileName == "-" {
|
||||||
return nil, "", errors.New("build context is not an archive")
|
return nil, "", errors.New("build context is not an archive")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Input should be read as a Dockerfile.
|
dockerfileDir, err := WriteTempDockerfile(rc)
|
||||||
tmpDir, err := ioutil.TempDir("", "docker-build-context-")
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", errors.Errorf("unable to create temporary context directory: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
_, err = io.Copy(f, buf)
|
|
||||||
if err != nil {
|
|
||||||
f.Close()
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := f.Close(); err != nil {
|
tar, err := archive.Tar(dockerfileDir, archive.Uncompressed)
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
if err := r.Close(); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
tar, err := archive.Tar(tmpDir, archive.Uncompressed)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
return ioutils.NewReadCloserWrapper(tar, func() error {
|
return ioutils.NewReadCloserWrapper(tar, func() error {
|
||||||
err := tar.Close()
|
err := tar.Close()
|
||||||
os.RemoveAll(tmpDir)
|
os.RemoveAll(dockerfileDir)
|
||||||
return err
|
return err
|
||||||
}), DefaultDockerfileName, nil
|
}), DefaultDockerfileName, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsArchive checks for the magic bytes of a tar or any supported compression
|
// IsArchive checks for the magic bytes of a tar or any supported compression
|
||||||
|
|
|
@ -0,0 +1,300 @@
|
||||||
|
package image
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/docker/cli/cli"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/command/image/build"
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/pkg/jsonmessage"
|
||||||
|
"github.com/docker/docker/pkg/stringid"
|
||||||
|
"github.com/docker/docker/pkg/urlutil"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/session/auth/authprovider"
|
||||||
|
"github.com/moby/buildkit/session/filesync"
|
||||||
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
|
"github.com/moby/buildkit/util/progress/progressui"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/tonistiigi/fsutil"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
const uploadRequestRemote = "upload-request"
|
||||||
|
|
||||||
|
var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
|
||||||
|
|
||||||
|
//nolint: gocyclo
|
||||||
|
func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
|
||||||
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
|
s, err := trySession(dockerCli, options.context)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if s == nil {
|
||||||
|
return errors.Errorf("buildkit not supported by daemon")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
remote string
|
||||||
|
body io.Reader
|
||||||
|
dockerfileName = options.dockerfileName
|
||||||
|
dockerfileReader io.ReadCloser
|
||||||
|
dockerfileDir string
|
||||||
|
contextDir string
|
||||||
|
)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case options.contextFromStdin():
|
||||||
|
if options.dockerfileFromStdin() {
|
||||||
|
return errStdinConflict
|
||||||
|
}
|
||||||
|
rc, isArchive, err := build.DetectArchiveReader(os.Stdin)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if isArchive {
|
||||||
|
body = rc
|
||||||
|
remote = uploadRequestRemote
|
||||||
|
} else {
|
||||||
|
if options.dockerfileName != "" {
|
||||||
|
return errDockerfileConflict
|
||||||
|
}
|
||||||
|
dockerfileReader = rc
|
||||||
|
remote = clientSessionRemote
|
||||||
|
// TODO: make fssync handle empty contextdir
|
||||||
|
contextDir, _ = ioutil.TempDir("", "empty-dir")
|
||||||
|
defer os.RemoveAll(contextDir)
|
||||||
|
}
|
||||||
|
case isLocalDir(options.context):
|
||||||
|
contextDir = options.context
|
||||||
|
if options.dockerfileFromStdin() {
|
||||||
|
dockerfileReader = os.Stdin
|
||||||
|
} else if options.dockerfileName != "" {
|
||||||
|
dockerfileName = filepath.Base(options.dockerfileName)
|
||||||
|
dockerfileDir = filepath.Dir(options.dockerfileName)
|
||||||
|
} else {
|
||||||
|
dockerfileDir = options.context
|
||||||
|
}
|
||||||
|
remote = clientSessionRemote
|
||||||
|
case urlutil.IsGitURL(options.context):
|
||||||
|
remote = options.context
|
||||||
|
case urlutil.IsURL(options.context):
|
||||||
|
remote = options.context
|
||||||
|
default:
|
||||||
|
return errors.Errorf("unable to prepare context: path %q not found", options.context)
|
||||||
|
}
|
||||||
|
|
||||||
|
if dockerfileReader != nil {
|
||||||
|
dockerfileName = build.DefaultDockerfileName
|
||||||
|
dockerfileDir, err = build.WriteTempDockerfile(dockerfileReader)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(dockerfileDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
if dockerfileDir != "" {
|
||||||
|
s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{
|
||||||
|
{
|
||||||
|
Name: "context",
|
||||||
|
Dir: contextDir,
|
||||||
|
Map: resetUIDAndGID,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "dockerfile",
|
||||||
|
Dir: dockerfileDir,
|
||||||
|
},
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Allow(authprovider.NewDockerAuthProvider())
|
||||||
|
|
||||||
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
|
eg.Go(func() error {
|
||||||
|
return s.Run(context.TODO(), dockerCli.Client().DialSession)
|
||||||
|
})
|
||||||
|
|
||||||
|
buildID := stringid.GenerateRandomID()
|
||||||
|
if body != nil {
|
||||||
|
eg.Go(func() error {
|
||||||
|
buildOptions := types.ImageBuildOptions{
|
||||||
|
Version: types.BuilderBuildKit,
|
||||||
|
BuildID: uploadRequestRemote + ":" + buildID,
|
||||||
|
}
|
||||||
|
|
||||||
|
response, err := dockerCli.Client().ImageBuild(context.Background(), body, buildOptions)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer response.Body.Close()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
eg.Go(func() error {
|
||||||
|
defer func() { // make sure the Status ends cleanly on build errors
|
||||||
|
s.Close()
|
||||||
|
}()
|
||||||
|
|
||||||
|
buildOptions := imageBuildOptions(dockerCli, options)
|
||||||
|
buildOptions.Version = types.BuilderBuildKit
|
||||||
|
buildOptions.Dockerfile = dockerfileName
|
||||||
|
//buildOptions.AuthConfigs = authConfigs // handled by session
|
||||||
|
buildOptions.RemoteContext = remote
|
||||||
|
buildOptions.SessionID = s.ID()
|
||||||
|
buildOptions.BuildID = buildID
|
||||||
|
return doBuild(ctx, eg, dockerCli, options, buildOptions)
|
||||||
|
})
|
||||||
|
|
||||||
|
return eg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, options buildOptions, buildOptions types.ImageBuildOptions) (finalErr error) {
|
||||||
|
response, err := dockerCli.Client().ImageBuild(context.Background(), nil, buildOptions)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer response.Body.Close()
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer close(done)
|
||||||
|
eg.Go(func() error {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return dockerCli.Client().BuildCancel(context.TODO(), buildOptions.BuildID)
|
||||||
|
case <-done:
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
t := newTracer()
|
||||||
|
ssArr := []*client.SolveStatus{}
|
||||||
|
|
||||||
|
displayStatus := func(displayCh chan *client.SolveStatus) {
|
||||||
|
var c console.Console
|
||||||
|
out := os.Stderr
|
||||||
|
// TODO: Handle interactive output in non-interactive environment.
|
||||||
|
consoleOpt := options.console.Value()
|
||||||
|
if cons, err := console.ConsoleFromFile(out); err == nil && (consoleOpt == nil || *consoleOpt) {
|
||||||
|
c = cons
|
||||||
|
}
|
||||||
|
// not using shared context to not disrupt display but let is finish reporting errors
|
||||||
|
eg.Go(func() error {
|
||||||
|
return progressui.DisplaySolveStatus(context.TODO(), c, out, displayCh)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if options.quiet {
|
||||||
|
eg.Go(func() error {
|
||||||
|
// TODO: make sure t.displayCh closes
|
||||||
|
for ss := range t.displayCh {
|
||||||
|
ssArr = append(ssArr, ss)
|
||||||
|
}
|
||||||
|
<-done
|
||||||
|
// TODO: verify that finalErr is indeed set when error occurs
|
||||||
|
if finalErr != nil {
|
||||||
|
displayCh := make(chan *client.SolveStatus)
|
||||||
|
go func() {
|
||||||
|
for _, ss := range ssArr {
|
||||||
|
displayCh <- ss
|
||||||
|
}
|
||||||
|
close(displayCh)
|
||||||
|
}()
|
||||||
|
displayStatus(displayCh)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
displayStatus(t.displayCh)
|
||||||
|
}
|
||||||
|
defer close(t.displayCh)
|
||||||
|
err = jsonmessage.DisplayJSONMessagesStream(response.Body, os.Stdout, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), t.write)
|
||||||
|
if err != nil {
|
||||||
|
if jerr, ok := err.(*jsonmessage.JSONError); ok {
|
||||||
|
// If no error code is set, default to 1
|
||||||
|
if jerr.Code == 0 {
|
||||||
|
jerr.Code = 1
|
||||||
|
}
|
||||||
|
return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func resetUIDAndGID(s *fsutil.Stat) bool {
|
||||||
|
s.Uid = 0
|
||||||
|
s.Gid = 0
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type tracer struct {
|
||||||
|
displayCh chan *client.SolveStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTracer() *tracer {
|
||||||
|
return &tracer{
|
||||||
|
displayCh: make(chan *client.SolveStatus),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tracer) write(msg jsonmessage.JSONMessage) {
|
||||||
|
var resp controlapi.StatusResponse
|
||||||
|
|
||||||
|
if msg.ID != "moby.buildkit.trace" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var dt []byte
|
||||||
|
// ignoring all messages that are not understood
|
||||||
|
if err := json.Unmarshal(*msg.Aux, &dt); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := (&resp).Unmarshal(dt); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s := client.SolveStatus{}
|
||||||
|
for _, v := range resp.Vertexes {
|
||||||
|
s.Vertexes = append(s.Vertexes, &client.Vertex{
|
||||||
|
Digest: v.Digest,
|
||||||
|
Inputs: v.Inputs,
|
||||||
|
Name: v.Name,
|
||||||
|
Started: v.Started,
|
||||||
|
Completed: v.Completed,
|
||||||
|
Error: v.Error,
|
||||||
|
Cached: v.Cached,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for _, v := range resp.Statuses {
|
||||||
|
s.Statuses = append(s.Statuses, &client.VertexStatus{
|
||||||
|
ID: v.ID,
|
||||||
|
Vertex: v.Vertex,
|
||||||
|
Name: v.Name,
|
||||||
|
Total: v.Total,
|
||||||
|
Current: v.Current,
|
||||||
|
Timestamp: v.Timestamp,
|
||||||
|
Started: v.Started,
|
||||||
|
Completed: v.Completed,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for _, v := range resp.Logs {
|
||||||
|
s.Logs = append(s.Logs, &client.VertexLog{
|
||||||
|
Vertex: v.Vertex,
|
||||||
|
Stream: int(v.Stream),
|
||||||
|
Data: v.Msg,
|
||||||
|
Timestamp: v.Timestamp,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.displayCh <- &s
|
||||||
|
}
|
|
@ -49,7 +49,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
||||||
// Count the times of calling for handleTarget,
|
// Count the times of calling for handleTarget,
|
||||||
// if it is called more that once, that should be considered an error in a trusted push.
|
// if it is called more that once, that should be considered an error in a trusted push.
|
||||||
cnt := 0
|
cnt := 0
|
||||||
handleTarget := func(m jsonmessage.JSONMessage) {
|
handleTarget := func(msg jsonmessage.JSONMessage) {
|
||||||
cnt++
|
cnt++
|
||||||
if cnt > 1 {
|
if cnt > 1 {
|
||||||
// handleTarget should only be called once. This will be treated as an error.
|
// handleTarget should only be called once. This will be treated as an error.
|
||||||
|
@ -57,7 +57,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
||||||
}
|
}
|
||||||
|
|
||||||
var pushResult types.PushResult
|
var pushResult types.PushResult
|
||||||
err := json.Unmarshal(*m.Aux, &pushResult)
|
err := json.Unmarshal(*msg.Aux, &pushResult)
|
||||||
if err == nil && pushResult.Tag != "" {
|
if err == nil && pushResult.Tag != "" {
|
||||||
if dgst, err := digest.Parse(pushResult.Digest); err == nil {
|
if dgst, err := digest.Parse(pushResult.Digest); err == nil {
|
||||||
h, err := hex.DecodeString(dgst.Hex())
|
h, err := hex.DecodeString(dgst.Hex())
|
||||||
|
|
|
@ -59,6 +59,7 @@ func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error {
|
||||||
},
|
},
|
||||||
LayersSize: du.LayersSize,
|
LayersSize: du.LayersSize,
|
||||||
BuilderSize: du.BuilderSize,
|
BuilderSize: du.BuilderSize,
|
||||||
|
BuildCache: du.BuildCache,
|
||||||
Images: du.Images,
|
Images: du.Images,
|
||||||
Containers: du.Containers,
|
Containers: du.Containers,
|
||||||
Volumes: du.Volumes,
|
Volumes: du.Volumes,
|
||||||
|
|
36
opts/opts.go
36
opts/opts.go
|
@ -6,6 +6,7 @@ import (
|
||||||
"net"
|
"net"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
|
@ -486,3 +487,38 @@ func (m *MemSwapBytes) UnmarshalJSON(s []byte) error {
|
||||||
b := MemBytes(*m)
|
b := MemBytes(*m)
|
||||||
return b.UnmarshalJSON(s)
|
return b.UnmarshalJSON(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NullableBool is a type for tri-state boolean options
|
||||||
|
type NullableBool struct {
|
||||||
|
b *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the type
|
||||||
|
func (n *NullableBool) Type() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the value in *bool
|
||||||
|
func (n *NullableBool) Value() *bool {
|
||||||
|
return n.b
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the value. If value is empty string or "auto", nil is set.
|
||||||
|
// Otherwise true or false are set based on flag.Bool behavior.
|
||||||
|
func (n *NullableBool) Set(value string) error {
|
||||||
|
if value != "auto" && value != "" {
|
||||||
|
b, err := strconv.ParseBool(value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n.b = &b
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *NullableBool) String() string {
|
||||||
|
if n.b == nil {
|
||||||
|
return "auto"
|
||||||
|
}
|
||||||
|
return strconv.FormatBool(*n.b)
|
||||||
|
}
|
||||||
|
|
|
@ -6,7 +6,7 @@ github.com/coreos/etcd v3.2.1
|
||||||
github.com/cpuguy83/go-md2man v1.0.8
|
github.com/cpuguy83/go-md2man v1.0.8
|
||||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||||
github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5
|
github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5
|
||||||
github.com/docker/docker 162ba6016def672690ee4a1f3978368853a1e149
|
github.com/docker/docker c752b0991e31ba9869ab6a0661af57e9423874fb
|
||||||
github.com/docker/docker-credential-helpers 3c90bd29a46b943b2a9842987b58fb91a7c1819b
|
github.com/docker/docker-credential-helpers 3c90bd29a46b943b2a9842987b58fb91a7c1819b
|
||||||
# the docker/go package contains a customized version of canonical/json
|
# the docker/go package contains a customized version of canonical/json
|
||||||
# and is used by Notary. The package is periodically rebased on current Go versions.
|
# and is used by Notary. The package is periodically rebased on current Go versions.
|
||||||
|
@ -49,7 +49,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.0
|
||||||
github.com/Microsoft/go-winio v0.4.6
|
github.com/Microsoft/go-winio v0.4.6
|
||||||
github.com/miekg/pkcs11 5f6e0d0dad6f472df908c8e968a98ef00c9224bb
|
github.com/miekg/pkcs11 5f6e0d0dad6f472df908c8e968a98ef00c9224bb
|
||||||
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
|
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
|
||||||
github.com/moby/buildkit 43e758232a0ac7d50c6a11413186e16684fc1e4f
|
github.com/moby/buildkit b062a2d8ddbaa477c25c63d68a9cffbb43f6e474
|
||||||
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
|
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
|
||||||
github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
|
github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
|
||||||
github.com/opencontainers/go-digest v1.0.0-rc1
|
github.com/opencontainers/go-digest v1.0.0-rc1
|
||||||
|
@ -70,7 +70,7 @@ github.com/sirupsen/logrus v1.0.3
|
||||||
github.com/spf13/cobra v0.0.3
|
github.com/spf13/cobra v0.0.3
|
||||||
github.com/spf13/pflag v1.0.1
|
github.com/spf13/pflag v1.0.1
|
||||||
github.com/theupdateframework/notary v0.6.1
|
github.com/theupdateframework/notary v0.6.1
|
||||||
github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
|
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
|
||||||
github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
|
github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
|
||||||
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
|
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
|
||||||
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
|
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
|
||||||
|
@ -90,3 +90,6 @@ k8s.io/client-go kubernetes-1.8.2
|
||||||
k8s.io/kubernetes v1.8.2
|
k8s.io/kubernetes v1.8.2
|
||||||
k8s.io/kube-openapi 61b46af70dfed79c6d24530cd23b41440a7f22a5
|
k8s.io/kube-openapi 61b46af70dfed79c6d24530cd23b41440a7f22a5
|
||||||
vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d
|
vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d
|
||||||
|
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
|
||||||
|
github.com/tonistiigi/units 29de085e9400559bd68aea2e7bc21566e7b8281d
|
||||||
|
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
|
||||||
|
|
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,17 @@
|
||||||
|
# console
|
||||||
|
|
||||||
|
[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console)
|
||||||
|
|
||||||
|
Golang package for dealing with consoles. Light on deps and a simple API.
|
||||||
|
|
||||||
|
## Modifying the current process
|
||||||
|
|
||||||
|
```go
|
||||||
|
current := console.Current()
|
||||||
|
defer current.Reset()
|
||||||
|
|
||||||
|
if err := current.SetRaw(); err != nil {
|
||||||
|
}
|
||||||
|
ws, err := current.Size()
|
||||||
|
current.Resize(ws)
|
||||||
|
```
|
|
@ -0,0 +1,78 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ErrNotAConsole = errors.New("provided file is not a console")
|
||||||
|
|
||||||
|
type Console interface {
|
||||||
|
io.Reader
|
||||||
|
io.Writer
|
||||||
|
io.Closer
|
||||||
|
|
||||||
|
// Resize resizes the console to the provided window size
|
||||||
|
Resize(WinSize) error
|
||||||
|
// ResizeFrom resizes the calling console to the size of the
|
||||||
|
// provided console
|
||||||
|
ResizeFrom(Console) error
|
||||||
|
// SetRaw sets the console in raw mode
|
||||||
|
SetRaw() error
|
||||||
|
// DisableEcho disables echo on the console
|
||||||
|
DisableEcho() error
|
||||||
|
// Reset restores the console to its orignal state
|
||||||
|
Reset() error
|
||||||
|
// Size returns the window size of the console
|
||||||
|
Size() (WinSize, error)
|
||||||
|
// Fd returns the console's file descriptor
|
||||||
|
Fd() uintptr
|
||||||
|
// Name returns the console's file name
|
||||||
|
Name() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// WinSize specifies the window size of the console
|
||||||
|
type WinSize struct {
|
||||||
|
// Height of the console
|
||||||
|
Height uint16
|
||||||
|
// Width of the console
|
||||||
|
Width uint16
|
||||||
|
x uint16
|
||||||
|
y uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
// Current returns the current processes console
|
||||||
|
func Current() Console {
|
||||||
|
c, err := ConsoleFromFile(os.Stdin)
|
||||||
|
if err != nil {
|
||||||
|
// stdin should always be a console for the design
|
||||||
|
// of this function
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsoleFromFile returns a console using the provided file
|
||||||
|
func ConsoleFromFile(f *os.File) (Console, error) {
|
||||||
|
if err := checkConsole(f); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return newMaster(f)
|
||||||
|
}
|
|
@ -0,0 +1,271 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxEvents = 128
|
||||||
|
)
|
||||||
|
|
||||||
|
// Epoller manages multiple epoll consoles using edge-triggered epoll api so we
|
||||||
|
// dont have to deal with repeated wake-up of EPOLLER or EPOLLHUP.
|
||||||
|
// For more details, see:
|
||||||
|
// - https://github.com/systemd/systemd/pull/4262
|
||||||
|
// - https://github.com/moby/moby/issues/27202
|
||||||
|
//
|
||||||
|
// Example usage of Epoller and EpollConsole can be as follow:
|
||||||
|
//
|
||||||
|
// epoller, _ := NewEpoller()
|
||||||
|
// epollConsole, _ := epoller.Add(console)
|
||||||
|
// go epoller.Wait()
|
||||||
|
// var (
|
||||||
|
// b bytes.Buffer
|
||||||
|
// wg sync.WaitGroup
|
||||||
|
// )
|
||||||
|
// wg.Add(1)
|
||||||
|
// go func() {
|
||||||
|
// io.Copy(&b, epollConsole)
|
||||||
|
// wg.Done()
|
||||||
|
// }()
|
||||||
|
// // perform I/O on the console
|
||||||
|
// epollConsole.Shutdown(epoller.CloseConsole)
|
||||||
|
// wg.Wait()
|
||||||
|
// epollConsole.Close()
|
||||||
|
type Epoller struct {
|
||||||
|
efd int
|
||||||
|
mu sync.Mutex
|
||||||
|
fdMapping map[int]*EpollConsole
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEpoller returns an instance of epoller with a valid epoll fd.
|
||||||
|
func NewEpoller() (*Epoller, error) {
|
||||||
|
efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Epoller{
|
||||||
|
efd: efd,
|
||||||
|
fdMapping: make(map[int]*EpollConsole),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add creates a epoll console based on the provided console. The console will
|
||||||
|
// be registered with EPOLLET (i.e. using edge-triggered notification) and its
|
||||||
|
// file descriptor will be set to non-blocking mode. After this, user should use
|
||||||
|
// the return console to perform I/O.
|
||||||
|
func (e *Epoller) Add(console Console) (*EpollConsole, error) {
|
||||||
|
sysfd := int(console.Fd())
|
||||||
|
// Set sysfd to non-blocking mode
|
||||||
|
if err := unix.SetNonblock(sysfd, true); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ev := unix.EpollEvent{
|
||||||
|
Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET,
|
||||||
|
Fd: int32(sysfd),
|
||||||
|
}
|
||||||
|
if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ef := &EpollConsole{
|
||||||
|
Console: console,
|
||||||
|
sysfd: sysfd,
|
||||||
|
readc: sync.NewCond(&sync.Mutex{}),
|
||||||
|
writec: sync.NewCond(&sync.Mutex{}),
|
||||||
|
}
|
||||||
|
e.mu.Lock()
|
||||||
|
e.fdMapping[sysfd] = ef
|
||||||
|
e.mu.Unlock()
|
||||||
|
return ef, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait starts the loop to wait for its consoles' notifications and signal
|
||||||
|
// appropriate console that it can perform I/O.
|
||||||
|
func (e *Epoller) Wait() error {
|
||||||
|
events := make([]unix.EpollEvent, maxEvents)
|
||||||
|
for {
|
||||||
|
n, err := unix.EpollWait(e.efd, events, -1)
|
||||||
|
if err != nil {
|
||||||
|
// EINTR: The call was interrupted by a signal handler before either
|
||||||
|
// any of the requested events occurred or the timeout expired
|
||||||
|
if err == unix.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
ev := &events[i]
|
||||||
|
// the console is ready to be read from
|
||||||
|
if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
|
||||||
|
if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
|
||||||
|
epfile.signalRead()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// the console is ready to be written to
|
||||||
|
if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
|
||||||
|
if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
|
||||||
|
epfile.signalWrite()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close unregister the console's file descriptor from epoll interface
|
||||||
|
func (e *Epoller) CloseConsole(fd int) error {
|
||||||
|
e.mu.Lock()
|
||||||
|
defer e.mu.Unlock()
|
||||||
|
delete(e.fdMapping, fd)
|
||||||
|
return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Epoller) getConsole(sysfd int) *EpollConsole {
|
||||||
|
e.mu.Lock()
|
||||||
|
f := e.fdMapping[sysfd]
|
||||||
|
e.mu.Unlock()
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the epoll fd
|
||||||
|
func (e *Epoller) Close() error {
|
||||||
|
return unix.Close(e.efd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EpollConsole acts like a console but register its file descriptor with a
|
||||||
|
// epoll fd and uses epoll API to perform I/O.
|
||||||
|
type EpollConsole struct {
|
||||||
|
Console
|
||||||
|
readc *sync.Cond
|
||||||
|
writec *sync.Cond
|
||||||
|
sysfd int
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads up to len(p) bytes into p. It returns the number of bytes read
|
||||||
|
// (0 <= n <= len(p)) and any error encountered.
|
||||||
|
//
|
||||||
|
// If the console's read returns EAGAIN or EIO, we assumes that its a
|
||||||
|
// temporary error because the other side went away and wait for the signal
|
||||||
|
// generated by epoll event to continue.
|
||||||
|
func (ec *EpollConsole) Read(p []byte) (n int, err error) {
|
||||||
|
var read int
|
||||||
|
ec.readc.L.Lock()
|
||||||
|
defer ec.readc.L.Unlock()
|
||||||
|
for {
|
||||||
|
read, err = ec.Console.Read(p[n:])
|
||||||
|
n += read
|
||||||
|
if err != nil {
|
||||||
|
var hangup bool
|
||||||
|
if perr, ok := err.(*os.PathError); ok {
|
||||||
|
hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
|
||||||
|
} else {
|
||||||
|
hangup = (err == unix.EAGAIN || err == unix.EIO)
|
||||||
|
}
|
||||||
|
// if the other end disappear, assume this is temporary and wait for the
|
||||||
|
// signal to continue again. Unless we didnt read anything and the
|
||||||
|
// console is already marked as closed then we should exit
|
||||||
|
if hangup && !(n == 0 && len(p) > 0 && ec.closed) {
|
||||||
|
ec.readc.Wait()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// if we didnt read anything then return io.EOF to end gracefully
|
||||||
|
if n == 0 && len(p) > 0 && err == nil {
|
||||||
|
err = io.EOF
|
||||||
|
}
|
||||||
|
// signal for others that we finished the read
|
||||||
|
ec.readc.Signal()
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writes len(p) bytes from p to the console. It returns the number of bytes
|
||||||
|
// written from p (0 <= n <= len(p)) and any error encountered that caused
|
||||||
|
// the write to stop early.
|
||||||
|
//
|
||||||
|
// If writes to the console returns EAGAIN or EIO, we assumes that its a
|
||||||
|
// temporary error because the other side went away and wait for the signal
|
||||||
|
// generated by epoll event to continue.
|
||||||
|
func (ec *EpollConsole) Write(p []byte) (n int, err error) {
|
||||||
|
var written int
|
||||||
|
ec.writec.L.Lock()
|
||||||
|
defer ec.writec.L.Unlock()
|
||||||
|
for {
|
||||||
|
written, err = ec.Console.Write(p[n:])
|
||||||
|
n += written
|
||||||
|
if err != nil {
|
||||||
|
var hangup bool
|
||||||
|
if perr, ok := err.(*os.PathError); ok {
|
||||||
|
hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
|
||||||
|
} else {
|
||||||
|
hangup = (err == unix.EAGAIN || err == unix.EIO)
|
||||||
|
}
|
||||||
|
// if the other end disappear, assume this is temporary and wait for the
|
||||||
|
// signal to continue again.
|
||||||
|
if hangup {
|
||||||
|
ec.writec.Wait()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// unrecoverable error, break the loop and return the error
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if n < len(p) && err == nil {
|
||||||
|
err = io.ErrShortWrite
|
||||||
|
}
|
||||||
|
// signal for others that we finished the write
|
||||||
|
ec.writec.Signal()
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closed the file descriptor and signal call waiters for this fd.
|
||||||
|
// It accepts a callback which will be called with the console's fd. The
|
||||||
|
// callback typically will be used to do further cleanup such as unregister the
|
||||||
|
// console's fd from the epoll interface.
|
||||||
|
// User should call Shutdown and wait for all I/O operation to be finished
|
||||||
|
// before closing the console.
|
||||||
|
func (ec *EpollConsole) Shutdown(close func(int) error) error {
|
||||||
|
ec.readc.L.Lock()
|
||||||
|
defer ec.readc.L.Unlock()
|
||||||
|
ec.writec.L.Lock()
|
||||||
|
defer ec.writec.L.Unlock()
|
||||||
|
|
||||||
|
ec.readc.Broadcast()
|
||||||
|
ec.writec.Broadcast()
|
||||||
|
ec.closed = true
|
||||||
|
return close(ec.sysfd)
|
||||||
|
}
|
||||||
|
|
||||||
|
// signalRead signals that the console is readable.
|
||||||
|
func (ec *EpollConsole) signalRead() {
|
||||||
|
ec.readc.Signal()
|
||||||
|
}
|
||||||
|
|
||||||
|
// signalWrite signals that the console is writable.
|
||||||
|
func (ec *EpollConsole) signalWrite() {
|
||||||
|
ec.writec.Signal()
|
||||||
|
}
|
|
@ -0,0 +1,158 @@
|
||||||
|
// +build darwin freebsd linux openbsd solaris
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewPty creates a new pty pair
|
||||||
|
// The master is returned as the first console and a string
|
||||||
|
// with the path to the pty slave is returned as the second
|
||||||
|
func NewPty() (Console, string, error) {
|
||||||
|
f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
slave, err := ptsname(f)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
if err := unlockpt(f); err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
m, err := newMaster(f)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
return m, slave, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type master struct {
|
||||||
|
f *os.File
|
||||||
|
original *unix.Termios
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Read(b []byte) (int, error) {
|
||||||
|
return m.f.Read(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Write(b []byte) (int, error) {
|
||||||
|
return m.f.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Close() error {
|
||||||
|
return m.f.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Resize(ws WinSize) error {
|
||||||
|
return tcswinsz(m.f.Fd(), ws)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) ResizeFrom(c Console) error {
|
||||||
|
ws, err := c.Size()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return m.Resize(ws)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Reset() error {
|
||||||
|
if m.original == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return tcset(m.f.Fd(), m.original)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) getCurrent() (unix.Termios, error) {
|
||||||
|
var termios unix.Termios
|
||||||
|
if err := tcget(m.f.Fd(), &termios); err != nil {
|
||||||
|
return unix.Termios{}, err
|
||||||
|
}
|
||||||
|
return termios, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) SetRaw() error {
|
||||||
|
rawState, err := m.getCurrent()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rawState = cfmakeraw(rawState)
|
||||||
|
rawState.Oflag = rawState.Oflag | unix.OPOST
|
||||||
|
return tcset(m.f.Fd(), &rawState)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) DisableEcho() error {
|
||||||
|
rawState, err := m.getCurrent()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rawState.Lflag = rawState.Lflag &^ unix.ECHO
|
||||||
|
return tcset(m.f.Fd(), &rawState)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Size() (WinSize, error) {
|
||||||
|
return tcgwinsz(m.f.Fd())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Fd() uintptr {
|
||||||
|
return m.f.Fd()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Name() string {
|
||||||
|
return m.f.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkConsole checks if the provided file is a console
|
||||||
|
func checkConsole(f *os.File) error {
|
||||||
|
var termios unix.Termios
|
||||||
|
if tcget(f.Fd(), &termios) != nil {
|
||||||
|
return ErrNotAConsole
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMaster(f *os.File) (Console, error) {
|
||||||
|
m := &master{
|
||||||
|
f: f,
|
||||||
|
}
|
||||||
|
t, err := m.getCurrent()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
m.original = &t
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
|
||||||
|
// created by us acts normally. In particular, a not-very-well-known default of
|
||||||
|
// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
|
||||||
|
// problem for terminal emulators, because we relay data from the terminal we
|
||||||
|
// also relay that funky line discipline.
|
||||||
|
func ClearONLCR(fd uintptr) error {
|
||||||
|
return setONLCR(fd, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
|
||||||
|
// created by us acts as intended for a terminal emulator.
|
||||||
|
func SetONLCR(fd uintptr) error {
|
||||||
|
return setONLCR(fd, true)
|
||||||
|
}
|
|
@ -0,0 +1,216 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
vtInputSupported bool
|
||||||
|
ErrNotImplemented = errors.New("not implemented")
|
||||||
|
)
|
||||||
|
|
||||||
|
func (m *master) initStdios() {
|
||||||
|
m.in = windows.Handle(os.Stdin.Fd())
|
||||||
|
if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
|
||||||
|
// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
|
||||||
|
if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil {
|
||||||
|
vtInputSupported = true
|
||||||
|
}
|
||||||
|
// Unconditionally set the console mode back even on failure because SetConsoleMode
|
||||||
|
// remembers invalid bits on input handles.
|
||||||
|
windows.SetConsoleMode(m.in, m.inMode)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("failed to get console mode for stdin: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.out = windows.Handle(os.Stdout.Fd())
|
||||||
|
if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil {
|
||||||
|
if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
|
||||||
|
m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
||||||
|
} else {
|
||||||
|
windows.SetConsoleMode(m.out, m.outMode)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Printf("failed to get console mode for stdout: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.err = windows.Handle(os.Stderr.Fd())
|
||||||
|
if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil {
|
||||||
|
if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
|
||||||
|
m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
||||||
|
} else {
|
||||||
|
windows.SetConsoleMode(m.err, m.errMode)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Printf("failed to get console mode for stderr: %v\n", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type master struct {
|
||||||
|
in windows.Handle
|
||||||
|
inMode uint32
|
||||||
|
|
||||||
|
out windows.Handle
|
||||||
|
outMode uint32
|
||||||
|
|
||||||
|
err windows.Handle
|
||||||
|
errMode uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) SetRaw() error {
|
||||||
|
if err := makeInputRaw(m.in, m.inMode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set StdOut and StdErr to raw mode, we ignore failures since
|
||||||
|
// windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of
|
||||||
|
// Windows.
|
||||||
|
|
||||||
|
windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
|
||||||
|
|
||||||
|
windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Reset() error {
|
||||||
|
for _, s := range []struct {
|
||||||
|
fd windows.Handle
|
||||||
|
mode uint32
|
||||||
|
}{
|
||||||
|
{m.in, m.inMode},
|
||||||
|
{m.out, m.outMode},
|
||||||
|
{m.err, m.errMode},
|
||||||
|
} {
|
||||||
|
if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
|
||||||
|
return errors.Wrap(err, "unable to restore console mode")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Size() (WinSize, error) {
|
||||||
|
var info windows.ConsoleScreenBufferInfo
|
||||||
|
err := windows.GetConsoleScreenBufferInfo(m.out, &info)
|
||||||
|
if err != nil {
|
||||||
|
return WinSize{}, errors.Wrap(err, "unable to get console info")
|
||||||
|
}
|
||||||
|
|
||||||
|
winsize := WinSize{
|
||||||
|
Width: uint16(info.Window.Right - info.Window.Left + 1),
|
||||||
|
Height: uint16(info.Window.Bottom - info.Window.Top + 1),
|
||||||
|
}
|
||||||
|
|
||||||
|
return winsize, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Resize(ws WinSize) error {
|
||||||
|
return ErrNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) ResizeFrom(c Console) error {
|
||||||
|
return ErrNotImplemented
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) DisableEcho() error {
|
||||||
|
mode := m.inMode &^ windows.ENABLE_ECHO_INPUT
|
||||||
|
mode |= windows.ENABLE_PROCESSED_INPUT
|
||||||
|
mode |= windows.ENABLE_LINE_INPUT
|
||||||
|
|
||||||
|
if err := windows.SetConsoleMode(m.in, mode); err != nil {
|
||||||
|
return errors.Wrap(err, "unable to set console to disable echo")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Read(b []byte) (int, error) {
|
||||||
|
panic("not implemented on windows")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Write(b []byte) (int, error) {
|
||||||
|
panic("not implemented on windows")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *master) Fd() uintptr {
|
||||||
|
return uintptr(m.in)
|
||||||
|
}
|
||||||
|
|
||||||
|
// on windows, console can only be made from os.Std{in,out,err}, hence there
|
||||||
|
// isnt a single name here we can use. Return a dummy "console" value in this
|
||||||
|
// case should be sufficient.
|
||||||
|
func (m *master) Name() string {
|
||||||
|
return "console"
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeInputRaw puts the terminal (Windows Console) connected to the given
|
||||||
|
// file descriptor into raw mode
|
||||||
|
func makeInputRaw(fd windows.Handle, mode uint32) error {
|
||||||
|
// See
|
||||||
|
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
|
||||||
|
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
|
||||||
|
|
||||||
|
// Disable these modes
|
||||||
|
mode &^= windows.ENABLE_ECHO_INPUT
|
||||||
|
mode &^= windows.ENABLE_LINE_INPUT
|
||||||
|
mode &^= windows.ENABLE_MOUSE_INPUT
|
||||||
|
mode &^= windows.ENABLE_WINDOW_INPUT
|
||||||
|
mode &^= windows.ENABLE_PROCESSED_INPUT
|
||||||
|
|
||||||
|
// Enable these modes
|
||||||
|
mode |= windows.ENABLE_EXTENDED_FLAGS
|
||||||
|
mode |= windows.ENABLE_INSERT_MODE
|
||||||
|
mode |= windows.ENABLE_QUICK_EDIT_MODE
|
||||||
|
|
||||||
|
if vtInputSupported {
|
||||||
|
mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := windows.SetConsoleMode(fd, mode); err != nil {
|
||||||
|
return errors.Wrap(err, "unable to set console to raw mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkConsole(f *os.File) error {
|
||||||
|
var mode uint32
|
||||||
|
if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMaster(f *os.File) (Console, error) {
|
||||||
|
if f != os.Stdin && f != os.Stdout && f != os.Stderr {
|
||||||
|
return nil, errors.New("creating a console from a file is not supported on windows")
|
||||||
|
}
|
||||||
|
m := &master{}
|
||||||
|
m.initStdios()
|
||||||
|
return m, nil
|
||||||
|
}
|
|
@ -0,0 +1,53 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
cmdTcGet = unix.TIOCGETA
|
||||||
|
cmdTcSet = unix.TIOCSETA
|
||||||
|
)
|
||||||
|
|
||||||
|
func ioctl(fd, flag, data uintptr) error {
|
||||||
|
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||||
|
// unlockpt should be called before opening the slave side of a pty.
|
||||||
|
func unlockpt(f *os.File) error {
|
||||||
|
var u int32
|
||||||
|
return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ptsname retrieves the name of the first available pts for the given master.
|
||||||
|
func ptsname(f *os.File) (string, error) {
|
||||||
|
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("/dev/pts/%d", n), nil
|
||||||
|
}
|
|
@ -0,0 +1,45 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
cmdTcGet = unix.TIOCGETA
|
||||||
|
cmdTcSet = unix.TIOCSETA
|
||||||
|
)
|
||||||
|
|
||||||
|
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||||
|
// unlockpt should be called before opening the slave side of a pty.
|
||||||
|
// This does not exist on FreeBSD, it does not allocate controlling terminals on open
|
||||||
|
func unlockpt(f *os.File) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ptsname retrieves the name of the first available pts for the given master.
|
||||||
|
func ptsname(f *os.File) (string, error) {
|
||||||
|
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("/dev/pts/%d", n), nil
|
||||||
|
}
|
|
@ -0,0 +1,49 @@
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
cmdTcGet = unix.TCGETS
|
||||||
|
cmdTcSet = unix.TCSETS
|
||||||
|
)
|
||||||
|
|
||||||
|
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||||
|
// unlockpt should be called before opening the slave side of a pty.
|
||||||
|
func unlockpt(f *os.File) error {
|
||||||
|
var u int32
|
||||||
|
if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ptsname retrieves the name of the first available pts for the given master.
|
||||||
|
func ptsname(f *os.File) (string, error) {
|
||||||
|
var u uint32
|
||||||
|
if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("/dev/pts/%d", u), nil
|
||||||
|
}
|
|
@ -0,0 +1,51 @@
|
||||||
|
// +build openbsd,cgo
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
//#include <stdlib.h>
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
cmdTcGet = unix.TIOCGETA
|
||||||
|
cmdTcSet = unix.TIOCSETA
|
||||||
|
)
|
||||||
|
|
||||||
|
// ptsname retrieves the name of the first available pts for the given master.
|
||||||
|
func ptsname(f *os.File) (string, error) {
|
||||||
|
ptspath, err := C.ptsname(C.int(f.Fd()))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return C.GoString(ptspath), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||||
|
// unlockpt should be called before opening the slave side of a pty.
|
||||||
|
func unlockpt(f *os.File) error {
|
||||||
|
if _, err := C.grantpt(C.int(f.Fd())); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,47 @@
|
||||||
|
// +build openbsd,!cgo
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
//
|
||||||
|
// Implementing the functions below requires cgo support. Non-cgo stubs
|
||||||
|
// versions are defined below to enable cross-compilation of source code
|
||||||
|
// that depends on these functions, but the resultant cross-compiled
|
||||||
|
// binaries cannot actually be used. If the stub function(s) below are
|
||||||
|
// actually invoked they will display an error message and cause the
|
||||||
|
// calling process to exit.
|
||||||
|
//
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
cmdTcGet = unix.TIOCGETA
|
||||||
|
cmdTcSet = unix.TIOCSETA
|
||||||
|
)
|
||||||
|
|
||||||
|
func ptsname(f *os.File) (string, error) {
|
||||||
|
panic("ptsname() support requires cgo.")
|
||||||
|
}
|
||||||
|
|
||||||
|
func unlockpt(f *os.File) error {
|
||||||
|
panic("unlockpt() support requires cgo.")
|
||||||
|
}
|
|
@ -0,0 +1,51 @@
|
||||||
|
// +build solaris,cgo
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
//#include <stdlib.h>
|
||||||
|
import "C"
|
||||||
|
|
||||||
|
const (
|
||||||
|
cmdTcGet = unix.TCGETS
|
||||||
|
cmdTcSet = unix.TCSETS
|
||||||
|
)
|
||||||
|
|
||||||
|
// ptsname retrieves the name of the first available pts for the given master.
|
||||||
|
func ptsname(f *os.File) (string, error) {
|
||||||
|
ptspath, err := C.ptsname(C.int(f.Fd()))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return C.GoString(ptspath), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||||
|
// unlockpt should be called before opening the slave side of a pty.
|
||||||
|
func unlockpt(f *os.File) error {
|
||||||
|
if _, err := C.grantpt(C.int(f.Fd())); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,47 @@
|
||||||
|
// +build solaris,!cgo
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
//
|
||||||
|
// Implementing the functions below requires cgo support. Non-cgo stubs
|
||||||
|
// versions are defined below to enable cross-compilation of source code
|
||||||
|
// that depends on these functions, but the resultant cross-compiled
|
||||||
|
// binaries cannot actually be used. If the stub function(s) below are
|
||||||
|
// actually invoked they will display an error message and cause the
|
||||||
|
// calling process to exit.
|
||||||
|
//
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
cmdTcGet = unix.TCGETS
|
||||||
|
cmdTcSet = unix.TCSETS
|
||||||
|
)
|
||||||
|
|
||||||
|
func ptsname(f *os.File) (string, error) {
|
||||||
|
panic("ptsname() support requires cgo.")
|
||||||
|
}
|
||||||
|
|
||||||
|
func unlockpt(f *os.File) error {
|
||||||
|
panic("unlockpt() support requires cgo.")
|
||||||
|
}
|
|
@ -0,0 +1,91 @@
|
||||||
|
// +build darwin freebsd linux openbsd solaris
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package console
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
func tcget(fd uintptr, p *unix.Termios) error {
|
||||||
|
termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*p = *termios
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func tcset(fd uintptr, p *unix.Termios) error {
|
||||||
|
return unix.IoctlSetTermios(int(fd), cmdTcSet, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func tcgwinsz(fd uintptr) (WinSize, error) {
|
||||||
|
var ws WinSize
|
||||||
|
|
||||||
|
uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
|
||||||
|
if err != nil {
|
||||||
|
return ws, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Translate from unix.Winsize to console.WinSize
|
||||||
|
ws.Height = uws.Row
|
||||||
|
ws.Width = uws.Col
|
||||||
|
ws.x = uws.Xpixel
|
||||||
|
ws.y = uws.Ypixel
|
||||||
|
return ws, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func tcswinsz(fd uintptr, ws WinSize) error {
|
||||||
|
// Translate from console.WinSize to unix.Winsize
|
||||||
|
|
||||||
|
var uws unix.Winsize
|
||||||
|
uws.Row = ws.Height
|
||||||
|
uws.Col = ws.Width
|
||||||
|
uws.Xpixel = ws.x
|
||||||
|
uws.Ypixel = ws.y
|
||||||
|
|
||||||
|
return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws)
|
||||||
|
}
|
||||||
|
|
||||||
|
func setONLCR(fd uintptr, enable bool) error {
|
||||||
|
var termios unix.Termios
|
||||||
|
if err := tcget(fd, &termios); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if enable {
|
||||||
|
// Set +onlcr so we can act like a real terminal
|
||||||
|
termios.Oflag |= unix.ONLCR
|
||||||
|
} else {
|
||||||
|
// Set -onlcr so we don't have to deal with \r.
|
||||||
|
termios.Oflag &^= unix.ONLCR
|
||||||
|
}
|
||||||
|
return tcset(fd, &termios)
|
||||||
|
}
|
||||||
|
|
||||||
|
func cfmakeraw(t unix.Termios) unix.Termios {
|
||||||
|
t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
|
||||||
|
t.Oflag &^= unix.OPOST
|
||||||
|
t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
|
||||||
|
t.Cflag &^= (unix.CSIZE | unix.PARENB)
|
||||||
|
t.Cflag &^= unix.CS8
|
||||||
|
t.Cc[unix.VMIN] = 1
|
||||||
|
t.Cc[unix.VTIME] = 0
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
|
@ -181,8 +181,24 @@ type ImageBuildOptions struct {
|
||||||
Target string
|
Target string
|
||||||
SessionID string
|
SessionID string
|
||||||
Platform string
|
Platform string
|
||||||
|
// Version specifies the version of the unerlying builder to use
|
||||||
|
Version BuilderVersion
|
||||||
|
// BuildID is an optional identifier that can be passed together with the
|
||||||
|
// build request. The same identifier can be used to gracefully cancel the
|
||||||
|
// build with the cancel request.
|
||||||
|
BuildID string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BuilderVersion sets the version of underlying builder to use
|
||||||
|
type BuilderVersion string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// BuilderV1 is the first generation builder in docker daemon
|
||||||
|
BuilderV1 BuilderVersion = "1"
|
||||||
|
// BuilderBuildKit is builder based on moby/buildkit project
|
||||||
|
BuilderBuildKit = "2"
|
||||||
|
)
|
||||||
|
|
||||||
// ImageBuildResponse holds information
|
// ImageBuildResponse holds information
|
||||||
// returned by a server after building
|
// returned by a server after building
|
||||||
// an image.
|
// an image.
|
||||||
|
|
|
@ -512,7 +512,8 @@ type DiskUsage struct {
|
||||||
Images []*ImageSummary
|
Images []*ImageSummary
|
||||||
Containers []*Container
|
Containers []*Container
|
||||||
Volumes []*Volume
|
Volumes []*Volume
|
||||||
BuilderSize int64
|
BuildCache []*BuildCache
|
||||||
|
BuilderSize int64 // deprecated
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContainersPruneReport contains the response for Engine API:
|
// ContainersPruneReport contains the response for Engine API:
|
||||||
|
@ -585,3 +586,17 @@ type PushResult struct {
|
||||||
type BuildResult struct {
|
type BuildResult struct {
|
||||||
ID string
|
ID string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BuildCache contains information about a build cache record
|
||||||
|
type BuildCache struct {
|
||||||
|
ID string
|
||||||
|
Mutable bool
|
||||||
|
InUse bool
|
||||||
|
Size int64
|
||||||
|
|
||||||
|
CreatedAt time.Time
|
||||||
|
LastUsedAt *time.Time
|
||||||
|
UsageCount int
|
||||||
|
Parent string
|
||||||
|
Description string
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,21 @@
|
||||||
|
package client // import "github.com/docker/docker/client"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BuildCancel requests the daemon to cancel ongoing build request
|
||||||
|
func (cli *Client) BuildCancel(ctx context.Context, id string) error {
|
||||||
|
query := url.Values{}
|
||||||
|
query.Set("id", id)
|
||||||
|
|
||||||
|
serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer ensureReaderClosed(serverResp)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -9,7 +9,6 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httputil"
|
"net/http/httputil"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
|
@ -17,21 +16,6 @@ import (
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// tlsClientCon holds tls information and a dialed connection.
|
|
||||||
type tlsClientCon struct {
|
|
||||||
*tls.Conn
|
|
||||||
rawConn net.Conn
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *tlsClientCon) CloseWrite() error {
|
|
||||||
// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
|
|
||||||
// on its underlying connection.
|
|
||||||
if conn, ok := c.rawConn.(types.CloseWriter); ok {
|
|
||||||
return conn.CloseWrite()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// postHijacked sends a POST request and hijacks the connection.
|
// postHijacked sends a POST request and hijacks the connection.
|
||||||
func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
|
func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
|
||||||
bodyEncoded, err := encodeData(body)
|
bodyEncoded, err := encodeData(body)
|
||||||
|
@ -54,96 +38,9 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu
|
||||||
return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
|
return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
|
|
||||||
return tlsDialWithDialer(new(net.Dialer), network, addr, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in
|
|
||||||
// order to return our custom tlsClientCon struct which holds both the tls.Conn
|
|
||||||
// object _and_ its underlying raw connection. The rationale for this is that
|
|
||||||
// we need to be able to close the write end of the connection when attaching,
|
|
||||||
// which tls.Conn does not provide.
|
|
||||||
func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
|
|
||||||
// We want the Timeout and Deadline values from dialer to cover the
|
|
||||||
// whole process: TCP connection and TLS handshake. This means that we
|
|
||||||
// also need to start our own timers now.
|
|
||||||
timeout := dialer.Timeout
|
|
||||||
|
|
||||||
if !dialer.Deadline.IsZero() {
|
|
||||||
deadlineTimeout := time.Until(dialer.Deadline)
|
|
||||||
if timeout == 0 || deadlineTimeout < timeout {
|
|
||||||
timeout = deadlineTimeout
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var errChannel chan error
|
|
||||||
|
|
||||||
if timeout != 0 {
|
|
||||||
errChannel = make(chan error, 2)
|
|
||||||
time.AfterFunc(timeout, func() {
|
|
||||||
errChannel <- errors.New("")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
proxyDialer, err := sockets.DialerFromEnvironment(dialer)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rawConn, err := proxyDialer.Dial(network, addr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// When we set up a TCP connection for hijack, there could be long periods
|
|
||||||
// of inactivity (a long running command with no output) that in certain
|
|
||||||
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
|
|
||||||
// state. Setting TCP KeepAlive on the socket connection will prohibit
|
|
||||||
// ECONNTIMEOUT unless the socket connection truly is broken
|
|
||||||
if tcpConn, ok := rawConn.(*net.TCPConn); ok {
|
|
||||||
tcpConn.SetKeepAlive(true)
|
|
||||||
tcpConn.SetKeepAlivePeriod(30 * time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
colonPos := strings.LastIndex(addr, ":")
|
|
||||||
if colonPos == -1 {
|
|
||||||
colonPos = len(addr)
|
|
||||||
}
|
|
||||||
hostname := addr[:colonPos]
|
|
||||||
|
|
||||||
// If no ServerName is set, infer the ServerName
|
|
||||||
// from the hostname we're connecting to.
|
|
||||||
if config.ServerName == "" {
|
|
||||||
// Make a copy to avoid polluting argument or default.
|
|
||||||
config = tlsConfigClone(config)
|
|
||||||
config.ServerName = hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
conn := tls.Client(rawConn, config)
|
|
||||||
|
|
||||||
if timeout == 0 {
|
|
||||||
err = conn.Handshake()
|
|
||||||
} else {
|
|
||||||
go func() {
|
|
||||||
errChannel <- conn.Handshake()
|
|
||||||
}()
|
|
||||||
|
|
||||||
err = <-errChannel
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
rawConn.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is Docker difference with standard's crypto/tls package: returned a
|
|
||||||
// wrapper which holds both the TLS and raw connections.
|
|
||||||
return &tlsClientCon{conn, rawConn}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
|
func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
|
||||||
if tlsConfig != nil && proto != "unix" && proto != "npipe" {
|
if tlsConfig != nil && proto != "unix" && proto != "npipe" {
|
||||||
// Notice this isn't Go standard's tls.Dial function
|
return tls.Dial(proto, addr, tlsConfig)
|
||||||
return tlsDial(proto, addr, tlsConfig)
|
|
||||||
}
|
}
|
||||||
if proto == "npipe" {
|
if proto == "npipe" {
|
||||||
return sockets.DialPipe(addr, 32*time.Second)
|
return sockets.DialPipe(addr, 32*time.Second)
|
||||||
|
|
|
@ -133,5 +133,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur
|
||||||
if options.Platform != "" {
|
if options.Platform != "" {
|
||||||
query.Set("platform", strings.ToLower(options.Platform))
|
query.Set("platform", strings.ToLower(options.Platform))
|
||||||
}
|
}
|
||||||
|
if options.BuildID != "" {
|
||||||
|
query.Set("buildid", options.BuildID)
|
||||||
|
}
|
||||||
|
query.Set("version", string(options.Version))
|
||||||
return query, nil
|
return query, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -86,6 +86,7 @@ type DistributionAPIClient interface {
|
||||||
type ImageAPIClient interface {
|
type ImageAPIClient interface {
|
||||||
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
|
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
|
||||||
BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
|
BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
|
||||||
|
BuildCancel(ctx context.Context, id string) error
|
||||||
ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
|
ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
|
||||||
ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
|
ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
|
||||||
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
|
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
|
||||||
|
|
|
@ -1,11 +0,0 @@
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
package client // import "github.com/docker/docker/client"
|
|
||||||
|
|
||||||
import "crypto/tls"
|
|
||||||
|
|
||||||
// tlsConfigClone returns a clone of tls.Config. This function is provided for
|
|
||||||
// compatibility for go1.7 that doesn't include this method in stdlib.
|
|
||||||
func tlsConfigClone(c *tls.Config) *tls.Config {
|
|
||||||
return c.Clone()
|
|
||||||
}
|
|
|
@ -1,33 +0,0 @@
|
||||||
// +build go1.7,!go1.8
|
|
||||||
|
|
||||||
package client // import "github.com/docker/docker/client"
|
|
||||||
|
|
||||||
import "crypto/tls"
|
|
||||||
|
|
||||||
// tlsConfigClone returns a clone of tls.Config. This function is provided for
|
|
||||||
// compatibility for go1.7 that doesn't include this method in stdlib.
|
|
||||||
func tlsConfigClone(c *tls.Config) *tls.Config {
|
|
||||||
return &tls.Config{
|
|
||||||
Rand: c.Rand,
|
|
||||||
Time: c.Time,
|
|
||||||
Certificates: c.Certificates,
|
|
||||||
NameToCertificate: c.NameToCertificate,
|
|
||||||
GetCertificate: c.GetCertificate,
|
|
||||||
RootCAs: c.RootCAs,
|
|
||||||
NextProtos: c.NextProtos,
|
|
||||||
ServerName: c.ServerName,
|
|
||||||
ClientAuth: c.ClientAuth,
|
|
||||||
ClientCAs: c.ClientCAs,
|
|
||||||
InsecureSkipVerify: c.InsecureSkipVerify,
|
|
||||||
CipherSuites: c.CipherSuites,
|
|
||||||
PreferServerCipherSuites: c.PreferServerCipherSuites,
|
|
||||||
SessionTicketsDisabled: c.SessionTicketsDisabled,
|
|
||||||
SessionTicketKey: c.SessionTicketKey,
|
|
||||||
ClientSessionCache: c.ClientSessionCache,
|
|
||||||
MinVersion: c.MinVersion,
|
|
||||||
MaxVersion: c.MaxVersion,
|
|
||||||
CurvePreferences: c.CurvePreferences,
|
|
||||||
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
|
|
||||||
Renegotiation: c.Renegotiation,
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -23,19 +23,22 @@ github.com/gotestyourself/gotestyourself cf3a5ab914a2efa8bc838d09f5918c1d44d029
|
||||||
github.com/google/go-cmp v0.2.0
|
github.com/google/go-cmp v0.2.0
|
||||||
|
|
||||||
github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
|
github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
|
||||||
github.com/imdario/mergo 0.2.1
|
github.com/imdario/mergo v0.3.5
|
||||||
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
|
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
|
||||||
|
|
||||||
# buildkit
|
# buildkit
|
||||||
github.com/moby/buildkit 43e758232a0ac7d50c6a11413186e16684fc1e4f
|
github.com/moby/buildkit b062a2d8ddbaa477c25c63d68a9cffbb43f6e474
|
||||||
github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
|
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
|
||||||
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
|
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
|
||||||
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
|
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
|
||||||
|
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
|
||||||
|
github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
|
||||||
|
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
|
||||||
|
|
||||||
#get libnetwork packages
|
#get libnetwork packages
|
||||||
|
|
||||||
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly
|
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly
|
||||||
github.com/docker/libnetwork 3931ba4d815e385ab97093c64477b82f14dadefb
|
github.com/docker/libnetwork 19279f0492417475b6bfbd0aa529f73e8f178fb5
|
||||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||||
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
|
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
|
||||||
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
|
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
|
||||||
|
@ -72,8 +75,8 @@ github.com/pborman/uuid v1.0
|
||||||
|
|
||||||
google.golang.org/grpc v1.12.0
|
google.golang.org/grpc v1.12.0
|
||||||
|
|
||||||
# When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly
|
# This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal
|
||||||
github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
|
github.com/opencontainers/runc 0e561642f81e84ebd0b3afd6ec510c75a2ccb71b
|
||||||
github.com/opencontainers/runtime-spec v1.0.1
|
github.com/opencontainers/runtime-spec v1.0.1
|
||||||
github.com/opencontainers/image-spec v1.0.1
|
github.com/opencontainers/image-spec v1.0.1
|
||||||
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
|
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
|
||||||
|
@ -112,11 +115,11 @@ github.com/googleapis/gax-go v2.0.0
|
||||||
google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
|
google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
|
||||||
|
|
||||||
# containerd
|
# containerd
|
||||||
github.com/containerd/containerd c7083eed5d8633d54c25fe81aa609010a4f2e495
|
github.com/containerd/containerd 63522d9eaa5a0443d225642c4b6f4f5fdedf932b
|
||||||
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
||||||
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
|
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
|
||||||
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
|
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
|
||||||
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
|
github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
|
||||||
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd
|
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd
|
||||||
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
|
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
|
||||||
github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577
|
github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577
|
||||||
|
@ -131,7 +134,7 @@ github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b65068
|
||||||
golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
|
golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
|
||||||
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
|
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
|
||||||
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
|
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
|
||||||
github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
|
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
|
||||||
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
||||||
github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
|
github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
|
||||||
github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
|
github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
|
||||||
|
|
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,2 @@
|
||||||
|
go-shlex is a simple lexer for go that supports shell-style quoting,
|
||||||
|
commenting, and escaping.
|
|
@ -0,0 +1,417 @@
|
||||||
|
/*
|
||||||
|
Copyright 2012 Google Inc. All Rights Reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package shlex implements a simple lexer which splits input in to tokens using
|
||||||
|
shell-style rules for quoting and commenting.
|
||||||
|
|
||||||
|
The basic use case uses the default ASCII lexer to split a string into sub-strings:
|
||||||
|
|
||||||
|
shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
|
||||||
|
|
||||||
|
To process a stream of strings:
|
||||||
|
|
||||||
|
l := NewLexer(os.Stdin)
|
||||||
|
for ; token, err := l.Next(); err != nil {
|
||||||
|
// process token
|
||||||
|
}
|
||||||
|
|
||||||
|
To access the raw token stream (which includes tokens for comments):
|
||||||
|
|
||||||
|
t := NewTokenizer(os.Stdin)
|
||||||
|
for ; token, err := t.Next(); err != nil {
|
||||||
|
// process token
|
||||||
|
}
|
||||||
|
|
||||||
|
*/
|
||||||
|
package shlex
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TokenType is a top-level token classification: A word, space, comment, unknown.
|
||||||
|
type TokenType int
|
||||||
|
|
||||||
|
// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
|
||||||
|
type runeTokenClass int
|
||||||
|
|
||||||
|
// the internal state used by the lexer state machine
|
||||||
|
type lexerState int
|
||||||
|
|
||||||
|
// Token is a (type, value) pair representing a lexographical token.
|
||||||
|
type Token struct {
|
||||||
|
tokenType TokenType
|
||||||
|
value string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal reports whether tokens a, and b, are equal.
|
||||||
|
// Two tokens are equal if both their types and values are equal. A nil token can
|
||||||
|
// never be equal to another token.
|
||||||
|
func (a *Token) Equal(b *Token) bool {
|
||||||
|
if a == nil || b == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if a.tokenType != b.tokenType {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return a.value == b.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Named classes of UTF-8 runes
|
||||||
|
const (
|
||||||
|
spaceRunes = " \t\r\n"
|
||||||
|
escapingQuoteRunes = `"`
|
||||||
|
nonEscapingQuoteRunes = "'"
|
||||||
|
escapeRunes = `\`
|
||||||
|
commentRunes = "#"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Classes of rune token
|
||||||
|
const (
|
||||||
|
unknownRuneClass runeTokenClass = iota
|
||||||
|
spaceRuneClass
|
||||||
|
escapingQuoteRuneClass
|
||||||
|
nonEscapingQuoteRuneClass
|
||||||
|
escapeRuneClass
|
||||||
|
commentRuneClass
|
||||||
|
eofRuneClass
|
||||||
|
)
|
||||||
|
|
||||||
|
// Classes of lexographic token
|
||||||
|
const (
|
||||||
|
UnknownToken TokenType = iota
|
||||||
|
WordToken
|
||||||
|
SpaceToken
|
||||||
|
CommentToken
|
||||||
|
)
|
||||||
|
|
||||||
|
// Lexer state machine states
|
||||||
|
const (
|
||||||
|
startState lexerState = iota // no runes have been seen
|
||||||
|
inWordState // processing regular runes in a word
|
||||||
|
escapingState // we have just consumed an escape rune; the next rune is literal
|
||||||
|
escapingQuotedState // we have just consumed an escape rune within a quoted string
|
||||||
|
quotingEscapingState // we are within a quoted string that supports escaping ("...")
|
||||||
|
quotingState // we are within a string that does not support escaping ('...')
|
||||||
|
commentState // we are within a comment (everything following an unquoted or unescaped #
|
||||||
|
)
|
||||||
|
|
||||||
|
// tokenClassifier is used for classifying rune characters.
|
||||||
|
type tokenClassifier map[rune]runeTokenClass
|
||||||
|
|
||||||
|
func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
|
||||||
|
for _, runeChar := range runes {
|
||||||
|
typeMap[runeChar] = tokenType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newDefaultClassifier creates a new classifier for ASCII characters.
|
||||||
|
func newDefaultClassifier() tokenClassifier {
|
||||||
|
t := tokenClassifier{}
|
||||||
|
t.addRuneClass(spaceRunes, spaceRuneClass)
|
||||||
|
t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
|
||||||
|
t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
|
||||||
|
t.addRuneClass(escapeRunes, escapeRuneClass)
|
||||||
|
t.addRuneClass(commentRunes, commentRuneClass)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClassifyRune classifiees a rune
|
||||||
|
func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
|
||||||
|
return t[runeVal]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
|
||||||
|
type Lexer Tokenizer
|
||||||
|
|
||||||
|
// NewLexer creates a new lexer from an input stream.
|
||||||
|
func NewLexer(r io.Reader) *Lexer {
|
||||||
|
|
||||||
|
return (*Lexer)(NewTokenizer(r))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next returns the next word, or an error. If there are no more words,
|
||||||
|
// the error will be io.EOF.
|
||||||
|
func (l *Lexer) Next() (string, error) {
|
||||||
|
for {
|
||||||
|
token, err := (*Tokenizer)(l).Next()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
switch token.tokenType {
|
||||||
|
case WordToken:
|
||||||
|
return token.value, nil
|
||||||
|
case CommentToken:
|
||||||
|
// skip comments
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tokenizer turns an input stream into a sequence of typed tokens
|
||||||
|
type Tokenizer struct {
|
||||||
|
input bufio.Reader
|
||||||
|
classifier tokenClassifier
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTokenizer creates a new tokenizer from an input stream.
|
||||||
|
func NewTokenizer(r io.Reader) *Tokenizer {
|
||||||
|
input := bufio.NewReader(r)
|
||||||
|
classifier := newDefaultClassifier()
|
||||||
|
return &Tokenizer{
|
||||||
|
input: *input,
|
||||||
|
classifier: classifier}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanStream scans the stream for the next token using the internal state machine.
// It will panic if it encounters a rune which it does not know how to handle.
//
// Returns the next Token, or a non-nil error. io.EOF is returned once the
// input is exhausted with no pending token. When a token is terminated by
// EOF mid-escape or mid-quote, the partial token is returned together with
// a descriptive error.
func (t *Tokenizer) scanStream() (*Token, error) {
	state := startState
	var tokenType TokenType
	var value []rune // runes accumulated for the token being built
	var nextRune rune
	var nextRuneType runeTokenClass
	var err error

	for {
		nextRune, _, err = t.input.ReadRune()
		nextRuneType = t.classifier.ClassifyRune(nextRune)

		// Fold EOF into the state machine as its own rune class so each
		// state can decide whether end-of-input ends a token or is an error.
		if err == io.EOF {
			nextRuneType = eofRuneClass
			err = nil
		} else if err != nil {
			return nil, err
		}

		switch state {
		case startState: // no runes read yet
			{
				switch nextRuneType {
				case eofRuneClass:
					{
						return nil, io.EOF
					}
				case spaceRuneClass:
					{
						// Skip leading whitespace.
					}
				case escapingQuoteRuneClass:
					{
						tokenType = WordToken
						state = quotingEscapingState
					}
				case nonEscapingQuoteRuneClass:
					{
						tokenType = WordToken
						state = quotingState
					}
				case escapeRuneClass:
					{
						tokenType = WordToken
						state = escapingState
					}
				case commentRuneClass:
					{
						tokenType = CommentToken
						state = commentState
					}
				default:
					{
						tokenType = WordToken
						value = append(value, nextRune)
						state = inWordState
					}
				}
			}
		case inWordState: // in a regular word
			{
				switch nextRuneType {
				case eofRuneClass:
					{
						token := &Token{
							tokenType: tokenType,
							value:     string(value)}
						return token, err
					}
				case spaceRuneClass:
					{
						// Whitespace terminates the word; push the rune back
						// so the next scan starts on it cleanly.
						t.input.UnreadRune()
						token := &Token{
							tokenType: tokenType,
							value:     string(value)}
						return token, err
					}
				case escapingQuoteRuneClass:
					{
						state = quotingEscapingState
					}
				case nonEscapingQuoteRuneClass:
					{
						state = quotingState
					}
				case escapeRuneClass:
					{
						state = escapingState
					}
				default:
					{
						value = append(value, nextRune)
					}
				}
			}
		case escapingState: // the rune after an escape character
			{
				switch nextRuneType {
				case eofRuneClass:
					{
						// Dangling escape: return what we have plus an error.
						err = fmt.Errorf("EOF found after escape character")
						token := &Token{
							tokenType: tokenType,
							value:     string(value)}
						return token, err
					}
				default:
					{
						// The escaped rune is taken literally.
						state = inWordState
						value = append(value, nextRune)
					}
				}
			}
		case escapingQuotedState: // the next rune after an escape character, in double quotes
			{
				switch nextRuneType {
				case eofRuneClass:
					{
						err = fmt.Errorf("EOF found after escape character")
						token := &Token{
							tokenType: tokenType,
							value:     string(value)}
						return token, err
					}
				default:
					{
						state = quotingEscapingState
						value = append(value, nextRune)
					}
				}
			}
		case quotingEscapingState: // in escaping double quotes
			{
				switch nextRuneType {
				case eofRuneClass:
					{
						err = fmt.Errorf("EOF found when expecting closing quote")
						token := &Token{
							tokenType: tokenType,
							value:     string(value)}
						return token, err
					}
				case escapingQuoteRuneClass:
					{
						state = inWordState
					}
				case escapeRuneClass:
					{
						state = escapingQuotedState
					}
				default:
					{
						value = append(value, nextRune)
					}
				}
			}
		case quotingState: // in non-escaping single quotes
			{
				switch nextRuneType {
				case eofRuneClass:
					{
						err = fmt.Errorf("EOF found when expecting closing quote")
						token := &Token{
							tokenType: tokenType,
							value:     string(value)}
						return token, err
					}
				case nonEscapingQuoteRuneClass:
					{
						state = inWordState
					}
				default:
					{
						value = append(value, nextRune)
					}
				}
			}
		case commentState: // in a comment
			{
				switch nextRuneType {
				case eofRuneClass:
					{
						token := &Token{
							tokenType: tokenType,
							value:     string(value)}
						return token, err
					}
				case spaceRuneClass:
					{
						if nextRune == '\n' {
							// Newline terminates the comment.
							state = startState
							token := &Token{
								tokenType: tokenType,
								value:     string(value)}
							return token, err
						} else {
							value = append(value, nextRune)
						}
					}
				default:
					{
						value = append(value, nextRune)
					}
				}
			}
		default:
			{
				return nil, fmt.Errorf("Unexpected state: %v", state)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// Next returns the next token in the stream.
// It delegates to the internal state machine in scanStream.
func (t *Tokenizer) Next() (*Token, error) {
	return t.scanStream()
}
|
||||||
|
|
||||||
|
// Split partitions a string into a slice of strings.
|
||||||
|
func Split(s string) ([]string, error) {
|
||||||
|
l := NewLexer(strings.NewReader(s))
|
||||||
|
subStrings := make([]string, 0)
|
||||||
|
for {
|
||||||
|
word, err := l.Next()
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return subStrings, nil
|
||||||
|
}
|
||||||
|
return subStrings, err
|
||||||
|
}
|
||||||
|
subStrings = append(subStrings, word)
|
||||||
|
}
|
||||||
|
}
|
4871
vendor/github.com/moby/buildkit/api/services/control/control.pb.go
generated
vendored
Normal file
4871
vendor/github.com/moby/buildkit/api/services/control/control.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
121
vendor/github.com/moby/buildkit/api/services/control/control.proto
generated
vendored
Normal file
121
vendor/github.com/moby/buildkit/api/services/control/control.proto
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
||||||
|
// Control API for buildkitd: disk usage, pruning, build solving,
// status streaming, session forwarding and worker listing.
syntax = "proto3";

package moby.buildkit.v1;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";

option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;

// Control is the daemon-side build control service.
service Control {
	rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse);
	rpc Prune(PruneRequest) returns (stream UsageRecord);
	rpc Solve(SolveRequest) returns (SolveResponse);
	rpc Status(StatusRequest) returns (stream StatusResponse);
	rpc Session(stream BytesMessage) returns (stream BytesMessage);
	rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
}

// PruneRequest asks the daemon to remove build-cache records; each removed
// record is streamed back.
message PruneRequest {
	// TODO: filter
}

// DiskUsageRequest queries build-cache usage, optionally filtered.
message DiskUsageRequest {
	string filter = 1; // FIXME: this should be containerd-compatible repeated string?
}

// DiskUsageResponse carries one UsageRecord per cache entry.
message DiskUsageResponse {
	repeated UsageRecord record = 1;
}

// UsageRecord describes a single build-cache record.
message UsageRecord {
	string ID = 1;
	bool Mutable = 2;
	bool InUse = 3;
	int64 Size = 4;
	string Parent = 5;
	google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true];
	int64 UsageCount = 8;
	string Description = 9;
}

// SolveRequest starts a build, either from a frontend or an LLB definition.
message SolveRequest {
	string Ref = 1;
	pb.Definition Definition = 2;
	string Exporter = 3;
	map<string, string> ExporterAttrs = 4;
	string Session = 5;
	string Frontend = 6;
	map<string, string> FrontendAttrs = 7;
	CacheOptions Cache = 8 [(gogoproto.nullable) = false];
}

// CacheOptions controls cache export/import for a solve.
message CacheOptions {
	string ExportRef = 1;
	repeated string ImportRefs = 2;
	map<string, string> ExportAttrs = 3;
}

// SolveResponse returns exporter-specific result metadata.
message SolveResponse {
	map<string, string> ExporterResponse = 1;
}

// StatusRequest subscribes to progress for the build identified by Ref.
message StatusRequest {
	string Ref = 1;
}

// StatusResponse is one batch of progress updates.
message StatusResponse {
	repeated Vertex vertexes = 1;
	repeated VertexStatus statuses = 2;
	repeated VertexLog logs = 3;
}

// Vertex describes one node of the build graph and its lifecycle.
message Vertex {
	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
	repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
	string name = 3;
	bool cached = 4;
	google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ];
	google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ];
	string error = 7; // typed errors?
}

// VertexStatus reports progress (e.g. bytes transferred) for a vertex.
message VertexStatus {
	string ID = 1;
	string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
	string name = 3;
	int64 current = 4;
	int64 total = 5;
	// TODO: add started, completed
	google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ];
	google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ];
}

// VertexLog carries a chunk of log output produced by a vertex.
message VertexLog {
	string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
	google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	int64 stream = 3;
	bytes msg = 4;
}

// BytesMessage is an opaque payload for the bidirectional Session stream.
message BytesMessage {
	bytes data = 1;
}

// ListWorkersRequest queries registered workers, optionally filtered.
message ListWorkersRequest {
	repeated string filter = 1; // containerd style
}

// ListWorkersResponse carries one WorkerRecord per worker.
message ListWorkersResponse {
	repeated WorkerRecord record = 1;
}

// WorkerRecord identifies a worker and its labels.
message WorkerRecord {
	string ID = 1;
	map<string, string> Labels = 2;
}
|
|
@ -0,0 +1,3 @@
|
||||||
|
// Package moby_buildkit_v1 contains the generated control API; the
// directive below regenerates control.pb.go from control.proto.
package moby_buildkit_v1

//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto
|
|
@ -0,0 +1,136 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"io/ioutil"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/util/appdefaults"
|
||||||
|
opentracing "github.com/opentracing/opentracing-go"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client is a buildkit control-API client backed by a single gRPC connection.
type Client struct {
	conn *grpc.ClientConn
}

// ClientOpt is an option for New. Values are produced by WithBlock,
// WithCredentials and WithTracer.
type ClientOpt interface{}
|
||||||
|
|
||||||
|
// New returns a new buildkit client. Address can be empty for the system-default address.
//
// Options are inspected by concrete type: *withBlockOpt makes the dial
// blocking, *withCredentials switches the transport to TLS, and *withTracer
// installs opentracing interceptors. Unless credentials are supplied, the
// connection is insecure.
func New(address string, opts ...ClientOpt) (*Client, error) {
	gopts := []grpc.DialOption{
		grpc.WithDialer(dialer),
		grpc.FailOnNonTempDialError(true),
	}
	needWithInsecure := true
	for _, o := range opts {
		if _, ok := o.(*withBlockOpt); ok {
			// NOTE(review): FailOnNonTempDialError is already in the base
			// options above, so it is appended twice here — appears
			// harmless but confirm before cleaning up.
			gopts = append(gopts, grpc.WithBlock(), grpc.FailOnNonTempDialError(true))
		}
		if credInfo, ok := o.(*withCredentials); ok {
			opt, err := loadCredentials(credInfo)
			if err != nil {
				return nil, err
			}
			gopts = append(gopts, opt)
			needWithInsecure = false
		}
		if wt, ok := o.(*withTracer); ok {
			gopts = append(gopts,
				grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())),
				grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)))
		}
	}
	if needWithInsecure {
		gopts = append(gopts, grpc.WithInsecure())
	}
	if address == "" {
		address = appdefaults.Address
	}

	// Bound the dial attempt to 30 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, address, gopts...)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
	}
	c := &Client{
		conn: conn,
	}
	return c, nil
}
|
||||||
|
|
||||||
|
// controlClient returns a control-service stub bound to the client's connection.
func (c *Client) controlClient() controlapi.ControlClient {
	return controlapi.NewControlClient(c.conn)
}

// Close tears down the underlying gRPC connection.
func (c *Client) Close() error {
	return c.conn.Close()
}
|
||||||
|
|
||||||
|
// withBlockOpt marks that New should block until the connection is up.
type withBlockOpt struct{}

// WithBlock returns a ClientOpt that makes New wait for the dial to complete.
func WithBlock() ClientOpt {
	return &withBlockOpt{}
}

// withCredentials carries the TLS file paths consumed by loadCredentials.
type withCredentials struct {
	ServerName string
	CACert     string
	Cert       string
	Key        string
}

// WithCredentials configures the TLS parameters of the client.
// Arguments:
// * serverName: specifies the name of the target server
// * ca: specifies the filepath of the CA certificate to use for verification
// * cert: specifies the filepath of the client certificate
// * key: specifies the filepath of the client key
func WithCredentials(serverName, ca, cert, key string) ClientOpt {
	return &withCredentials{serverName, ca, cert, key}
}
|
||||||
|
|
||||||
|
// loadCredentials builds a TLS transport-credentials DialOption from the
// file paths in opts. The CA certificate is required; a client cert/key
// pair is optional but must be supplied together.
func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
	ca, err := ioutil.ReadFile(opts.CACert)
	if err != nil {
		return nil, errors.Wrap(err, "could not read ca certificate")
	}

	certPool := x509.NewCertPool()
	if ok := certPool.AppendCertsFromPEM(ca); !ok {
		return nil, errors.New("failed to append ca certs")
	}

	cfg := &tls.Config{
		ServerName: opts.ServerName,
		RootCAs:    certPool,
	}

	// we will produce an error if the user forgot about either cert or key if at least one is specified
	if opts.Cert != "" || opts.Key != "" {
		cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key)
		if err != nil {
			return nil, errors.Wrap(err, "could not read certificate/key")
		}
		cfg.Certificates = []tls.Certificate{cert}
		// NOTE(review): BuildNameToCertificate is deprecated in newer Go
		// releases (the TLS stack selects certificates itself) — confirm
		// the target Go version before removing.
		cfg.BuildNameToCertificate()
	}

	return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
}
|
||||||
|
|
||||||
|
// WithTracer returns a ClientOpt that instruments every gRPC call with the
// given opentracing tracer.
func WithTracer(t opentracing.Tracer) ClientOpt {
	return &withTracer{t}
}

// withTracer carries the tracer supplied via WithTracer.
type withTracer struct {
	tracer opentracing.Tracer
}
|
|
@ -0,0 +1,19 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dialer(address string, timeout time.Duration) (net.Conn, error) {
|
||||||
|
addrParts := strings.SplitN(address, "://", 2)
|
||||||
|
if len(addrParts) != 2 {
|
||||||
|
return nil, errors.Errorf("invalid address %s", address)
|
||||||
|
}
|
||||||
|
return net.DialTimeout(addrParts[0], addrParts[1], timeout)
|
||||||
|
}
|
|
@ -0,0 +1,24 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Microsoft/go-winio"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dialer(address string, timeout time.Duration) (net.Conn, error) {
|
||||||
|
addrParts := strings.SplitN(address, "://", 2)
|
||||||
|
if len(addrParts) != 2 {
|
||||||
|
return nil, errors.Errorf("invalid address %s", address)
|
||||||
|
}
|
||||||
|
switch addrParts[0] {
|
||||||
|
case "npipe":
|
||||||
|
address = strings.Replace(addrParts[1], "/", "\\", 0)
|
||||||
|
return winio.DialPipe(address, &timeout)
|
||||||
|
default:
|
||||||
|
return net.DialTimeout(addrParts[0], addrParts[1], timeout)
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,73 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UsageInfo describes a single build-cache record as returned by DiskUsage.
type UsageInfo struct {
	ID      string
	Mutable bool
	InUse   bool
	Size    int64

	CreatedAt   time.Time
	LastUsedAt  *time.Time // nil when the record has never been used
	UsageCount  int
	Parent      string
	Description string
}
|
||||||
|
|
||||||
|
// DiskUsage queries the daemon's build-cache records, applying any
// DiskUsageOption (e.g. WithFilter). Results are sorted by Size descending,
// ties broken by ID descending.
func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
	info := &DiskUsageInfo{}
	for _, o := range opts {
		o(info)
	}

	req := &controlapi.DiskUsageRequest{Filter: info.Filter}
	resp, err := c.controlClient().DiskUsage(ctx, req)
	if err != nil {
		return nil, errors.Wrap(err, "failed to call diskusage")
	}

	var du []*UsageInfo

	// Convert each wire record into the public UsageInfo shape.
	for _, d := range resp.Record {
		du = append(du, &UsageInfo{
			ID:          d.ID,
			Mutable:     d.Mutable,
			InUse:       d.InUse,
			Size:        d.Size_,
			Parent:      d.Parent,
			CreatedAt:   d.CreatedAt,
			Description: d.Description,
			UsageCount:  int(d.UsageCount),
			LastUsedAt:  d.LastUsedAt,
		})
	}

	sort.Slice(du, func(i, j int) bool {
		if du[i].Size == du[j].Size {
			return du[i].ID > du[j].ID
		}
		return du[i].Size > du[j].Size
	})

	return du, nil
}
|
||||||
|
|
||||||
|
// DiskUsageOption mutates the DiskUsageInfo used to build a DiskUsage query.
type DiskUsageOption func(*DiskUsageInfo)

// DiskUsageInfo collects the parameters of a DiskUsage call.
type DiskUsageInfo struct {
	Filter string
}

// WithFilter returns a DiskUsageOption that restricts results to f.
func WithFilter(f string) DiskUsageOption {
	return func(info *DiskUsageInfo) {
		info.Filter = f
	}
}
|
|
@ -0,0 +1,8 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
// Exporter names accepted in SolveRequest.Exporter.
const (
	ExporterImage  = "image"
	ExporterLocal  = "local"
	ExporterOCI    = "oci"
	ExporterDocker = "docker"
)
|
|
@ -0,0 +1,45 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Vertex describes one node of the build graph and its lifecycle.
type Vertex struct {
	Digest    digest.Digest
	Inputs    []digest.Digest
	Name      string
	Started   *time.Time // nil until the vertex begins executing
	Completed *time.Time // nil until the vertex finishes
	Cached    bool
	Error     string
}

// VertexStatus reports progress (e.g. bytes transferred) for a vertex.
type VertexStatus struct {
	ID        string
	Vertex    digest.Digest
	Name      string
	Total     int64
	Current   int64
	Timestamp time.Time
	Started   *time.Time
	Completed *time.Time
}

// VertexLog carries one chunk of log output emitted by a vertex.
type VertexLog struct {
	Vertex    digest.Digest
	Stream    int
	Data      []byte
	Timestamp time.Time
}

// SolveStatus is one batch of progress updates for a build.
type SolveStatus struct {
	Vertexes []*Vertex
	Statuses []*VertexStatus
	Logs     []*VertexLog
}

// SolveResponse holds exporter-specific result metadata for a finished build.
type SolveResponse struct {
	ExporterResponse map[string]string
}
|
|
@ -0,0 +1,387 @@
|
||||||
|
package llb
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "crypto/sha256"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Meta contains the execution parameters of an ExecOp: the command to run
// and its environment.
type Meta struct {
	Args     []string  // command and arguments
	Env      EnvList   // environment variables
	Cwd      string    // working directory
	User     string    // user to run as
	ProxyEnv *ProxyEnv // optional proxy settings for the process
}
|
||||||
|
|
||||||
|
// NewExecOp creates an ExecOp rooted at root. When readOnly is false the
// root filesystem becomes the op's mutable output; when true, the op's
// output is the unmodified root.
func NewExecOp(root Output, meta Meta, readOnly bool, md OpMetadata) *ExecOp {
	e := &ExecOp{meta: meta, cachedOpMetadata: md}
	rootMount := &mount{
		target:   pb.RootMount,
		source:   root,
		readonly: readOnly,
	}
	e.mounts = append(e.mounts, rootMount)
	if readOnly {
		e.root = root
	} else {
		// Writable root: the output index is resolved lazily because later
		// AddMount calls can change mount ordering.
		e.root = &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)}
	}
	rootMount.output = e.root

	return e
}
|
||||||
|
|
||||||
|
// mount describes one filesystem mount of an ExecOp.
type mount struct {
	target   string // destination path inside the execution environment
	readonly bool
	source   Output // input filesystem; nil means scratch
	output   Output // result of this mount after execution
	selector string // subpath of source to mount
	cacheID  string // non-empty marks a persistent cache mount
	tmpfs    bool   // mount a tmpfs instead of a snapshot
	// hasOutput bool
}

// ExecOp is an LLB vertex that runs a command over a set of mounts.
type ExecOp struct {
	root             Output
	mounts           []*mount
	meta             Meta
	cachedPBDigest   digest.Digest // memoized digest of cachedPB
	cachedPB         []byte        // memoized marshaled form; nil when dirty
	cachedOpMetadata OpMetadata
	isValidated      bool // memoized Validate result
}
|
||||||
|
|
||||||
|
// AddMount attaches source at target and returns the mount's output, usable
// as an input of later ops. Read-only mounts pass their source through;
// tmpfs mounts produce an error output (they cannot be used as a parent).
// Adding a mount invalidates any previously marshaled form of the op.
func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output {
	m := &mount{
		target: target,
		source: source,
	}
	for _, o := range opt {
		o(m)
	}
	e.mounts = append(e.mounts, m)
	if m.readonly {
		m.output = source
	} else if m.tmpfs {
		m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)}
	} else {
		m.output = &output{vertex: e, getIndex: e.getMountIndexFn(m)}
	}
	// Drop memoized state; the op definition has changed.
	e.cachedPB = nil
	e.isValidated = false
	return m.output
}
|
||||||
|
|
||||||
|
func (e *ExecOp) GetMount(target string) Output {
|
||||||
|
for _, m := range e.mounts {
|
||||||
|
if m.target == target {
|
||||||
|
return m.output
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks that the op has a runnable configuration (non-empty
// arguments and working directory) and recursively validates every mount
// source. A successful result is memoized in isValidated.
func (e *ExecOp) Validate() error {
	if e.isValidated {
		return nil
	}
	if len(e.meta.Args) == 0 {
		return errors.Errorf("arguments are required")
	}
	if e.meta.Cwd == "" {
		return errors.Errorf("working directory is required")
	}
	for _, m := range e.mounts {
		if m.source != nil {
			if err := m.source.Vertex().Validate(); err != nil {
				return err
			}
		}
	}
	e.isValidated = true
	return nil
}
|
||||||
|
|
||||||
|
// Marshal serializes the op to its protobuf form, memoizing the result in
// cachedPB/cachedPBDigest. Returns the digest of the marshaled bytes, the
// bytes themselves, and the op metadata.
func (e *ExecOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
	if e.cachedPB != nil {
		return e.cachedPBDigest, e.cachedPB, &e.cachedOpMetadata, nil
	}
	if err := e.Validate(); err != nil {
		return "", nil, nil, err
	}
	// make sure mounts are sorted
	sort.Slice(e.mounts, func(i, j int) bool {
		return e.mounts[i].target < e.mounts[j].target
	})

	peo := &pb.ExecOp{
		Meta: &pb.Meta{
			Args: e.meta.Args,
			Env:  e.meta.Env.ToArray(),
			Cwd:  e.meta.Cwd,
			User: e.meta.User,
		},
	}

	if p := e.meta.ProxyEnv; p != nil {
		peo.Meta.ProxyEnv = &pb.ProxyEnv{
			HttpProxy:  p.HttpProxy,
			HttpsProxy: p.HttpsProxy,
			FtpProxy:   p.FtpProxy,
			NoProxy:    p.NoProxy,
		}
	}

	pop := &pb.Op{
		Op: &pb.Op_Exec{
			Exec: peo,
		},
	}

	outIndex := 0
	for _, m := range e.mounts {
		inputIndex := pb.InputIndex(len(pop.Inputs))
		if m.source != nil {
			// tmpfs mounts must not have a source filesystem.
			if m.tmpfs {
				return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch")
			}
			inp, err := m.source.ToInput()
			if err != nil {
				return "", nil, nil, err
			}

			// Deduplicate identical inputs so each appears only once.
			newInput := true

			for i, inp2 := range pop.Inputs {
				if *inp == *inp2 {
					inputIndex = pb.InputIndex(i)
					newInput = false
					break
				}
			}

			if newInput {
				pop.Inputs = append(pop.Inputs, inp)
			}
		} else {
			inputIndex = pb.Empty
		}

		// Only writable, non-cache, non-tmpfs mounts produce an output;
		// indices are assigned in target-sorted order.
		outputIndex := pb.OutputIndex(-1)
		if !m.readonly && m.cacheID == "" && !m.tmpfs {
			outputIndex = pb.OutputIndex(outIndex)
			outIndex++
		}

		pm := &pb.Mount{
			Input:    inputIndex,
			Dest:     m.target,
			Readonly: m.readonly,
			Output:   outputIndex,
			Selector: m.selector,
		}
		if m.cacheID != "" {
			pm.MountType = pb.MountType_CACHE
			pm.CacheOpt = &pb.CacheOpt{
				ID: m.cacheID,
			}
		}
		if m.tmpfs {
			pm.MountType = pb.MountType_TMPFS
		}
		peo.Mounts = append(peo.Mounts, pm)
	}

	dt, err := pop.Marshal()
	if err != nil {
		return "", nil, nil, err
	}
	e.cachedPBDigest = digest.FromBytes(dt)
	e.cachedPB = dt
	return e.cachedPBDigest, dt, &e.cachedOpMetadata, nil
}
|
||||||
|
|
||||||
|
// Output returns the op's primary output (the root filesystem).
func (e *ExecOp) Output() Output {
	return e.root
}

// Inputs returns the distinct mount sources of the op. Order is not
// deterministic (map iteration).
func (e *ExecOp) Inputs() (inputs []Output) {
	mm := map[Output]struct{}{}
	for _, m := range e.mounts {
		if m.source != nil {
			mm[m.source] = struct{}{}
		}
	}
	for o := range mm {
		inputs = append(inputs, o)
	}
	return
}
|
||||||
|
|
||||||
|
func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
|
||||||
|
return func() (pb.OutputIndex, error) {
|
||||||
|
// make sure mounts are sorted
|
||||||
|
sort.Slice(e.mounts, func(i, j int) bool {
|
||||||
|
return e.mounts[i].target < e.mounts[j].target
|
||||||
|
})
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for _, m2 := range e.mounts {
|
||||||
|
if m2.readonly || m2.cacheID != "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if m == m2 {
|
||||||
|
return pb.OutputIndex(i), nil
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecState is a State paired with the ExecOp that produced it, exposing
// mount accessors on the result.
type ExecState struct {
	State
	exec *ExecOp
}

// AddMount mounts source at target on the underlying exec op and returns
// source's state rebased onto the mount's output.
func (e ExecState) AddMount(target string, source State, opt ...MountOption) State {
	return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...))
}

// GetMount returns the post-exec state of the mount at target.
func (e ExecState) GetMount(target string) State {
	return NewState(e.exec.GetMount(target))
}

// Root returns the state of the root filesystem after execution.
func (e ExecState) Root() State {
	return e.State
}
|
||||||
|
|
||||||
|
// MountOption mutates a mount while it is being constructed by AddMount.
type MountOption func(*mount)

// Readonly marks the mount as read-only.
func Readonly(m *mount) {
	m.readonly = true
}

// SourcePath mounts only the subpath src of the source filesystem.
func SourcePath(src string) MountOption {
	return func(m *mount) {
		m.selector = src
	}
}

// AsPersistentCacheDir turns the mount into a persistent cache mount keyed by id.
func AsPersistentCacheDir(id string) MountOption {
	return func(m *mount) {
		m.cacheID = id
	}
}

// Tmpfs turns the mount into a tmpfs mount.
func Tmpfs() MountOption {
	return func(m *mount) {
		m.tmpfs = true
	}
}
|
||||||
|
|
||||||
|
// RunOption mutates an ExecInfo before an exec op is constructed from it.
type RunOption interface {
	SetRunOption(es *ExecInfo)
}

// runOptionFunc adapts a plain function to the RunOption interface.
type runOptionFunc func(*ExecInfo)

func (fn runOptionFunc) SetRunOption(ei *ExecInfo) {
	fn(ei)
}
|
||||||
|
|
||||||
|
// Shlex sets the command by shell-splitting str.
func Shlex(str string) RunOption {
	return Shlexf(str)
}

// Shlexf sets the command by shell-splitting the formatted string.
func Shlexf(str string, v ...interface{}) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.State = shlexf(str, v...)(ei.State)
	})
}

// Args sets the command argument vector verbatim.
func Args(a []string) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.State = args(a...)(ei.State)
	})
}

// AddEnv adds one environment variable.
func AddEnv(key, value string) RunOption {
	return AddEnvf(key, value)
}

// AddEnvf adds one environment variable with a formatted value.
func AddEnvf(key, value string, v ...interface{}) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.State = ei.State.AddEnvf(key, value, v...)
	})
}

// User sets the user the command runs as.
func User(str string) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.State = ei.State.User(str)
	})
}

// Dir sets the working directory.
func Dir(str string) RunOption {
	return Dirf(str)
}

// Dirf sets the working directory from a formatted string.
func Dirf(str string, v ...interface{}) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.State = ei.State.Dirf(str, v...)
	})
}

// Reset replaces the accumulated state with s.
func Reset(s State) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.State = ei.State.Reset(s)
	})
}

// With applies the given StateOptions to the accumulated state.
func With(so ...StateOption) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.State = ei.State.With(so...)
	})
}

// AddMount records an extra mount of mountState at dest.
func AddMount(dest string, mountState State, opts ...MountOption) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts})
	})
}

// ReadonlyRootFS makes the root filesystem read-only for the run.
func ReadonlyRootFS() RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.ReadonlyRootFS = true
	})
}

// WithProxy passes the given proxy configuration to the process.
func WithProxy(ps ProxyEnv) RunOption {
	return runOptionFunc(func(ei *ExecInfo) {
		ei.ProxyEnv = &ps
	})
}
|
||||||
|
|
||||||
|
// ExecInfo accumulates the configuration applied by RunOptions before an
// exec op is constructed from it (see State.Run).
type ExecInfo struct {
	opMetaWrapper // embedded per-op metadata (description, cache hints)

	// State is the root filesystem state the exec runs on top of.
	State State

	// Mounts are additional mounts attached to the exec.
	Mounts []MountInfo

	// ReadonlyRootFS mounts the root filesystem read-only when true.
	ReadonlyRootFS bool

	// ProxyEnv, when set, supplies proxy settings to the exec.
	ProxyEnv *ProxyEnv
}

// MountInfo describes a single mount to be attached to an exec.
type MountInfo struct {
	Target string        // mount point inside the exec's filesystem
	Source Output        // LLB output providing the mount contents
	Opts   []MountOption // extra per-mount options
}

// ProxyEnv holds proxy settings forwarded to an exec.
type ProxyEnv struct {
	HttpProxy  string
	HttpsProxy string
	FtpProxy   string
	NoProxy    string
}
|
|
@ -0,0 +1,60 @@
|
||||||
|
package llb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
|
||||||
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Definition is the LLB definition structure with per-vertex metadata entries
// Corresponds to the Definition structure defined in solver/pb.Definition.
type Definition struct {
	Def      [][]byte                     // marshaled pb.Op messages
	Metadata map[digest.Digest]OpMetadata // per-vertex metadata keyed by op digest
}

// ToPB converts def to its protobuf form. The metadata map is copied so the
// returned message does not alias def's map; Def shares its backing slice.
func (def *Definition) ToPB() *pb.Definition {
	md := make(map[digest.Digest]OpMetadata)
	for k, v := range def.Metadata {
		md[k] = v
	}
	return &pb.Definition{
		Def:      def.Def,
		Metadata: md,
	}
}

// FromPB populates def from its protobuf form, copying the metadata map.
// Note that Def shares its backing slice with x.
func (def *Definition) FromPB(x *pb.Definition) {
	def.Def = x.Def
	def.Metadata = make(map[digest.Digest]OpMetadata)
	for k, v := range x.Metadata {
		def.Metadata[k] = v
	}
}
|
||||||
|
|
||||||
|
type OpMetadata = pb.OpMetadata
|
||||||
|
|
||||||
|
func WriteTo(def *Definition, w io.Writer) error {
|
||||||
|
b, err := def.ToPB().Marshal()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = w.Write(b)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReadFrom(r io.Reader) (*Definition, error) {
|
||||||
|
b, err := ioutil.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var pbDef pb.Definition
|
||||||
|
if err := pbDef.Unmarshal(b); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var def Definition
|
||||||
|
def.FromPB(&pbDef)
|
||||||
|
return &def, nil
|
||||||
|
}
|
|
@ -0,0 +1,152 @@
|
||||||
|
package llb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
|
||||||
|
"github.com/google/shlex"
|
||||||
|
)
|
||||||
|
|
||||||
|
// contextKeyT is a private key type for values stored in a State's context,
// preventing collisions with keys from other packages.
type contextKeyT string

var (
	keyArgs = contextKeyT("llb.exec.args") // []string: exec arguments
	keyDir  = contextKeyT("llb.exec.dir")  // string: working directory
	keyEnv  = contextKeyT("llb.exec.env")  // EnvList: environment variables
	keyUser = contextKeyT("llb.exec.user") // string: user to run as
)
|
||||||
|
|
||||||
|
func addEnv(key, value string) StateOption {
|
||||||
|
return addEnvf(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func addEnvf(key, value string, v ...interface{}) StateOption {
|
||||||
|
return func(s State) State {
|
||||||
|
return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dir(str string) StateOption {
|
||||||
|
return dirf(str)
|
||||||
|
}
|
||||||
|
|
||||||
|
func dirf(str string, v ...interface{}) StateOption {
|
||||||
|
return func(s State) State {
|
||||||
|
value := fmt.Sprintf(str, v...)
|
||||||
|
if !path.IsAbs(value) {
|
||||||
|
prev := getDir(s)
|
||||||
|
if prev == "" {
|
||||||
|
prev = "/"
|
||||||
|
}
|
||||||
|
value = path.Join(prev, value)
|
||||||
|
}
|
||||||
|
return s.WithValue(keyDir, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func user(str string) StateOption {
|
||||||
|
return func(s State) State {
|
||||||
|
return s.WithValue(keyUser, str)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func reset(s_ State) StateOption {
|
||||||
|
return func(s State) State {
|
||||||
|
s = NewState(s.Output())
|
||||||
|
s.ctx = s_.ctx
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getEnv(s State) EnvList {
|
||||||
|
v := s.Value(keyEnv)
|
||||||
|
if v != nil {
|
||||||
|
return v.(EnvList)
|
||||||
|
}
|
||||||
|
return EnvList{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getDir(s State) string {
|
||||||
|
v := s.Value(keyDir)
|
||||||
|
if v != nil {
|
||||||
|
return v.(string)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func getArgs(s State) []string {
|
||||||
|
v := s.Value(keyArgs)
|
||||||
|
if v != nil {
|
||||||
|
return v.([]string)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getUser(s State) string {
|
||||||
|
v := s.Value(keyUser)
|
||||||
|
if v != nil {
|
||||||
|
return v.(string)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func args(args ...string) StateOption {
|
||||||
|
return func(s State) State {
|
||||||
|
return s.WithValue(keyArgs, args)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func shlexf(str string, v ...interface{}) StateOption {
|
||||||
|
return func(s State) State {
|
||||||
|
arg, err := shlex.Split(fmt.Sprintf(str, v...))
|
||||||
|
if err != nil {
|
||||||
|
// TODO: handle error
|
||||||
|
}
|
||||||
|
return args(arg...)(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnvList is an ordered list of environment variables with unique keys
// maintained by AddOrReplace.
type EnvList []KeyValue

// KeyValue is a single environment variable entry.
type KeyValue struct {
	key   string
	value string
}

// AddOrReplace returns a new list where k maps to v. Any existing entry for
// k is removed first, so the updated entry always moves to the end.
func (e EnvList) AddOrReplace(k, v string) EnvList {
	out := e.Delete(k)
	return append(out, KeyValue{key: k, value: v})
}

// Delete returns a copy of the list with the first entry for k removed.
func (e EnvList) Delete(k string) EnvList {
	out := append(EnvList(nil), e...)
	i, found := out.Index(k)
	if !found {
		return out
	}
	return append(out[:i], out[i+1:]...)
}

// Get returns the value for k and whether it is present.
func (e EnvList) Get(k string) (string, bool) {
	i, found := e.Index(k)
	if !found {
		return "", false
	}
	return e[i].value, true
}

// Index returns the position of the first entry for k, or (-1, false).
func (e EnvList) Index(k string) (int, bool) {
	for i := range e {
		if e[i].key == k {
			return i, true
		}
	}
	return -1, false
}

// ToArray renders the list as "KEY=value" strings in order.
func (e EnvList) ToArray() []string {
	out := make([]string, len(e))
	for i, kv := range e {
		out[i] = kv.key + "=" + kv.value
	}
	return out
}
|
|
@ -0,0 +1,17 @@
|
||||||
|
package llb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithMetaResolver configures the Image source to resolve the image config
// through mr (see Image).
func WithMetaResolver(mr ImageMetaResolver) ImageOption {
	return ImageOptionFunc(func(ii *ImageInfo) {
		ii.metaResolver = mr
	})
}

// ImageMetaResolver resolves an image reference to its config digest and
// raw config bytes.
type ImageMetaResolver interface {
	ResolveImageConfig(ctx context.Context, ref string) (digest.Digest, []byte, error)
}
|
|
@ -0,0 +1,359 @@
|
||||||
|
package llb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
_ "crypto/sha256"
|
||||||
|
"encoding/json"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/distribution/reference"
|
||||||
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SourceOp is an LLB operation that materializes content from an external
// source (image, git repo, local dir, HTTP URL) identified by an id string.
type SourceOp struct {
	id               string            // scheme-prefixed identifier, e.g. "docker-image://..."
	attrs            map[string]string // scheme-specific attributes (pb.Attr* keys)
	output           Output            // the op's single output
	cachedPBDigest   digest.Digest     // memoized digest of cachedPB
	cachedPB         []byte            // memoized marshaled pb.Op
	cachedOpMetadata OpMetadata        // per-op metadata returned by Marshal
	err              error             // deferred construction error, surfaced by Validate
}

// NewSource constructs a SourceOp with the given identifier, attributes and
// metadata, wiring up its single output.
func NewSource(id string, attrs map[string]string, md OpMetadata) *SourceOp {
	s := &SourceOp{
		id:               id,
		attrs:            attrs,
		cachedOpMetadata: md,
	}
	s.output = &output{vertex: s}
	return s
}

// Validate reports any deferred construction error and rejects an empty
// identifier.
func (s *SourceOp) Validate() error {
	if s.err != nil {
		return s.err
	}
	if s.id == "" {
		return errors.Errorf("source identifier can't be empty")
	}
	return nil
}
|
||||||
|
|
||||||
|
// Marshal serializes the op to its protobuf form, memoizing the bytes and
// digest so repeated calls return identical results without re-encoding.
func (s *SourceOp) Marshal() (digest.Digest, []byte, *OpMetadata, error) {
	if s.cachedPB != nil {
		// Already marshaled once; reuse the cached encoding.
		return s.cachedPBDigest, s.cachedPB, &s.cachedOpMetadata, nil
	}
	if err := s.Validate(); err != nil {
		return "", nil, nil, err
	}

	proto := &pb.Op{
		Op: &pb.Op_Source{
			Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs},
		},
	}
	dt, err := proto.Marshal()
	if err != nil {
		return "", nil, nil, err
	}
	s.cachedPB = dt
	s.cachedPBDigest = digest.FromBytes(dt)
	return s.cachedPBDigest, dt, &s.cachedOpMetadata, nil
}
|
||||||
|
|
||||||
|
func (s *SourceOp) Output() Output {
|
||||||
|
return s.output
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SourceOp) Inputs() []Output {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func Source(id string) State {
|
||||||
|
return NewState(NewSource(id, nil, OpMetadata{}).Output())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Image returns a State rooted at a container image source. When a meta
// resolver is configured (WithMetaResolver), the image config is fetched
// and its Env and WorkingDir are applied to the returned state. Errors are
// deferred onto the source op and surface later via Validate/Marshal.
func Image(ref string, opts ...ImageOption) State {
	r, err := reference.ParseNormalizedNamed(ref)
	if err == nil {
		// Normalize the reference, e.g. add the implicit ":latest" tag.
		ref = reference.TagNameOnly(r).String()
	}
	var info ImageInfo
	for _, opt := range opts {
		opt.SetImageOption(&info)
	}
	src := NewSource("docker-image://"+ref, nil, info.Metadata()) // controversial
	if err != nil {
		src.err = err
	}
	if info.metaResolver != nil {
		_, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref)
		if err != nil {
			src.err = err
		} else {
			// Minimal view of the OCI image config; only the fields used below.
			// NOTE(review): Config.User is parsed but never applied to the state.
			var img struct {
				Config struct {
					Env        []string `json:"Env,omitempty"`
					WorkingDir string   `json:"WorkingDir,omitempty"`
					User       string   `json:"User,omitempty"`
				} `json:"config,omitempty"`
			}
			if err := json.Unmarshal(dt, &img); err != nil {
				src.err = err
			} else {
				st := NewState(src.Output())
				for _, env := range img.Config.Env {
					// Split "KEY=value"; a bare "KEY" yields an empty value.
					parts := strings.SplitN(env, "=", 2)
					if len(parts[0]) > 0 {
						var v string
						if len(parts) > 1 {
							v = parts[1]
						}
						st = st.AddEnv(parts[0], v)
					}
				}
				st = st.Dir(img.Config.WorkingDir)
				return st
			}
		}
	}
	return NewState(src.Output())
}
|
||||||
|
|
||||||
|
// ImageOption mutates ImageInfo during Image construction.
type ImageOption interface {
	SetImageOption(*ImageInfo)
}

// ImageOptionFunc adapts a plain function to the ImageOption interface.
type ImageOptionFunc func(*ImageInfo)

// SetImageOption implements ImageOption.
func (fn ImageOptionFunc) SetImageOption(ii *ImageInfo) {
	fn(ii)
}

// ImageInfo carries options collected for an Image source.
type ImageInfo struct {
	opMetaWrapper
	metaResolver ImageMetaResolver // optional resolver for the image config
}
|
||||||
|
|
||||||
|
// Git returns a State rooted at a git source. remote may carry a transport
// prefix (http://, https://, git://, git@); when it does, the full URL
// (minus any "#fragment") is recorded as the AttrFullRemoteURL attribute
// and the prefix is stripped from the source identifier.
func Git(remote, ref string, opts ...GitOption) State {
	url := ""

	for _, prefix := range []string{
		"http://", "https://", "git://", "git@",
	} {
		if strings.HasPrefix(remote, prefix) {
			url = strings.Split(remote, "#")[0]
			remote = strings.TrimPrefix(remote, prefix)
		}
	}

	id := remote

	if ref != "" {
		// Encode the ref as a fragment of the identifier.
		id += "#" + ref
	}

	gi := &GitInfo{}
	for _, o := range opts {
		o.SetGitOption(gi)
	}
	attrs := map[string]string{}
	if gi.KeepGitDir {
		attrs[pb.AttrKeepGitDir] = "true"
	}
	if url != "" {
		attrs[pb.AttrFullRemoteURL] = url
	}
	source := NewSource("git://"+id, attrs, gi.Metadata())
	return NewState(source.Output())
}

// GitOption mutates GitInfo during Git construction.
type GitOption interface {
	SetGitOption(*GitInfo)
}

// gitOptionFunc adapts a plain function to GitOption.
type gitOptionFunc func(*GitInfo)

// SetGitOption implements GitOption.
func (fn gitOptionFunc) SetGitOption(gi *GitInfo) {
	fn(gi)
}

// GitInfo carries options collected for a Git source.
type GitInfo struct {
	opMetaWrapper
	KeepGitDir bool // keep the .git directory in the checkout
}

// KeepGitDir instructs the git source to retain the .git directory.
func KeepGitDir() GitOption {
	return gitOptionFunc(func(gi *GitInfo) {
		gi.KeepGitDir = true
	})
}
|
||||||
|
|
||||||
|
// Scratch returns a State with no output, i.e. an empty root filesystem.
func Scratch() State {
	return NewState(nil)
}

// Local returns a State sourced from a client-local directory identified by
// name. Non-empty option fields are encoded as source attributes
// (pb.AttrLocal* / pattern keys).
func Local(name string, opts ...LocalOption) State {
	gi := &LocalInfo{}

	for _, o := range opts {
		o.SetLocalOption(gi)
	}
	attrs := map[string]string{}
	if gi.SessionID != "" {
		attrs[pb.AttrLocalSessionID] = gi.SessionID
	}
	if gi.IncludePatterns != "" {
		attrs[pb.AttrIncludePatterns] = gi.IncludePatterns
	}
	if gi.FollowPaths != "" {
		attrs[pb.AttrFollowPaths] = gi.FollowPaths
	}
	if gi.ExcludePatterns != "" {
		attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns
	}
	if gi.SharedKeyHint != "" {
		attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint
	}

	source := NewSource("local://"+name, attrs, gi.Metadata())
	return NewState(source.Output())
}
|
||||||
|
|
||||||
|
// LocalOption mutates LocalInfo during Local construction.
type LocalOption interface {
	SetLocalOption(*LocalInfo)
}

// localOptionFunc adapts a plain function to LocalOption.
type localOptionFunc func(*LocalInfo)

// SetLocalOption implements LocalOption.
func (fn localOptionFunc) SetLocalOption(li *LocalInfo) {
	fn(li)
}

// SessionID pins the local source to a specific client session.
func SessionID(id string) LocalOption {
	return localOptionFunc(func(li *LocalInfo) {
		li.SessionID = id
	})
}
|
||||||
|
|
||||||
|
func IncludePatterns(p []string) LocalOption {
|
||||||
|
return localOptionFunc(func(li *LocalInfo) {
|
||||||
|
if len(p) == 0 {
|
||||||
|
li.IncludePatterns = ""
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dt, _ := json.Marshal(p) // empty on error
|
||||||
|
li.IncludePatterns = string(dt)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func FollowPaths(p []string) LocalOption {
|
||||||
|
return localOptionFunc(func(li *LocalInfo) {
|
||||||
|
if len(p) == 0 {
|
||||||
|
li.FollowPaths = ""
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dt, _ := json.Marshal(p) // empty on error
|
||||||
|
li.FollowPaths = string(dt)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExcludePatterns(p []string) LocalOption {
|
||||||
|
return localOptionFunc(func(li *LocalInfo) {
|
||||||
|
if len(p) == 0 {
|
||||||
|
li.ExcludePatterns = ""
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dt, _ := json.Marshal(p) // empty on error
|
||||||
|
li.ExcludePatterns = string(dt)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SharedKeyHint sets a hint used to identify the local source across
// sessions (stored as pb.AttrSharedKeyHint).
func SharedKeyHint(h string) LocalOption {
	return localOptionFunc(func(li *LocalInfo) {
		li.SharedKeyHint = h
	})
}

// LocalInfo carries options collected for a Local source. The pattern and
// path fields hold JSON-encoded []string values (see IncludePatterns,
// ExcludePatterns, FollowPaths).
type LocalInfo struct {
	opMetaWrapper
	SessionID       string
	IncludePatterns string
	ExcludePatterns string
	FollowPaths     string
	SharedKeyHint   string
}
|
||||||
|
|
||||||
|
func HTTP(url string, opts ...HTTPOption) State {
|
||||||
|
hi := &HTTPInfo{}
|
||||||
|
for _, o := range opts {
|
||||||
|
o.SetHTTPOption(hi)
|
||||||
|
}
|
||||||
|
attrs := map[string]string{}
|
||||||
|
if hi.Checksum != "" {
|
||||||
|
attrs[pb.AttrHTTPChecksum] = hi.Checksum.String()
|
||||||
|
}
|
||||||
|
if hi.Filename != "" {
|
||||||
|
attrs[pb.AttrHTTPFilename] = hi.Filename
|
||||||
|
}
|
||||||
|
if hi.Perm != 0 {
|
||||||
|
attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8)
|
||||||
|
}
|
||||||
|
if hi.UID != 0 {
|
||||||
|
attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID)
|
||||||
|
}
|
||||||
|
if hi.UID != 0 {
|
||||||
|
attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID)
|
||||||
|
}
|
||||||
|
|
||||||
|
source := NewSource(url, attrs, hi.Metadata())
|
||||||
|
return NewState(source.Output())
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPInfo carries options collected for an HTTP source.
type HTTPInfo struct {
	opMetaWrapper
	Checksum digest.Digest // content digest recorded as AttrHTTPChecksum when set
	Filename string        // output filename override
	Perm     int           // file mode bits, encoded octal in attrs
	UID      int
	GID      int
}

// HTTPOption mutates HTTPInfo during HTTP construction.
type HTTPOption interface {
	SetHTTPOption(*HTTPInfo)
}

// httpOptionFunc adapts a plain function to HTTPOption.
type httpOptionFunc func(*HTTPInfo)

// SetHTTPOption implements HTTPOption.
func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) {
	fn(hi)
}

// Checksum records the expected content digest for the download.
func Checksum(dgst digest.Digest) HTTPOption {
	return httpOptionFunc(func(hi *HTTPInfo) {
		hi.Checksum = dgst
	})
}

// Chmod sets the permission bits of the downloaded file (lower 9 bits only).
func Chmod(perm os.FileMode) HTTPOption {
	return httpOptionFunc(func(hi *HTTPInfo) {
		hi.Perm = int(perm) & 0777
	})
}

// Filename overrides the name of the downloaded file.
func Filename(name string) HTTPOption {
	return httpOptionFunc(func(hi *HTTPInfo) {
		hi.Filename = name
	})
}

// Chown sets the owner uid/gid recorded for the downloaded file.
func Chown(uid, gid int) HTTPOption {
	return httpOptionFunc(func(hi *HTTPInfo) {
		hi.UID = uid
		hi.GID = gid
	})
}
|
|
@ -0,0 +1,316 @@
|
||||||
|
package llb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
"github.com/moby/buildkit/util/system"
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StateOption transforms a State into a new State.
type StateOption func(State) State

// Output is a handle to one output of a Vertex; it can be converted into a
// pb.Input for use as another op's input.
type Output interface {
	ToInput() (*pb.Input, error)
	Vertex() Vertex
}

// Vertex is a single node in the LLB graph.
type Vertex interface {
	Validate() error
	Marshal() (digest.Digest, []byte, *OpMetadata, error)
	Output() Output
	Inputs() []Output
}

// NewState wraps an output in a State initialized with the default working
// directory ("/") and the default PATH environment variable.
func NewState(o Output) State {
	s := State{
		out: o,
		ctx: context.Background(),
	}
	s = dir("/")(s)
	s = addEnv("PATH", system.DefaultPathEnv)(s)
	return s
}

// State is an immutable snapshot of build state: an output plus metadata
// (cwd, env, args, user) stored in a context value chain.
type State struct {
	out Output
	ctx context.Context
}
|
||||||
|
|
||||||
|
func (s State) WithValue(k, v interface{}) State {
|
||||||
|
return State{
|
||||||
|
out: s.out,
|
||||||
|
ctx: context.WithValue(s.ctx, k, v),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s State) Value(k interface{}) interface{} {
|
||||||
|
return s.ctx.Value(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal serializes the graph reachable from s into a Definition. The final
// entry of Def is a terminal op whose only input points at s's output; md
// options are applied to every vertex's metadata during traversal.
func (s State) Marshal(md ...MetadataOpt) (*Definition, error) {
	def := &Definition{
		Metadata: make(map[digest.Digest]OpMetadata, 0),
	}
	if s.Output() == nil {
		// Scratch state: nothing to serialize.
		return def, nil
	}
	def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, md)
	if err != nil {
		return def, err
	}
	inp, err := s.Output().ToInput()
	if err != nil {
		return def, err
	}
	// Terminal op: no body, just an input referencing the result vertex.
	proto := &pb.Op{Inputs: []*pb.Input{inp}}
	dt, err := proto.Marshal()
	if err != nil {
		return def, err
	}
	def.Def = append(def.Def, dt)
	return def, nil
}
|
||||||
|
|
||||||
|
// marshal appends v and (recursively) its inputs to def in dependency order.
// cache dedupes serialized ops by digest; vertexCache short-circuits
// vertices already visited in this traversal. Metadata for a digest reached
// through multiple vertices is merged, then the md options are applied.
func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, md []MetadataOpt) (*Definition, error) {
	if _, ok := vertexCache[v]; ok {
		return def, nil
	}
	// Inputs first so dependencies precede dependents in def.Def.
	for _, inp := range v.Inputs() {
		var err error
		def, err = marshal(inp.Vertex(), def, cache, vertexCache, md)
		if err != nil {
			return def, err
		}
	}

	dgst, dt, opMeta, err := v.Marshal()
	if err != nil {
		return def, err
	}
	vertexCache[v] = struct{}{}
	if opMeta != nil {
		m := mergeMetadata(def.Metadata[dgst], *opMeta)
		for _, f := range md {
			f.SetMetadataOption(&m)
		}
		def.Metadata[dgst] = m
	}
	if _, ok := cache[dgst]; ok {
		// Identical op already serialized; keep a single copy.
		return def, nil
	}
	def.Def = append(def.Def, dt)
	cache[dgst] = struct{}{}
	return def, nil
}
|
||||||
|
|
||||||
|
func (s State) Validate() error {
|
||||||
|
return s.Output().Vertex().Validate()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s State) Output() Output {
|
||||||
|
return s.out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s State) WithOutput(o Output) State {
|
||||||
|
return State{
|
||||||
|
out: o,
|
||||||
|
ctx: s.ctx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run constructs an exec op on top of s from the given RunOptions and
// returns an ExecState whose output is the exec's root filesystem.
func (s State) Run(ro ...RunOption) ExecState {
	ei := &ExecInfo{State: s}
	for _, o := range ro {
		o.SetRunOption(ei)
	}
	// Snapshot exec metadata from the (possibly option-modified) state.
	meta := Meta{
		Args:     getArgs(ei.State),
		Cwd:      getDir(ei.State),
		Env:      getEnv(ei.State),
		User:     getUser(ei.State),
		ProxyEnv: ei.ProxyEnv,
	}

	exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Metadata())
	for _, m := range ei.Mounts {
		exec.AddMount(m.Target, m.Source, m.Opts...)
	}

	return ExecState{
		State: s.WithOutput(exec.Output()),
		exec:  exec,
	}
}
|
||||||
|
|
||||||
|
// AddEnv returns s with key set to value in its environment.
func (s State) AddEnv(key, value string) State {
	return s.AddEnvf(key, value)
}

// AddEnvf is AddEnv with fmt.Sprintf expansion of value.
func (s State) AddEnvf(key, value string, v ...interface{}) State {
	return addEnvf(key, value, v...)(s)
}

// Dir returns s with its working directory set to str.
func (s State) Dir(str string) State {
	return s.Dirf(str)
}

// Dirf is Dir with fmt.Sprintf expansion.
func (s State) Dirf(str string, v ...interface{}) State {
	return dirf(str, v...)(s)
}

// GetEnv returns the value of key in s's environment and whether it is set.
func (s State) GetEnv(key string) (string, bool) {
	return getEnv(s).Get(key)
}

// GetDir returns s's working directory.
func (s State) GetDir() string {
	return getDir(s)
}

// GetArgs returns s's exec arguments.
func (s State) GetArgs() []string {
	return getArgs(s)
}

// Reset returns a state with s's output but s2's metadata context.
func (s State) Reset(s2 State) State {
	return reset(s2)(s)
}

// User returns s with the exec user set to v.
func (s State) User(v string) State {
	return user(v)(s)
}

// With applies the given StateOptions to s in order.
func (s State) With(so ...StateOption) State {
	for _, o := range so {
		s = o(s)
	}
	return s
}
|
||||||
|
|
||||||
|
// output is the default Output implementation: a vertex plus an optional
// lazily-computed output index and a deferred error.
type output struct {
	vertex   Vertex
	getIndex func() (pb.OutputIndex, error) // nil means index 0
	err      error                          // deferred error surfaced by ToInput
}

// ToInput marshals the backing vertex and returns a pb.Input referencing
// its digest and this output's index.
func (o *output) ToInput() (*pb.Input, error) {
	if o.err != nil {
		return nil, o.err
	}
	var index pb.OutputIndex
	if o.getIndex != nil {
		var err error
		index, err = o.getIndex()
		if err != nil {
			return nil, err
		}
	}
	dgst, _, _, err := o.vertex.Marshal()
	if err != nil {
		return nil, err
	}
	return &pb.Input{Digest: dgst, Index: index}, nil
}

// Vertex returns the vertex this output belongs to.
func (o *output) Vertex() Vertex {
	return o.vertex
}
|
||||||
|
|
||||||
|
// MetadataOpt mutates per-op metadata. It embeds every op option interface
// so a single value can be passed to any op constructor.
type MetadataOpt interface {
	SetMetadataOption(*OpMetadata)
	RunOption
	LocalOption
	HTTPOption
	ImageOption
	GitOption
}

// metadataOptFunc adapts a plain function to MetadataOpt.
type metadataOptFunc func(m *OpMetadata)

// SetMetadataOption implements MetadataOpt.
func (fn metadataOptFunc) SetMetadataOption(m *OpMetadata) {
	fn(m)
}

// The Set*Option methods below route the metadata mutation through each
// info type's embedded opMetaWrapper.

func (fn metadataOptFunc) SetRunOption(ei *ExecInfo) {
	ei.ApplyMetadata(fn)
}

func (fn metadataOptFunc) SetLocalOption(li *LocalInfo) {
	li.ApplyMetadata(fn)
}

func (fn metadataOptFunc) SetHTTPOption(hi *HTTPInfo) {
	hi.ApplyMetadata(fn)
}

func (fn metadataOptFunc) SetImageOption(ii *ImageInfo) {
	ii.ApplyMetadata(fn)
}

func (fn metadataOptFunc) SetGitOption(gi *GitInfo) {
	gi.ApplyMetadata(fn)
}
|
||||||
|
|
||||||
|
func mergeMetadata(m1, m2 OpMetadata) OpMetadata {
|
||||||
|
if m2.IgnoreCache {
|
||||||
|
m1.IgnoreCache = true
|
||||||
|
}
|
||||||
|
if len(m2.Description) > 0 {
|
||||||
|
if m1.Description == nil {
|
||||||
|
m1.Description = make(map[string]string)
|
||||||
|
}
|
||||||
|
for k, v := range m2.Description {
|
||||||
|
m1.Description[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m2.ExportCache != nil {
|
||||||
|
m1.ExportCache = m2.ExportCache
|
||||||
|
}
|
||||||
|
|
||||||
|
return m1
|
||||||
|
}
|
||||||
|
|
||||||
|
// IgnoreCache marks a vertex so any cached result for it is bypassed.
var IgnoreCache = metadataOptFunc(func(md *OpMetadata) {
	md.IgnoreCache = true
})

// WithDescription attaches descriptive key/value pairs to a vertex.
func WithDescription(m map[string]string) MetadataOpt {
	return metadataOptFunc(func(md *OpMetadata) {
		md.Description = m
	})
}

// WithExportCache forces results for this vertex to be exported with the cache
func WithExportCache() MetadataOpt {
	return metadataOptFunc(func(md *OpMetadata) {
		md.ExportCache = &pb.ExportCache{Value: true}
	})
}

// WithoutExportCache sets results for this vertex to be not exported with
// the cache
func WithoutExportCache() MetadataOpt {
	return metadataOptFunc(func(md *OpMetadata) {
		// ExportCache with value false means to disable exporting
		md.ExportCache = &pb.ExportCache{Value: false}
	})
}

// WithoutDefaultExportCache resets the cache export for the vertex to use
// the default defined by the build configuration.
func WithoutDefaultExportCache() MetadataOpt {
	return metadataOptFunc(func(md *OpMetadata) {
		// nil means no vertex based config has been set
		md.ExportCache = nil
	})
}
|
||||||
|
|
||||||
|
// opMetaWrapper embeds OpMetadata into the various *Info option-collector
// types and exposes helpers to mutate and read it.
type opMetaWrapper struct {
	OpMetadata
}

// ApplyMetadata runs f against the wrapped metadata.
func (mw *opMetaWrapper) ApplyMetadata(f func(m *OpMetadata)) {
	f(&mw.OpMetadata)
}

// Metadata returns a copy of the wrapped metadata.
func (mw *opMetaWrapper) Metadata() OpMetadata {
	return mw.OpMetadata
}
|
|
@ -0,0 +1,50 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Prune asks the daemon to prune its build cache and forwards each record
// of removed cache state into ch (when non-nil) until the server closes the
// stream. The caller owns ch; Prune never closes it. Sends block, so the
// caller must drain ch concurrently.
func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error {
	info := &PruneInfo{}
	for _, o := range opts {
		o(info)
	}

	req := &controlapi.PruneRequest{}
	cl, err := c.controlClient().Prune(ctx, req)
	if err != nil {
		return errors.Wrap(err, "failed to call prune")
	}

	for {
		d, err := cl.Recv()
		if err != nil {
			if err == io.EOF {
				// Normal end of stream.
				return nil
			}
			return err
		}
		if ch != nil {
			ch <- UsageInfo{
				ID:          d.ID,
				Mutable:     d.Mutable,
				InUse:       d.InUse,
				Size:        d.Size_,
				Parent:      d.Parent,
				CreatedAt:   d.CreatedAt,
				Description: d.Description,
				UsageCount:  int(d.UsageCount),
				LastUsedAt:  d.LastUsedAt,
			}
		}
	}
}

// PruneOption configures a Prune call.
type PruneOption func(*PruneInfo)

// PruneInfo collects prune options; currently it has none.
type PruneInfo struct {
}
|
|
@ -0,0 +1,251 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/client/llb"
|
||||||
|
"github.com/moby/buildkit/identity"
|
||||||
|
"github.com/moby/buildkit/session"
|
||||||
|
"github.com/moby/buildkit/session/filesync"
|
||||||
|
"github.com/moby/buildkit/session/grpchijack"
|
||||||
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
opentracing "github.com/opentracing/opentracing-go"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SolveOpt configures a single Solve call.
type SolveOpt struct {
	Exporter          string            // exporter name, e.g. ExporterLocal/ExporterOCI/ExporterDocker
	ExporterAttrs     map[string]string // exporter-specific attributes
	ExporterOutput    io.WriteCloser    // for ExporterOCI and ExporterDocker
	ExporterOutputDir string            // for ExporterLocal
	LocalDirs         map[string]string // local source name -> client directory to sync
	SharedKey         string            // shared key passed to session.NewSession
	Frontend          string            // frontend name; mutually exclusive with a non-nil Definition
	FrontendAttrs     map[string]string // frontend-specific attributes
	ExportCache       string            // cache export target ref
	ExportCacheAttrs  map[string]string // cache export attributes
	ImportCache       []string          // cache import refs
	Session           []session.Attachable
}
|
||||||
|
|
||||||
|
// Solve calls Solve on the controller.
|
||||||
|
// def must be nil if (and only if) opt.Frontend is set.
|
||||||
|
func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) {
|
||||||
|
defer func() {
|
||||||
|
if statusChan != nil {
|
||||||
|
close(statusChan)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if opt.Frontend == "" && def == nil {
|
||||||
|
return nil, errors.New("invalid empty definition")
|
||||||
|
}
|
||||||
|
if opt.Frontend != "" && def != nil {
|
||||||
|
return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend)
|
||||||
|
}
|
||||||
|
|
||||||
|
syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ref := identity.NewID()
|
||||||
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
|
statusContext, cancelStatus := context.WithCancel(context.Background())
|
||||||
|
defer cancelStatus()
|
||||||
|
|
||||||
|
if span := opentracing.SpanFromContext(ctx); span != nil {
|
||||||
|
statusContext = opentracing.ContextWithSpan(statusContext, span)
|
||||||
|
}
|
||||||
|
|
||||||
|
s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to create session")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(syncedDirs) > 0 {
|
||||||
|
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, a := range opt.Session {
|
||||||
|
s.Allow(a)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch opt.Exporter {
|
||||||
|
case ExporterLocal:
|
||||||
|
if opt.ExporterOutput != nil {
|
||||||
|
return nil, errors.New("output file writer is not supported by local exporter")
|
||||||
|
}
|
||||||
|
if opt.ExporterOutputDir == "" {
|
||||||
|
return nil, errors.New("output directory is required for local exporter")
|
||||||
|
}
|
||||||
|
s.Allow(filesync.NewFSSyncTargetDir(opt.ExporterOutputDir))
|
||||||
|
case ExporterOCI, ExporterDocker:
|
||||||
|
if opt.ExporterOutputDir != "" {
|
||||||
|
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
|
||||||
|
}
|
||||||
|
if opt.ExporterOutput == nil {
|
||||||
|
return nil, errors.Errorf("output file writer is required for %s exporter", opt.Exporter)
|
||||||
|
}
|
||||||
|
s.Allow(filesync.NewFSSyncTarget(opt.ExporterOutput))
|
||||||
|
default:
|
||||||
|
if opt.ExporterOutput != nil {
|
||||||
|
return nil, errors.Errorf("output file writer is not supported by %s exporter", opt.Exporter)
|
||||||
|
}
|
||||||
|
if opt.ExporterOutputDir != "" {
|
||||||
|
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
eg.Go(func() error {
|
||||||
|
return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
|
||||||
|
})
|
||||||
|
|
||||||
|
var res *SolveResponse
|
||||||
|
eg.Go(func() error {
|
||||||
|
defer func() { // make sure the Status ends cleanly on build errors
|
||||||
|
go func() {
|
||||||
|
<-time.After(3 * time.Second)
|
||||||
|
cancelStatus()
|
||||||
|
}()
|
||||||
|
logrus.Debugf("stopping session")
|
||||||
|
s.Close()
|
||||||
|
}()
|
||||||
|
var pbd *pb.Definition
|
||||||
|
if def != nil {
|
||||||
|
pbd = def.ToPB()
|
||||||
|
}
|
||||||
|
resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
|
||||||
|
Ref: ref,
|
||||||
|
Definition: pbd,
|
||||||
|
Exporter: opt.Exporter,
|
||||||
|
ExporterAttrs: opt.ExporterAttrs,
|
||||||
|
Session: s.ID(),
|
||||||
|
Frontend: opt.Frontend,
|
||||||
|
FrontendAttrs: opt.FrontendAttrs,
|
||||||
|
Cache: controlapi.CacheOptions{
|
||||||
|
ExportRef: opt.ExportCache,
|
||||||
|
ImportRefs: opt.ImportCache,
|
||||||
|
ExportAttrs: opt.ExportCacheAttrs,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to solve")
|
||||||
|
}
|
||||||
|
res = &SolveResponse{
|
||||||
|
ExporterResponse: resp.ExporterResponse,
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
eg.Go(func() error {
|
||||||
|
stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{
|
||||||
|
Ref: ref,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "failed to get status")
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
resp, err := stream.Recv()
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.Wrap(err, "failed to receive status")
|
||||||
|
}
|
||||||
|
s := SolveStatus{}
|
||||||
|
for _, v := range resp.Vertexes {
|
||||||
|
s.Vertexes = append(s.Vertexes, &Vertex{
|
||||||
|
Digest: v.Digest,
|
||||||
|
Inputs: v.Inputs,
|
||||||
|
Name: v.Name,
|
||||||
|
Started: v.Started,
|
||||||
|
Completed: v.Completed,
|
||||||
|
Error: v.Error,
|
||||||
|
Cached: v.Cached,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for _, v := range resp.Statuses {
|
||||||
|
s.Statuses = append(s.Statuses, &VertexStatus{
|
||||||
|
ID: v.ID,
|
||||||
|
Vertex: v.Vertex,
|
||||||
|
Name: v.Name,
|
||||||
|
Total: v.Total,
|
||||||
|
Current: v.Current,
|
||||||
|
Timestamp: v.Timestamp,
|
||||||
|
Started: v.Started,
|
||||||
|
Completed: v.Completed,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
for _, v := range resp.Logs {
|
||||||
|
s.Logs = append(s.Logs, &VertexLog{
|
||||||
|
Vertex: v.Vertex,
|
||||||
|
Stream: int(v.Stream),
|
||||||
|
Data: v.Msg,
|
||||||
|
Timestamp: v.Timestamp,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if statusChan != nil {
|
||||||
|
statusChan <- &s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
|
||||||
|
for _, d := range localDirs {
|
||||||
|
fi, err := os.Stat(d)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not find %s", d)
|
||||||
|
}
|
||||||
|
if !fi.IsDir() {
|
||||||
|
return nil, errors.Errorf("%s not a directory", d)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dirs := make([]filesync.SyncedDir, 0, len(localDirs))
|
||||||
|
if def == nil {
|
||||||
|
for name, d := range localDirs {
|
||||||
|
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d})
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, dt := range def.Def {
|
||||||
|
var op pb.Op
|
||||||
|
if err := (&op).Unmarshal(dt); err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to parse llb proto op")
|
||||||
|
}
|
||||||
|
if src := op.GetSource(); src != nil {
|
||||||
|
if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property
|
||||||
|
name := strings.TrimPrefix(src.Identifier, "local://")
|
||||||
|
d, ok := localDirs[name]
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("local directory %s not enabled", name)
|
||||||
|
}
|
||||||
|
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d}) // TODO: excludes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return dirs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultSessionName() string {
|
||||||
|
wd, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
return filepath.Base(wd)
|
||||||
|
}
|
|
@ -0,0 +1,49 @@
|
||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WorkerInfo describes a single BuildKit worker as reported by the
// control API's ListWorkers call.
type WorkerInfo struct {
	ID     string            // unique identifier of the worker
	Labels map[string]string // key/value metadata attached to the worker
}
|
||||||
|
|
||||||
|
func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) {
|
||||||
|
info := &ListWorkersInfo{}
|
||||||
|
for _, o := range opts {
|
||||||
|
o(info)
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &controlapi.ListWorkersRequest{Filter: info.Filter}
|
||||||
|
resp, err := c.controlClient().ListWorkers(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to list workers")
|
||||||
|
}
|
||||||
|
|
||||||
|
var wi []*WorkerInfo
|
||||||
|
|
||||||
|
for _, w := range resp.Record {
|
||||||
|
wi = append(wi, &WorkerInfo{
|
||||||
|
ID: w.ID,
|
||||||
|
Labels: w.Labels,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return wi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListWorkersOption mutates the query settings used by Client.ListWorkers.
type ListWorkersOption func(*ListWorkersInfo)

// ListWorkersInfo holds the options accumulated from ListWorkersOption values.
type ListWorkersInfo struct {
	Filter []string // filter expressions forwarded to the control API
}

// WithWorkerFilter restricts ListWorkers to workers matching the given
// filter expressions.
func WithWorkerFilter(f []string) ListWorkersOption {
	return func(info *ListWorkersInfo) {
		info.Filter = f
	}
}
|
|
@ -0,0 +1,26 @@
|
||||||
|
package auth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/moby/buildkit/session"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string, string, error) {
|
||||||
|
return func(host string) (string, string, error) {
|
||||||
|
client := NewAuthClient(c.Conn())
|
||||||
|
|
||||||
|
resp, err := client.Credentials(ctx, &CredentialsRequest{
|
||||||
|
Host: host,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if st, ok := status.FromError(err); ok && st.Code() == codes.Unimplemented {
|
||||||
|
return "", "", nil
|
||||||
|
}
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
return resp.Username, resp.Secret, nil
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,673 @@
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: auth.proto
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package auth is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
auth.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
CredentialsRequest
|
||||||
|
CredentialsResponse
|
||||||
|
*/
|
||||||
|
package auth
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import context "golang.org/x/net/context"
|
||||||
|
import grpc "google.golang.org/grpc"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
type CredentialsRequest struct {
|
||||||
|
Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CredentialsRequest) Reset() { *m = CredentialsRequest{} }
|
||||||
|
func (*CredentialsRequest) ProtoMessage() {}
|
||||||
|
func (*CredentialsRequest) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
|
||||||
|
|
||||||
|
func (m *CredentialsRequest) GetHost() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Host
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type CredentialsResponse struct {
|
||||||
|
Username string `protobuf:"bytes,1,opt,name=Username,proto3" json:"Username,omitempty"`
|
||||||
|
Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CredentialsResponse) Reset() { *m = CredentialsResponse{} }
|
||||||
|
func (*CredentialsResponse) ProtoMessage() {}
|
||||||
|
func (*CredentialsResponse) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
|
||||||
|
|
||||||
|
func (m *CredentialsResponse) GetUsername() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Username
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CredentialsResponse) GetSecret() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Secret
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*CredentialsRequest)(nil), "moby.filesync.v1.CredentialsRequest")
|
||||||
|
proto.RegisterType((*CredentialsResponse)(nil), "moby.filesync.v1.CredentialsResponse")
|
||||||
|
}
|
||||||
|
func (this *CredentialsRequest) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
return this == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*CredentialsRequest)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(CredentialsRequest)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
return this == nil
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Host != that1.Host {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *CredentialsResponse) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
return this == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*CredentialsResponse)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(CredentialsResponse)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
return this == nil
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Username != that1.Username {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Secret != that1.Secret {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *CredentialsRequest) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 5)
|
||||||
|
s = append(s, "&auth.CredentialsRequest{")
|
||||||
|
s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n")
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func (this *CredentialsResponse) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 6)
|
||||||
|
s = append(s, "&auth.CredentialsResponse{")
|
||||||
|
s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n")
|
||||||
|
s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n")
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringAuth(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ context.Context
|
||||||
|
var _ grpc.ClientConn
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the grpc package it is being compiled against.
|
||||||
|
const _ = grpc.SupportPackageIsVersion4
|
||||||
|
|
||||||
|
// Client API for Auth service
|
||||||
|
|
||||||
|
type AuthClient interface {
|
||||||
|
Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type authClient struct {
|
||||||
|
cc *grpc.ClientConn
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAuthClient(cc *grpc.ClientConn) AuthClient {
|
||||||
|
return &authClient{cc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *authClient) Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) {
|
||||||
|
out := new(CredentialsResponse)
|
||||||
|
err := grpc.Invoke(ctx, "/moby.filesync.v1.Auth/Credentials", in, out, c.cc, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Server API for Auth service
|
||||||
|
|
||||||
|
type AuthServer interface {
|
||||||
|
Credentials(context.Context, *CredentialsRequest) (*CredentialsResponse, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
|
||||||
|
s.RegisterService(&_Auth_serviceDesc, srv)
|
||||||
|
}
|
||||||
|
|
||||||
|
func _Auth_Credentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||||
|
in := new(CredentialsRequest)
|
||||||
|
if err := dec(in); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if interceptor == nil {
|
||||||
|
return srv.(AuthServer).Credentials(ctx, in)
|
||||||
|
}
|
||||||
|
info := &grpc.UnaryServerInfo{
|
||||||
|
Server: srv,
|
||||||
|
FullMethod: "/moby.filesync.v1.Auth/Credentials",
|
||||||
|
}
|
||||||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||||
|
return srv.(AuthServer).Credentials(ctx, req.(*CredentialsRequest))
|
||||||
|
}
|
||||||
|
return interceptor(ctx, in, info, handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _Auth_serviceDesc = grpc.ServiceDesc{
|
||||||
|
ServiceName: "moby.filesync.v1.Auth",
|
||||||
|
HandlerType: (*AuthServer)(nil),
|
||||||
|
Methods: []grpc.MethodDesc{
|
||||||
|
{
|
||||||
|
MethodName: "Credentials",
|
||||||
|
Handler: _Auth_Credentials_Handler,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Streams: []grpc.StreamDesc{},
|
||||||
|
Metadata: "auth.proto",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CredentialsRequest) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CredentialsRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Host) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Host)))
|
||||||
|
i += copy(dAtA[i:], m.Host)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CredentialsResponse) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CredentialsResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Username) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Username)))
|
||||||
|
i += copy(dAtA[i:], m.Username)
|
||||||
|
}
|
||||||
|
if len(m.Secret) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintAuth(dAtA, i, uint64(len(m.Secret)))
|
||||||
|
i += copy(dAtA[i:], m.Secret)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *CredentialsRequest) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Host)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *CredentialsResponse) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Username)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Secret)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAuth(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovAuth(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozAuth(x uint64) (n int) {
|
||||||
|
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *CredentialsRequest) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&CredentialsRequest{`,
|
||||||
|
`Host:` + fmt.Sprintf("%v", this.Host) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func (this *CredentialsResponse) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&CredentialsResponse{`,
|
||||||
|
`Username:` + fmt.Sprintf("%v", this.Username) + `,`,
|
||||||
|
`Secret:` + fmt.Sprintf("%v", this.Secret) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringAuth(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *CredentialsRequest) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: CredentialsRequest: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: CredentialsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Host = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipAuth(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *CredentialsResponse) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: CredentialsResponse: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: CredentialsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Username = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Secret = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipAuth(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipAuth(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthAuth
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAuth
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipAuth(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
|
||||||
|
|
||||||
|
var fileDescriptorAuth = []byte{
|
||||||
|
// 224 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9,
|
||||||
|
0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0xcb, 0xcc,
|
||||||
|
0x49, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x33, 0x54, 0xd2, 0xe0, 0x12, 0x72, 0x2e, 0x4a, 0x4d,
|
||||||
|
0x49, 0xcd, 0x2b, 0xc9, 0x4c, 0xcc, 0x29, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12,
|
||||||
|
0xe2, 0x62, 0xf1, 0xc8, 0x2f, 0x2e, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x95,
|
||||||
|
0x3c, 0xb9, 0x84, 0x51, 0x54, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0x49, 0x71, 0x71, 0x84,
|
||||||
|
0x16, 0xa7, 0x16, 0xe5, 0x25, 0xe6, 0xa6, 0x42, 0x95, 0xc3, 0xf9, 0x42, 0x62, 0x5c, 0x6c, 0xc1,
|
||||||
|
0xa9, 0xc9, 0x45, 0xa9, 0x25, 0x12, 0x4c, 0x60, 0x19, 0x28, 0xcf, 0x28, 0x89, 0x8b, 0xc5, 0xb1,
|
||||||
|
0xb4, 0x24, 0x43, 0x28, 0x8a, 0x8b, 0x1b, 0xc9, 0x48, 0x21, 0x15, 0x3d, 0x74, 0xe7, 0xe9, 0x61,
|
||||||
|
0xba, 0x4d, 0x4a, 0x95, 0x80, 0x2a, 0x88, 0xbb, 0x9c, 0x8c, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1,
|
||||||
|
0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e,
|
||||||
|
0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31,
|
||||||
|
0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x14, 0x0b, 0x28, 0x90, 0x92, 0xd8, 0xc0,
|
||||||
|
0xa1, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x73, 0xf3, 0xd5, 0x33, 0x01, 0x00, 0x00,
|
||||||
|
}
|
|
@ -0,0 +1,19 @@
|
||||||
|
syntax = "proto3";

package moby.filesync.v1;

option go_package = "auth";

// Auth resolves registry credentials on behalf of the build daemon.
service Auth{
	rpc Credentials(CredentialsRequest) returns (CredentialsResponse);
}


// CredentialsRequest asks for the credentials of a single registry host.
message CredentialsRequest {
	string Host = 1;
}

// CredentialsResponse carries the resolved credentials. For token-based
// auth, Username is left empty and Secret holds the identity token.
message CredentialsResponse {
	string Username = 1;
	string Secret = 2;
}
|
44
vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go
generated
vendored
Normal file
44
vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
||||||
|
package authprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io/ioutil"
|
||||||
|
|
||||||
|
"github.com/docker/cli/cli/config"
|
||||||
|
"github.com/docker/cli/cli/config/configfile"
|
||||||
|
"github.com/moby/buildkit/session"
|
||||||
|
"github.com/moby/buildkit/session/auth"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewDockerAuthProvider returns a session attachable that serves registry
// credentials from the default Docker CLI config file. Warnings emitted
// while loading the config are discarded (written to ioutil.Discard).
func NewDockerAuthProvider() session.Attachable {
	return &authProvider{
		config: config.LoadDefaultConfigFile(ioutil.Discard),
	}
}
|
||||||
|
|
||||||
|
// authProvider implements the session Auth service backed by the local
// Docker CLI configuration file.
type authProvider struct {
	config *configfile.ConfigFile // loaded Docker CLI config holding stored credentials
}
|
||||||
|
|
||||||
|
// Register attaches the Auth service to the session's gRPC server so the
// daemon can query credentials over the session transport.
func (ap *authProvider) Register(server *grpc.Server) {
	auth.RegisterAuthServer(server, ap)
}
|
||||||
|
|
||||||
|
func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) {
|
||||||
|
if req.Host == "registry-1.docker.io" {
|
||||||
|
req.Host = "https://index.docker.io/v1/"
|
||||||
|
}
|
||||||
|
ac, err := ap.config.GetAuthConfig(req.Host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
res := &auth.CredentialsResponse{}
|
||||||
|
if ac.IdentityToken != "" {
|
||||||
|
res.Secret = ac.IdentityToken
|
||||||
|
} else {
|
||||||
|
res.Username = ac.Username
|
||||||
|
res.Secret = ac.Password
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
|
@ -0,0 +1,3 @@
|
||||||
|
package auth
|
||||||
|
|
||||||
|
//go:generate protoc --gogoslick_out=plugins=grpc:. auth.proto
|
|
@ -12,10 +12,11 @@ import (
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error {
|
func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes, followPaths []string, progress progressCb, _map func(*fsutil.Stat) bool) error {
|
||||||
return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
|
return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
|
||||||
ExcludePatterns: excludes,
|
ExcludePatterns: excludes,
|
||||||
IncludePatterns: includes,
|
IncludePatterns: includes,
|
||||||
|
FollowPaths: followPaths,
|
||||||
Map: _map,
|
Map: _map,
|
||||||
}, progress)
|
}, progress)
|
||||||
}
|
}
|
||||||
|
|
|
@ -18,6 +18,7 @@ const (
|
||||||
keyOverrideExcludes = "override-excludes"
|
keyOverrideExcludes = "override-excludes"
|
||||||
keyIncludePatterns = "include-patterns"
|
keyIncludePatterns = "include-patterns"
|
||||||
keyExcludePatterns = "exclude-patterns"
|
keyExcludePatterns = "exclude-patterns"
|
||||||
|
keyFollowPaths = "followpaths"
|
||||||
keyDirName = "dir-name"
|
keyDirName = "dir-name"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -87,6 +88,8 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
|
||||||
}
|
}
|
||||||
includes := opts[keyIncludePatterns]
|
includes := opts[keyIncludePatterns]
|
||||||
|
|
||||||
|
followPaths := opts[keyFollowPaths]
|
||||||
|
|
||||||
var progress progressCb
|
var progress progressCb
|
||||||
if sp.p != nil {
|
if sp.p != nil {
|
||||||
progress = sp.p
|
progress = sp.p
|
||||||
|
@ -98,7 +101,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
|
||||||
doneCh = sp.doneCh
|
doneCh = sp.doneCh
|
||||||
sp.doneCh = nil
|
sp.doneCh = nil
|
||||||
}
|
}
|
||||||
err := pr.sendFn(stream, dir.Dir, includes, excludes, progress, dir.Map)
|
err := pr.sendFn(stream, dir.Dir, includes, excludes, followPaths, progress, dir.Map)
|
||||||
if doneCh != nil {
|
if doneCh != nil {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
doneCh <- err
|
doneCh <- err
|
||||||
|
@ -117,7 +120,7 @@ type progressCb func(int, bool)
|
||||||
|
|
||||||
type protocol struct {
|
type protocol struct {
|
||||||
name string
|
name string
|
||||||
sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error
|
sendFn func(stream grpc.Stream, srcDir string, includes, excludes, followPaths []string, progress progressCb, _map func(*fsutil.Stat) bool) error
|
||||||
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error
|
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -142,6 +145,7 @@ type FSSendRequestOpt struct {
|
||||||
Name string
|
Name string
|
||||||
IncludePatterns []string
|
IncludePatterns []string
|
||||||
ExcludePatterns []string
|
ExcludePatterns []string
|
||||||
|
FollowPaths []string
|
||||||
OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory
|
OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory
|
||||||
DestDir string
|
DestDir string
|
||||||
CacheUpdater CacheUpdater
|
CacheUpdater CacheUpdater
|
||||||
|
@ -181,6 +185,10 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
|
||||||
opts[keyExcludePatterns] = opt.ExcludePatterns
|
opts[keyExcludePatterns] = opt.ExcludePatterns
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if opt.FollowPaths != nil {
|
||||||
|
opts[keyFollowPaths] = opt.FollowPaths
|
||||||
|
}
|
||||||
|
|
||||||
opts[keyDirName] = []string{opt.Name}
|
opts[keyDirName] = []string{opt.Name}
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
@ -261,7 +269,7 @@ func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progres
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return sendDiffCopy(cc, srcPath, nil, nil, progress, nil)
|
return sendDiffCopy(cc, srcPath, nil, nil, nil, progress, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) {
|
func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) {
|
||||||
|
|
|
@ -0,0 +1,156 @@
|
||||||
|
package grpchijack
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"github.com/moby/buildkit/session"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Dialer(api controlapi.ControlClient) session.Dialer {
|
||||||
|
return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
|
||||||
|
|
||||||
|
meta = lowerHeaders(meta)
|
||||||
|
|
||||||
|
md := metadata.MD(meta)
|
||||||
|
|
||||||
|
ctx = metadata.NewOutgoingContext(ctx, md)
|
||||||
|
|
||||||
|
stream, err := api.Session(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
c, _ := streamToConn(stream)
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) {
|
||||||
|
closeCh := make(chan struct{})
|
||||||
|
c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh}
|
||||||
|
return c, closeCh
|
||||||
|
}
|
||||||
|
|
||||||
|
type conn struct {
|
||||||
|
stream grpc.Stream
|
||||||
|
buf []byte
|
||||||
|
lastBuf []byte
|
||||||
|
|
||||||
|
closedOnce sync.Once
|
||||||
|
readMu sync.Mutex
|
||||||
|
err error
|
||||||
|
closeCh chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *conn) Read(b []byte) (n int, err error) {
|
||||||
|
c.readMu.Lock()
|
||||||
|
defer c.readMu.Unlock()
|
||||||
|
|
||||||
|
if c.lastBuf != nil {
|
||||||
|
n := copy(b, c.lastBuf)
|
||||||
|
c.lastBuf = c.lastBuf[n:]
|
||||||
|
if len(c.lastBuf) == 0 {
|
||||||
|
c.lastBuf = nil
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
m := new(controlapi.BytesMessage)
|
||||||
|
m.Data = c.buf
|
||||||
|
|
||||||
|
if err := c.stream.RecvMsg(m); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
c.buf = m.Data[:cap(m.Data)]
|
||||||
|
|
||||||
|
n = copy(b, m.Data)
|
||||||
|
if n < len(m.Data) {
|
||||||
|
c.lastBuf = m.Data[n:]
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *conn) Write(b []byte) (int, error) {
|
||||||
|
m := &controlapi.BytesMessage{Data: b}
|
||||||
|
if err := c.stream.SendMsg(m); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return len(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *conn) Close() (err error) {
|
||||||
|
c.closedOnce.Do(func() {
|
||||||
|
defer func() {
|
||||||
|
close(c.closeCh)
|
||||||
|
}()
|
||||||
|
|
||||||
|
if cs, ok := c.stream.(grpc.ClientStream); ok {
|
||||||
|
err = cs.CloseSend()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.readMu.Lock()
|
||||||
|
for {
|
||||||
|
m := new(controlapi.BytesMessage)
|
||||||
|
m.Data = c.buf
|
||||||
|
err = c.stream.RecvMsg(m)
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c.buf = m.Data[:cap(m.Data)]
|
||||||
|
c.lastBuf = append(c.lastBuf, c.buf...)
|
||||||
|
}
|
||||||
|
c.readMu.Unlock()
|
||||||
|
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *conn) LocalAddr() net.Addr {
|
||||||
|
return dummyAddr{}
|
||||||
|
}
|
||||||
|
func (c *conn) RemoteAddr() net.Addr {
|
||||||
|
return dummyAddr{}
|
||||||
|
}
|
||||||
|
func (c *conn) SetDeadline(t time.Time) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (c *conn) SetReadDeadline(t time.Time) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (c *conn) SetWriteDeadline(t time.Time) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type dummyAddr struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d dummyAddr) Network() string {
|
||||||
|
return "tcp"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d dummyAddr) String() string {
|
||||||
|
return "localhost"
|
||||||
|
}
|
||||||
|
|
||||||
|
func lowerHeaders(in map[string][]string) map[string][]string {
|
||||||
|
out := map[string][]string{}
|
||||||
|
for k := range in {
|
||||||
|
out[strings.ToLower(k)] = in[k]
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
|
@ -0,0 +1,14 @@
|
||||||
|
package grpchijack
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) {
|
||||||
|
md, _ := metadata.FromIncomingContext(stream.Context())
|
||||||
|
c, closeCh := streamToConn(stream)
|
||||||
|
return c, closeCh, md
|
||||||
|
}
|
|
@ -0,0 +1,16 @@
|
||||||
|
package pb
|
||||||
|
|
||||||
|
const AttrKeepGitDir = "git.keepgitdir"
|
||||||
|
const AttrFullRemoteURL = "git.fullurl"
|
||||||
|
const AttrLocalSessionID = "local.session"
|
||||||
|
const AttrIncludePatterns = "local.includepattern"
|
||||||
|
const AttrFollowPaths = "local.followpaths"
|
||||||
|
const AttrExcludePatterns = "local.excludepatterns"
|
||||||
|
const AttrSharedKeyHint = "local.sharedkeyhint"
|
||||||
|
const AttrLLBDefinitionFilename = "llbbuild.filename"
|
||||||
|
|
||||||
|
const AttrHTTPChecksum = "http.checksum"
|
||||||
|
const AttrHTTPFilename = "http.filename"
|
||||||
|
const AttrHTTPPerm = "http.perm"
|
||||||
|
const AttrHTTPUID = "http.uid"
|
||||||
|
const AttrHTTPGID = "http.gid"
|
|
@ -0,0 +1,12 @@
|
||||||
|
package pb
|
||||||
|
|
||||||
|
type InputIndex int64
|
||||||
|
type OutputIndex int64
|
||||||
|
|
||||||
|
const RootMount = "/"
|
||||||
|
const SkipOutput OutputIndex = -1
|
||||||
|
const Empty InputIndex = -1
|
||||||
|
const LLBBuilder InputIndex = -1
|
||||||
|
|
||||||
|
const LLBDefinitionInput = "buildkit.llb.definition"
|
||||||
|
const LLBDefaultDefinitionFile = LLBDefinitionInput
|
|
@ -0,0 +1,3 @@
|
||||||
|
package pb
|
||||||
|
|
||||||
|
//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,137 @@
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
// Package pb provides the protobuf definition of LLB: low-level builder instruction.
|
||||||
|
// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph.
|
||||||
|
package pb;
|
||||||
|
|
||||||
|
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||||
|
|
||||||
|
// Op represents a vertex of the LLB DAG.
|
||||||
|
message Op {
|
||||||
|
// inputs is a set of input edges.
|
||||||
|
repeated Input inputs = 1;
|
||||||
|
oneof op {
|
||||||
|
ExecOp exec = 2;
|
||||||
|
SourceOp source = 3;
|
||||||
|
CopyOp copy = 4;
|
||||||
|
BuildOp build = 5;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Input represents an input edge for an Op.
|
||||||
|
message Input {
|
||||||
|
// digest of the marshaled input Op
|
||||||
|
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||||
|
// output index of the input Op
|
||||||
|
int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecOp executes a command in a container.
|
||||||
|
message ExecOp {
|
||||||
|
Meta meta = 1;
|
||||||
|
repeated Mount mounts = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Meta is a set of arguments for ExecOp.
|
||||||
|
// Meta is unrelated to LLB metadata.
|
||||||
|
// FIXME: rename (ExecContext? ExecArgs?)
|
||||||
|
message Meta {
|
||||||
|
repeated string args = 1;
|
||||||
|
repeated string env = 2;
|
||||||
|
string cwd = 3;
|
||||||
|
string user = 4;
|
||||||
|
ProxyEnv proxy_env = 5;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mount specifies how to mount an input Op as a filesystem.
|
||||||
|
message Mount {
|
||||||
|
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
|
||||||
|
string selector = 2;
|
||||||
|
string dest = 3;
|
||||||
|
int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
|
||||||
|
bool readonly = 5;
|
||||||
|
MountType mountType = 6;
|
||||||
|
CacheOpt cacheOpt = 20;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum MountType {
|
||||||
|
BIND = 0;
|
||||||
|
SECRET = 1;
|
||||||
|
SSH = 2;
|
||||||
|
CACHE = 3;
|
||||||
|
TMPFS = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CacheOpt {
|
||||||
|
string ID = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyOp copies files across Ops.
|
||||||
|
message CopyOp {
|
||||||
|
repeated CopySource src = 1;
|
||||||
|
string dest = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopySource specifies a source for CopyOp.
|
||||||
|
message CopySource {
|
||||||
|
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
|
||||||
|
string selector = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// SourceOp specifies a source such as build contexts and images.
|
||||||
|
message SourceOp {
|
||||||
|
// TODO: use source type or any type instead of URL protocol.
|
||||||
|
// identifier e.g. local://, docker-image://, git://, https://...
|
||||||
|
string identifier = 1;
|
||||||
|
// attrs are defined in attr.go
|
||||||
|
map<string, string> attrs = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildOp is used for nested build invocation.
|
||||||
|
message BuildOp {
|
||||||
|
int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
|
||||||
|
map<string, BuildInput> inputs = 2;
|
||||||
|
Definition def = 3;
|
||||||
|
map<string, string> attrs = 4;
|
||||||
|
// outputs
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildInput is used for BuildOp.
|
||||||
|
message BuildInput {
|
||||||
|
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpMetadata is a per-vertex metadata entry, which can be defined for arbitrary Op vertex and overridable on the run time.
|
||||||
|
message OpMetadata {
|
||||||
|
// ignore_cache specifies to ignore the cache for this Op.
|
||||||
|
bool ignore_cache = 1;
|
||||||
|
// Description can be used for keeping any text fields that builder doesn't parse
|
||||||
|
map<string, string> description = 2;
|
||||||
|
WorkerConstraint worker_constraint = 3;
|
||||||
|
ExportCache export_cache = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ExportCache {
|
||||||
|
bool Value = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProxyEnv {
|
||||||
|
string http_proxy = 1;
|
||||||
|
string https_proxy = 2;
|
||||||
|
string ftp_proxy = 3;
|
||||||
|
string no_proxy = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
// WorkerConstraint is experimental and likely to be changed.
|
||||||
|
message WorkerConstraint {
|
||||||
|
repeated string filter = 1; // containerd-style filter
|
||||||
|
}
|
||||||
|
|
||||||
|
// Definition is the LLB definition structure with per-vertex metadata entries
|
||||||
|
message Definition {
|
||||||
|
// def is a list of marshaled Op messages
|
||||||
|
repeated bytes def = 1;
|
||||||
|
// metadata contains metadata for the each of the Op messages.
|
||||||
|
// A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future.
|
||||||
|
map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||||
|
}
|
|
@ -0,0 +1,41 @@
|
||||||
|
package appcontext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
var appContextCache context.Context
|
||||||
|
var appContextOnce sync.Once
|
||||||
|
|
||||||
|
// Context returns a static context that reacts to termination signals of the
|
||||||
|
// running process. Useful in CLI tools.
|
||||||
|
func Context() context.Context {
|
||||||
|
appContextOnce.Do(func() {
|
||||||
|
signals := make(chan os.Signal, 2048)
|
||||||
|
signal.Notify(signals, terminationSignals...)
|
||||||
|
|
||||||
|
const exitLimit = 3
|
||||||
|
retries := 0
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
appContextCache = ctx
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
<-signals
|
||||||
|
cancel()
|
||||||
|
retries++
|
||||||
|
if retries >= exitLimit {
|
||||||
|
logrus.Errorf("got %d SIGTERM/SIGINTs, forcing shutdown", retries)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
})
|
||||||
|
return appContextCache
|
||||||
|
}
|
11
vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go
generated
vendored
Normal file
11
vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package appcontext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var terminationSignals = []os.Signal{unix.SIGTERM, unix.SIGINT}
|
7
vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go
generated
vendored
Normal file
7
vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
package appcontext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var terminationSignals = []os.Signal{os.Interrupt}
|
55
vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go
generated
vendored
Normal file
55
vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package appdefaults
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
Address = "unix:///run/buildkit/buildkitd.sock"
|
||||||
|
Root = "/var/lib/buildkit"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock
|
||||||
|
func UserAddress() string {
|
||||||
|
// pam_systemd sets XDG_RUNTIME_DIR but not other dirs.
|
||||||
|
xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||||
|
if xdgRuntimeDir != "" {
|
||||||
|
dirs := strings.Split(xdgRuntimeDir, ":")
|
||||||
|
return "unix://" + filepath.Join(dirs[0], "buildkit", "buildkitd.sock")
|
||||||
|
}
|
||||||
|
return Address
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnsureUserAddressDir sets sticky bit on XDG_RUNTIME_DIR if XDG_RUNTIME_DIR is set.
|
||||||
|
// See https://github.com/opencontainers/runc/issues/1694
|
||||||
|
func EnsureUserAddressDir() error {
|
||||||
|
xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||||
|
if xdgRuntimeDir != "" {
|
||||||
|
dirs := strings.Split(xdgRuntimeDir, ":")
|
||||||
|
dir := filepath.Join(dirs[0], "buildkit")
|
||||||
|
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.Chmod(dir, 0700|os.ModeSticky)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserRoot typically returns /home/$USER/.local/share/buildkit
|
||||||
|
func UserRoot() string {
|
||||||
|
// pam_systemd sets XDG_RUNTIME_DIR but not other dirs.
|
||||||
|
xdgDataHome := os.Getenv("XDG_DATA_HOME")
|
||||||
|
if xdgDataHome != "" {
|
||||||
|
dirs := strings.Split(xdgDataHome, ":")
|
||||||
|
return filepath.Join(dirs[0], "buildkit")
|
||||||
|
}
|
||||||
|
home := os.Getenv("HOME")
|
||||||
|
if home != "" {
|
||||||
|
return filepath.Join(home, ".local", "share", "buildkit")
|
||||||
|
}
|
||||||
|
return Root
|
||||||
|
}
|
18
vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go
generated
vendored
Normal file
18
vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
package appdefaults
|
||||||
|
|
||||||
|
const (
|
||||||
|
Address = "npipe:////./pipe/buildkitd"
|
||||||
|
Root = ".buildstate"
|
||||||
|
)
|
||||||
|
|
||||||
|
func UserAddress() string {
|
||||||
|
return Address
|
||||||
|
}
|
||||||
|
|
||||||
|
func EnsureUserAddressDir() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func UserRoot() string {
|
||||||
|
return Root
|
||||||
|
}
|
425
vendor/github.com/moby/buildkit/util/progress/progressui/display.go
generated
vendored
Normal file
425
vendor/github.com/moby/buildkit/util/progress/progressui/display.go
generated
vendored
Normal file
|
@ -0,0 +1,425 @@
|
||||||
|
package progressui
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containerd/console"
|
||||||
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/morikuni/aec"
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
"github.com/tonistiigi/units"
|
||||||
|
"golang.org/x/time/rate"
|
||||||
|
)
|
||||||
|
|
||||||
|
func DisplaySolveStatus(ctx context.Context, c console.Console, w io.Writer, ch chan *client.SolveStatus) error {
|
||||||
|
|
||||||
|
modeConsole := c != nil
|
||||||
|
|
||||||
|
disp := &display{c: c}
|
||||||
|
printer := &textMux{w: w}
|
||||||
|
|
||||||
|
t := newTrace(w)
|
||||||
|
|
||||||
|
var done bool
|
||||||
|
ticker := time.NewTicker(100 * time.Millisecond)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
displayLimiter := rate.NewLimiter(rate.Every(70*time.Millisecond), 1)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case <-ticker.C:
|
||||||
|
case ss, ok := <-ch:
|
||||||
|
if ok {
|
||||||
|
t.update(ss)
|
||||||
|
} else {
|
||||||
|
done = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if modeConsole {
|
||||||
|
if done {
|
||||||
|
disp.print(t.displayInfo(), true)
|
||||||
|
t.printErrorLogs(c)
|
||||||
|
return nil
|
||||||
|
} else if displayLimiter.Allow() {
|
||||||
|
disp.print(t.displayInfo(), false)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if done || displayLimiter.Allow() {
|
||||||
|
printer.print(t)
|
||||||
|
if done {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type displayInfo struct {
|
||||||
|
startTime time.Time
|
||||||
|
jobs []job
|
||||||
|
countTotal int
|
||||||
|
countCompleted int
|
||||||
|
}
|
||||||
|
|
||||||
|
type job struct {
|
||||||
|
startTime *time.Time
|
||||||
|
completedTime *time.Time
|
||||||
|
name string
|
||||||
|
status string
|
||||||
|
hasError bool
|
||||||
|
isCanceled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type trace struct {
|
||||||
|
w io.Writer
|
||||||
|
localTimeDiff time.Duration
|
||||||
|
vertexes []*vertex
|
||||||
|
byDigest map[digest.Digest]*vertex
|
||||||
|
nextIndex int
|
||||||
|
updates map[digest.Digest]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type vertex struct {
|
||||||
|
*client.Vertex
|
||||||
|
statuses []*status
|
||||||
|
byID map[string]*status
|
||||||
|
indent string
|
||||||
|
index int
|
||||||
|
|
||||||
|
logs [][]byte
|
||||||
|
logsPartial bool
|
||||||
|
logsOffset int
|
||||||
|
prev *client.Vertex
|
||||||
|
events []string
|
||||||
|
lastBlockTime *time.Time
|
||||||
|
count int
|
||||||
|
statusUpdates map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *vertex) update(c int) {
|
||||||
|
if v.count == 0 {
|
||||||
|
now := time.Now()
|
||||||
|
v.lastBlockTime = &now
|
||||||
|
}
|
||||||
|
v.count += c
|
||||||
|
}
|
||||||
|
|
||||||
|
type status struct {
|
||||||
|
*client.VertexStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTrace(w io.Writer) *trace {
|
||||||
|
return &trace{
|
||||||
|
byDigest: make(map[digest.Digest]*vertex),
|
||||||
|
updates: make(map[digest.Digest]struct{}),
|
||||||
|
w: w,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *trace) triggerVertexEvent(v *client.Vertex) {
|
||||||
|
if v.Started == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var old client.Vertex
|
||||||
|
vtx := t.byDigest[v.Digest]
|
||||||
|
if v := vtx.prev; v != nil {
|
||||||
|
old = *v
|
||||||
|
}
|
||||||
|
|
||||||
|
var ev []string
|
||||||
|
if v.Digest != old.Digest {
|
||||||
|
ev = append(ev, fmt.Sprintf("%13s %s", "digest:", v.Digest))
|
||||||
|
}
|
||||||
|
if v.Name != old.Name {
|
||||||
|
ev = append(ev, fmt.Sprintf("%13s %q", "name:", v.Name))
|
||||||
|
}
|
||||||
|
if v.Started != old.Started {
|
||||||
|
if v.Started != nil && old.Started == nil || !v.Started.Equal(*old.Started) {
|
||||||
|
ev = append(ev, fmt.Sprintf("%13s %v", "started:", v.Started))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v.Completed != old.Completed && v.Completed != nil {
|
||||||
|
ev = append(ev, fmt.Sprintf("%13s %v", "completed:", v.Completed))
|
||||||
|
if v.Started != nil {
|
||||||
|
ev = append(ev, fmt.Sprintf("%13s %v", "duration:", v.Completed.Sub(*v.Started)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v.Cached != old.Cached {
|
||||||
|
ev = append(ev, fmt.Sprintf("%13s %v", "cached:", v.Cached))
|
||||||
|
}
|
||||||
|
if v.Error != old.Error {
|
||||||
|
ev = append(ev, fmt.Sprintf("%13s %q", "error:", v.Error))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ev) > 0 {
|
||||||
|
vtx.events = append(vtx.events, ev...)
|
||||||
|
vtx.update(len(ev))
|
||||||
|
t.updates[v.Digest] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.byDigest[v.Digest].prev = v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *trace) update(s *client.SolveStatus) {
|
||||||
|
for _, v := range s.Vertexes {
|
||||||
|
prev, ok := t.byDigest[v.Digest]
|
||||||
|
if !ok {
|
||||||
|
t.nextIndex++
|
||||||
|
t.byDigest[v.Digest] = &vertex{
|
||||||
|
byID: make(map[string]*status),
|
||||||
|
statusUpdates: make(map[string]struct{}),
|
||||||
|
index: t.nextIndex,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.triggerVertexEvent(v)
|
||||||
|
if v.Started != nil && (prev == nil || prev.Started == nil) {
|
||||||
|
if t.localTimeDiff == 0 {
|
||||||
|
t.localTimeDiff = time.Since(*v.Started)
|
||||||
|
}
|
||||||
|
t.vertexes = append(t.vertexes, t.byDigest[v.Digest])
|
||||||
|
}
|
||||||
|
t.byDigest[v.Digest].Vertex = v
|
||||||
|
}
|
||||||
|
for _, s := range s.Statuses {
|
||||||
|
v, ok := t.byDigest[s.Vertex]
|
||||||
|
if !ok {
|
||||||
|
continue // shouldn't happen
|
||||||
|
}
|
||||||
|
prev, ok := v.byID[s.ID]
|
||||||
|
if !ok {
|
||||||
|
v.byID[s.ID] = &status{VertexStatus: s}
|
||||||
|
}
|
||||||
|
if s.Started != nil && (prev == nil || prev.Started == nil) {
|
||||||
|
v.statuses = append(v.statuses, v.byID[s.ID])
|
||||||
|
}
|
||||||
|
v.byID[s.ID].VertexStatus = s
|
||||||
|
v.statusUpdates[s.ID] = struct{}{}
|
||||||
|
t.updates[v.Digest] = struct{}{}
|
||||||
|
v.update(1)
|
||||||
|
}
|
||||||
|
for _, l := range s.Logs {
|
||||||
|
v, ok := t.byDigest[l.Vertex]
|
||||||
|
if !ok {
|
||||||
|
continue // shouldn't happen
|
||||||
|
}
|
||||||
|
complete := split(l.Data, byte('\n'), func(dt []byte) {
|
||||||
|
if v.logsPartial && len(v.logs) != 0 {
|
||||||
|
v.logs[len(v.logs)-1] = append(v.logs[len(v.logs)-1], dt...)
|
||||||
|
} else {
|
||||||
|
ts := time.Duration(0)
|
||||||
|
if v.Started != nil {
|
||||||
|
ts = l.Timestamp.Sub(*v.Started)
|
||||||
|
}
|
||||||
|
v.logs = append(v.logs, []byte(fmt.Sprintf("#%d %s %s", v.index, fmt.Sprintf("%#.4g", ts.Seconds())[:5], dt)))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
v.logsPartial = !complete
|
||||||
|
t.updates[v.Digest] = struct{}{}
|
||||||
|
v.update(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *trace) printErrorLogs(f io.Writer) {
|
||||||
|
for _, v := range t.vertexes {
|
||||||
|
if v.Error != "" && !strings.HasSuffix(v.Error, context.Canceled.Error()) {
|
||||||
|
fmt.Fprintln(f, "------")
|
||||||
|
fmt.Fprintf(f, " > %s:\n", v.Name)
|
||||||
|
for _, l := range v.logs {
|
||||||
|
f.Write(l)
|
||||||
|
fmt.Fprintln(f)
|
||||||
|
}
|
||||||
|
fmt.Fprintln(f, "------")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *trace) displayInfo() (d displayInfo) {
|
||||||
|
d.startTime = time.Now()
|
||||||
|
if t.localTimeDiff != 0 {
|
||||||
|
d.startTime = (*t.vertexes[0].Started).Add(t.localTimeDiff)
|
||||||
|
}
|
||||||
|
d.countTotal = len(t.byDigest)
|
||||||
|
for _, v := range t.byDigest {
|
||||||
|
if v.Completed != nil {
|
||||||
|
d.countCompleted++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range t.vertexes {
|
||||||
|
j := job{
|
||||||
|
startTime: addTime(v.Started, t.localTimeDiff),
|
||||||
|
completedTime: addTime(v.Completed, t.localTimeDiff),
|
||||||
|
name: strings.Replace(v.Name, "\t", " ", -1),
|
||||||
|
}
|
||||||
|
if v.Error != "" {
|
||||||
|
if strings.HasSuffix(v.Error, context.Canceled.Error()) {
|
||||||
|
j.isCanceled = true
|
||||||
|
j.name = "CANCELED " + j.name
|
||||||
|
} else {
|
||||||
|
j.hasError = true
|
||||||
|
j.name = "ERROR " + j.name
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v.Cached {
|
||||||
|
j.name = "CACHED " + j.name
|
||||||
|
}
|
||||||
|
j.name = v.indent + j.name
|
||||||
|
d.jobs = append(d.jobs, j)
|
||||||
|
for _, s := range v.statuses {
|
||||||
|
j := job{
|
||||||
|
startTime: addTime(s.Started, t.localTimeDiff),
|
||||||
|
completedTime: addTime(s.Completed, t.localTimeDiff),
|
||||||
|
name: v.indent + "=> " + s.ID,
|
||||||
|
}
|
||||||
|
if s.Total != 0 {
|
||||||
|
j.status = fmt.Sprintf("%.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total))
|
||||||
|
} else if s.Current != 0 {
|
||||||
|
j.status = fmt.Sprintf("%.2f", units.Bytes(s.Current))
|
||||||
|
}
|
||||||
|
d.jobs = append(d.jobs, j)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func split(dt []byte, sep byte, fn func([]byte)) bool {
|
||||||
|
if len(dt) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
if len(dt) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
idx := bytes.IndexByte(dt, sep)
|
||||||
|
if idx == -1 {
|
||||||
|
fn(dt)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
fn(dt[:idx])
|
||||||
|
dt = dt[idx+1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func addTime(tm *time.Time, d time.Duration) *time.Time {
|
||||||
|
if tm == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
t := (*tm).Add(d)
|
||||||
|
return &t
|
||||||
|
}
|
||||||
|
|
||||||
|
type display struct {
|
||||||
|
c console.Console
|
||||||
|
lineCount int
|
||||||
|
repeated bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (disp *display) print(d displayInfo, all bool) {
|
||||||
|
// this output is inspired by Buck
|
||||||
|
width := 80
|
||||||
|
height := 10
|
||||||
|
size, err := disp.c.Size()
|
||||||
|
if err == nil && size.Width > 0 && size.Height > 0 {
|
||||||
|
width = int(size.Width)
|
||||||
|
height = int(size.Height)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !all {
|
||||||
|
d.jobs = wrapHeight(d.jobs, height-2)
|
||||||
|
}
|
||||||
|
|
||||||
|
b := aec.EmptyBuilder
|
||||||
|
for i := 0; i <= disp.lineCount; i++ {
|
||||||
|
b = b.Up(1)
|
||||||
|
}
|
||||||
|
if !disp.repeated {
|
||||||
|
b = b.Down(1)
|
||||||
|
}
|
||||||
|
disp.repeated = true
|
||||||
|
fmt.Fprint(disp.c, b.Column(0).ANSI)
|
||||||
|
|
||||||
|
statusStr := ""
|
||||||
|
if d.countCompleted > 0 && d.countCompleted == d.countTotal && all {
|
||||||
|
statusStr = "FINISHED"
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprint(disp.c, aec.Hide)
|
||||||
|
defer fmt.Fprint(disp.c, aec.Show)
|
||||||
|
|
||||||
|
out := fmt.Sprintf("[+] Building %.1fs (%d/%d) %s", time.Since(d.startTime).Seconds(), d.countCompleted, d.countTotal, statusStr)
|
||||||
|
out = align(out, "", width)
|
||||||
|
fmt.Fprintln(disp.c, out)
|
||||||
|
lineCount := 0
|
||||||
|
for _, j := range d.jobs {
|
||||||
|
endTime := time.Now()
|
||||||
|
if j.completedTime != nil {
|
||||||
|
endTime = *j.completedTime
|
||||||
|
}
|
||||||
|
if j.startTime == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dt := endTime.Sub(*j.startTime).Seconds()
|
||||||
|
if dt < 0.05 {
|
||||||
|
dt = 0
|
||||||
|
}
|
||||||
|
pfx := " => "
|
||||||
|
timer := fmt.Sprintf(" %3.1fs\n", dt)
|
||||||
|
status := j.status
|
||||||
|
showStatus := false
|
||||||
|
|
||||||
|
left := width - len(pfx) - len(timer) - 1
|
||||||
|
if status != "" {
|
||||||
|
if left+len(status) > 20 {
|
||||||
|
showStatus = true
|
||||||
|
left -= len(status) + 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if left < 12 { // too small screen to show progress
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(j.name) > left {
|
||||||
|
j.name = j.name[:left]
|
||||||
|
}
|
||||||
|
|
||||||
|
out := pfx + j.name
|
||||||
|
if showStatus {
|
||||||
|
out += " " + status
|
||||||
|
}
|
||||||
|
|
||||||
|
out = align(out, timer, width)
|
||||||
|
if j.completedTime != nil {
|
||||||
|
color := aec.BlueF
|
||||||
|
if j.isCanceled {
|
||||||
|
color = aec.YellowF
|
||||||
|
} else if j.hasError {
|
||||||
|
color = aec.RedF
|
||||||
|
}
|
||||||
|
out = aec.Apply(out, color)
|
||||||
|
}
|
||||||
|
fmt.Fprint(disp.c, out)
|
||||||
|
lineCount++
|
||||||
|
}
|
||||||
|
disp.lineCount = lineCount
|
||||||
|
}
|
||||||
|
|
||||||
|
func align(l, r string, w int) string {
|
||||||
|
return fmt.Sprintf("%-[2]*[1]s %[3]s", l, w-len(r)-1, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func wrapHeight(j []job, limit int) []job {
|
||||||
|
if len(j) > limit {
|
||||||
|
j = j[len(j)-limit:]
|
||||||
|
}
|
||||||
|
return j
|
||||||
|
}
|
214
vendor/github.com/moby/buildkit/util/progress/progressui/printer.go
generated
vendored
Normal file
214
vendor/github.com/moby/buildkit/util/progress/progressui/printer.go
generated
vendored
Normal file
|
@ -0,0 +1,214 @@
|
||||||
|
package progressui
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
digest "github.com/opencontainers/go-digest"
|
||||||
|
"github.com/tonistiigi/units"
|
||||||
|
)
|
||||||
|
|
||||||
|
const antiFlicker = 5 * time.Second
|
||||||
|
const maxDelay = 10 * time.Second
|
||||||
|
|
||||||
|
type textMux struct {
|
||||||
|
w io.Writer
|
||||||
|
current digest.Digest
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
|
||||||
|
v, ok := t.byDigest[dgst]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if dgst != p.current {
|
||||||
|
if p.current != "" {
|
||||||
|
old := t.byDigest[p.current]
|
||||||
|
if old.logsPartial {
|
||||||
|
fmt.Fprintln(p.w, "")
|
||||||
|
}
|
||||||
|
old.logsOffset = 0
|
||||||
|
old.count = 0
|
||||||
|
fmt.Fprintf(p.w, "#%d ...\n", v.index)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(p.w, "\n#%d %s\n", v.index, limitString(v.Name, 72))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(v.events) != 0 {
|
||||||
|
v.logsOffset = 0
|
||||||
|
}
|
||||||
|
for _, ev := range v.events {
|
||||||
|
fmt.Fprintf(p.w, "#%d %s\n", v.index, ev)
|
||||||
|
}
|
||||||
|
v.events = v.events[:0]
|
||||||
|
|
||||||
|
for _, s := range v.statuses {
|
||||||
|
if _, ok := v.statusUpdates[s.ID]; ok {
|
||||||
|
var bytes string
|
||||||
|
if s.Total != 0 {
|
||||||
|
bytes = fmt.Sprintf(" %.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total))
|
||||||
|
} else if s.Current != 0 {
|
||||||
|
bytes = fmt.Sprintf(" %.2f", units.Bytes(s.Current))
|
||||||
|
}
|
||||||
|
var tm string
|
||||||
|
endTime := s.Timestamp
|
||||||
|
if s.Completed != nil {
|
||||||
|
endTime = *s.Completed
|
||||||
|
}
|
||||||
|
if s.Started != nil {
|
||||||
|
diff := endTime.Sub(*s.Started).Seconds()
|
||||||
|
if diff > 0.01 {
|
||||||
|
tm = fmt.Sprintf(" %.1fs", diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if s.Completed != nil {
|
||||||
|
tm += " done"
|
||||||
|
}
|
||||||
|
fmt.Fprintf(p.w, "#%d %s%s%s\n", v.index, s.ID, bytes, tm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v.statusUpdates = map[string]struct{}{}
|
||||||
|
|
||||||
|
for i, l := range v.logs {
|
||||||
|
if i == 0 {
|
||||||
|
l = l[v.logsOffset:]
|
||||||
|
}
|
||||||
|
fmt.Fprintf(p.w, "%s", []byte(l))
|
||||||
|
if i != len(v.logs)-1 || !v.logsPartial {
|
||||||
|
fmt.Fprintln(p.w, "")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(v.logs) > 0 {
|
||||||
|
if v.logsPartial {
|
||||||
|
v.logs = v.logs[len(v.logs)-1:]
|
||||||
|
v.logsOffset = len(v.logs[0])
|
||||||
|
} else {
|
||||||
|
v.logs = nil
|
||||||
|
v.logsOffset = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.current = dgst
|
||||||
|
|
||||||
|
if v.Completed != nil {
|
||||||
|
p.current = ""
|
||||||
|
v.count = 0
|
||||||
|
fmt.Fprintf(p.w, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(t.updates, dgst)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *textMux) print(t *trace) {
|
||||||
|
|
||||||
|
completed := map[digest.Digest]struct{}{}
|
||||||
|
rest := map[digest.Digest]struct{}{}
|
||||||
|
|
||||||
|
for dgst := range t.updates {
|
||||||
|
v, ok := t.byDigest[dgst]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if v.Vertex.Completed != nil {
|
||||||
|
completed[dgst] = struct{}{}
|
||||||
|
} else {
|
||||||
|
rest[dgst] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
current := p.current
|
||||||
|
|
||||||
|
// items that have completed need to be printed first
|
||||||
|
if _, ok := completed[current]; ok {
|
||||||
|
p.printVtx(t, current)
|
||||||
|
}
|
||||||
|
|
||||||
|
for dgst := range completed {
|
||||||
|
if dgst != current {
|
||||||
|
p.printVtx(t, dgst)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rest) == 0 {
|
||||||
|
if current != "" {
|
||||||
|
if v := t.byDigest[current]; v.Started != nil && v.Completed == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// make any open vertex active
|
||||||
|
for dgst, v := range t.byDigest {
|
||||||
|
if v.Started != nil && v.Completed == nil {
|
||||||
|
p.printVtx(t, dgst)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// now print the active one
|
||||||
|
if _, ok := rest[current]; ok {
|
||||||
|
p.printVtx(t, current)
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := map[digest.Digest]*vtxStat{}
|
||||||
|
now := time.Now()
|
||||||
|
sum := 0.0
|
||||||
|
var max digest.Digest
|
||||||
|
if current != "" {
|
||||||
|
rest[current] = struct{}{}
|
||||||
|
}
|
||||||
|
for dgst := range rest {
|
||||||
|
v, ok := t.byDigest[dgst]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
tm := now.Sub(*v.lastBlockTime)
|
||||||
|
speed := float64(v.count) / tm.Seconds()
|
||||||
|
overLimit := tm > maxDelay && dgst != current
|
||||||
|
stats[dgst] = &vtxStat{blockTime: tm, speed: speed, overLimit: overLimit}
|
||||||
|
sum += speed
|
||||||
|
if overLimit || max == "" || stats[max].speed < speed {
|
||||||
|
max = dgst
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for dgst := range stats {
|
||||||
|
stats[dgst].share = stats[dgst].speed / sum
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := completed[current]; ok || current == "" {
|
||||||
|
p.printVtx(t, max)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// show items that were hidden
|
||||||
|
for dgst := range rest {
|
||||||
|
if stats[dgst].overLimit {
|
||||||
|
p.printVtx(t, dgst)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fair split between vertexes
|
||||||
|
if 1.0/(1.0-stats[current].share)*antiFlicker.Seconds() < stats[current].blockTime.Seconds() {
|
||||||
|
p.printVtx(t, max)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type vtxStat struct {
|
||||||
|
blockTime time.Duration
|
||||||
|
speed float64
|
||||||
|
share float64
|
||||||
|
overLimit bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func limitString(s string, l int) string {
|
||||||
|
if len(s) > l {
|
||||||
|
return s[:l] + "..."
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
|
@ -0,0 +1,14 @@
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package system
|
||||||
|
|
||||||
|
// DefaultPathEnv is unix style list of directories to search for
|
||||||
|
// executables. Each directory is separated from the next by a colon
|
||||||
|
// ':' character .
|
||||||
|
const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||||
|
|
||||||
|
// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
|
||||||
|
// is the system drive. This is a no-op on Linux.
|
||||||
|
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
|
||||||
|
return path, nil
|
||||||
|
}
|
|
@ -0,0 +1,37 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package system
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
|
||||||
|
// the container. Docker has no context of what the default path should be.
|
||||||
|
const DefaultPathEnv = ""
|
||||||
|
|
||||||
|
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
|
||||||
|
// This is used, for example, when validating a user provided path in docker cp.
|
||||||
|
// If a drive letter is supplied, it must be the system drive. The drive letter
|
||||||
|
// is always removed. Also, it translates it to OS semantics (IOW / to \). We
|
||||||
|
// need the path in this syntax so that it can ultimately be contatenated with
|
||||||
|
// a Windows long-path which doesn't support drive-letters. Examples:
|
||||||
|
// C: --> Fail
|
||||||
|
// C:\ --> \
|
||||||
|
// a --> a
|
||||||
|
// /a --> \a
|
||||||
|
// d:\ --> Fail
|
||||||
|
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
|
||||||
|
if len(path) == 2 && string(path[1]) == ":" {
|
||||||
|
return "", fmt.Errorf("No relative path specified in %q", path)
|
||||||
|
}
|
||||||
|
if !filepath.IsAbs(path) || len(path) < 2 {
|
||||||
|
return filepath.FromSlash(path), nil
|
||||||
|
}
|
||||||
|
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
|
||||||
|
return "", fmt.Errorf("The specified path is not on the system drive (C:)")
|
||||||
|
}
|
||||||
|
return filepath.FromSlash(path[2:]), nil
|
||||||
|
}
|
|
@ -0,0 +1,29 @@
|
||||||
|
// +build linux,seccomp
|
||||||
|
|
||||||
|
package system
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var seccompSupported bool
|
||||||
|
var seccompOnce sync.Once
|
||||||
|
|
||||||
|
func SeccompSupported() bool {
|
||||||
|
seccompOnce.Do(func() {
|
||||||
|
seccompSupported = getSeccompSupported()
|
||||||
|
})
|
||||||
|
return seccompSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
func getSeccompSupported() bool {
|
||||||
|
if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
|
||||||
|
// Make sure the kernel has CONFIG_SECCOMP_FILTER.
|
||||||
|
if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
|
@ -0,0 +1,7 @@
|
||||||
|
// +build !linux,seccomp
|
||||||
|
|
||||||
|
package system
|
||||||
|
|
||||||
|
func SeccompSupported() bool {
|
||||||
|
return false
|
||||||
|
}
|
|
@ -0,0 +1,7 @@
|
||||||
|
// +build !seccomp
|
||||||
|
|
||||||
|
package system
|
||||||
|
|
||||||
|
func SeccompSupported() bool {
|
||||||
|
return false
|
||||||
|
}
|
|
@ -6,7 +6,7 @@ github.com/davecgh/go-spew v1.1.0
|
||||||
github.com/pmezard/go-difflib v1.0.0
|
github.com/pmezard/go-difflib v1.0.0
|
||||||
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
|
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
|
||||||
|
|
||||||
github.com/containerd/containerd e1428ef05460da40720d622c803262e6fc8d3477
|
github.com/containerd/containerd 63522d9eaa5a0443d225642c4b6f4f5fdedf932b
|
||||||
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
|
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
|
||||||
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
|
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
|
||||||
github.com/sirupsen/logrus v1.0.0
|
github.com/sirupsen/logrus v1.0.0
|
||||||
|
@ -23,7 +23,7 @@ github.com/Microsoft/go-winio v0.4.7
|
||||||
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
|
||||||
github.com/opencontainers/runtime-spec v1.0.1
|
github.com/opencontainers/runtime-spec v1.0.1
|
||||||
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
|
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
|
||||||
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
|
github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
|
||||||
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
|
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
|
||||||
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
|
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
|
||||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||||
|
@ -36,11 +36,10 @@ github.com/docker/go-units v0.3.1
|
||||||
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
|
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
|
||||||
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
|
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
|
||||||
|
|
||||||
github.com/BurntSushi/locker a6e239ea1c69bff1cfdb20c4b73dadf52f784b6a
|
|
||||||
github.com/docker/docker 71cd53e4a197b303c6ba086bd584ffd67a884281
|
github.com/docker/docker 71cd53e4a197b303c6ba086bd584ffd67a884281
|
||||||
github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
|
github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
|
||||||
|
|
||||||
github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
|
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
|
||||||
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
|
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
|
||||||
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
||||||
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
|
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
|
||||||
|
|
|
@ -9,5 +9,12 @@ import (
|
||||||
|
|
||||||
func chtimes(path string, un int64) error {
|
func chtimes(path string, un int64) error {
|
||||||
mtime := time.Unix(0, un)
|
mtime := time.Unix(0, un)
|
||||||
|
fi, err := os.Lstat(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if fi.Mode()&os.ModeSymlink != 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return os.Chtimes(path, mtime, mtime)
|
return os.Chtimes(path, mtime, mtime)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,10 +1,9 @@
|
||||||
package fsutil
|
package fsutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"hash"
|
"hash"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error
|
type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
package fsutil
|
package fsutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package fsutil
|
package fsutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"hash"
|
"hash"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
@ -9,9 +10,8 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
digest "github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"golang.org/x/net/context"
|
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -80,9 +80,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
p = filepath.FromSlash(p)
|
destPath := filepath.Join(dw.dest, filepath.FromSlash(p))
|
||||||
|
|
||||||
destPath := filepath.Join(dw.dest, p)
|
|
||||||
|
|
||||||
if kind == ChangeKindDelete {
|
if kind == ChangeKindDelete {
|
||||||
// todo: no need to validate if diff is trusted but is it always?
|
// todo: no need to validate if diff is trusted but is it always?
|
||||||
|
@ -102,8 +100,10 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
|
||||||
return errors.Errorf("%s invalid change without stat information", p)
|
return errors.Errorf("%s invalid change without stat information", p)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
statCopy := *stat
|
||||||
|
|
||||||
if dw.filter != nil {
|
if dw.filter != nil {
|
||||||
if ok := dw.filter(stat); !ok {
|
if ok := dw.filter(&statCopy); !ok {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -122,7 +122,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
|
||||||
}
|
}
|
||||||
|
|
||||||
if oldFi != nil && fi.IsDir() && oldFi.IsDir() {
|
if oldFi != nil && fi.IsDir() && oldFi.IsDir() {
|
||||||
if err := rewriteMetadata(destPath, stat); err != nil {
|
if err := rewriteMetadata(destPath, &statCopy); err != nil {
|
||||||
return errors.Wrapf(err, "error setting dir metadata for %s", destPath)
|
return errors.Wrapf(err, "error setting dir metadata for %s", destPath)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -141,16 +141,16 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
|
||||||
return errors.Wrapf(err, "failed to create dir %s", newPath)
|
return errors.Wrapf(err, "failed to create dir %s", newPath)
|
||||||
}
|
}
|
||||||
case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0:
|
case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0:
|
||||||
if err := handleTarTypeBlockCharFifo(newPath, stat); err != nil {
|
if err := handleTarTypeBlockCharFifo(newPath, &statCopy); err != nil {
|
||||||
return errors.Wrapf(err, "failed to create device %s", newPath)
|
return errors.Wrapf(err, "failed to create device %s", newPath)
|
||||||
}
|
}
|
||||||
case fi.Mode()&os.ModeSymlink != 0:
|
case fi.Mode()&os.ModeSymlink != 0:
|
||||||
if err := os.Symlink(stat.Linkname, newPath); err != nil {
|
if err := os.Symlink(statCopy.Linkname, newPath); err != nil {
|
||||||
return errors.Wrapf(err, "failed to symlink %s", newPath)
|
return errors.Wrapf(err, "failed to symlink %s", newPath)
|
||||||
}
|
}
|
||||||
case stat.Linkname != "":
|
case statCopy.Linkname != "":
|
||||||
if err := os.Link(filepath.Join(dw.dest, stat.Linkname), newPath); err != nil {
|
if err := os.Link(filepath.Join(dw.dest, statCopy.Linkname), newPath); err != nil {
|
||||||
return errors.Wrapf(err, "failed to link %s to %s", newPath, stat.Linkname)
|
return errors.Wrapf(err, "failed to link %s to %s", newPath, statCopy.Linkname)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
isRegularFile = true
|
isRegularFile = true
|
||||||
|
@ -170,7 +170,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := rewriteMetadata(newPath, stat); err != nil {
|
if err := rewriteMetadata(newPath, &statCopy); err != nil {
|
||||||
return errors.Wrapf(err, "error setting metadata for %s", newPath)
|
return errors.Wrapf(err, "error setting metadata for %s", newPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -272,14 +272,27 @@ func (hw *hashedWriter) Digest() digest.Digest {
|
||||||
}
|
}
|
||||||
|
|
||||||
type lazyFileWriter struct {
|
type lazyFileWriter struct {
|
||||||
dest string
|
dest string
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
f *os.File
|
f *os.File
|
||||||
|
fileMode *os.FileMode
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lfw *lazyFileWriter) Write(dt []byte) (int, error) {
|
func (lfw *lazyFileWriter) Write(dt []byte) (int, error) {
|
||||||
if lfw.f == nil {
|
if lfw.f == nil {
|
||||||
file, err := os.OpenFile(lfw.dest, os.O_WRONLY, 0) //todo: windows
|
file, err := os.OpenFile(lfw.dest, os.O_WRONLY, 0) //todo: windows
|
||||||
|
if os.IsPermission(err) {
|
||||||
|
// retry after chmod
|
||||||
|
fi, er := os.Stat(lfw.dest)
|
||||||
|
if er == nil {
|
||||||
|
mode := fi.Mode()
|
||||||
|
lfw.fileMode = &mode
|
||||||
|
er = os.Chmod(lfw.dest, mode|0222)
|
||||||
|
if er == nil {
|
||||||
|
file, err = os.OpenFile(lfw.dest, os.O_WRONLY, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, errors.Wrapf(err, "failed to open %s", lfw.dest)
|
return 0, errors.Wrapf(err, "failed to open %s", lfw.dest)
|
||||||
}
|
}
|
||||||
|
@ -289,10 +302,14 @@ func (lfw *lazyFileWriter) Write(dt []byte) (int, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (lfw *lazyFileWriter) Close() error {
|
func (lfw *lazyFileWriter) Close() error {
|
||||||
|
var err error
|
||||||
if lfw.f != nil {
|
if lfw.f != nil {
|
||||||
return lfw.f.Close()
|
err = lfw.f.Close()
|
||||||
}
|
}
|
||||||
return nil
|
if err == nil && lfw.fileMode != nil {
|
||||||
|
err = os.Chmod(lfw.dest, *lfw.fileMode)
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func mkdev(major int64, minor int64) uint32 {
|
func mkdev(major int64, minor int64) uint32 {
|
||||||
|
|
|
@ -0,0 +1,150 @@
|
||||||
|
package fsutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
strings "strings"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func FollowLinks(root string, paths []string) ([]string, error) {
|
||||||
|
r := &symlinkResolver{root: root, resolved: map[string]struct{}{}}
|
||||||
|
for _, p := range paths {
|
||||||
|
if err := r.append(p); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res := make([]string, 0, len(r.resolved))
|
||||||
|
for r := range r.resolved {
|
||||||
|
res = append(res, r)
|
||||||
|
}
|
||||||
|
sort.Strings(res)
|
||||||
|
return dedupePaths(res), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type symlinkResolver struct {
|
||||||
|
root string
|
||||||
|
resolved map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *symlinkResolver) append(p string) error {
|
||||||
|
p = filepath.Join(".", p)
|
||||||
|
current := "."
|
||||||
|
for {
|
||||||
|
parts := strings.SplitN(p, string(filepath.Separator), 2)
|
||||||
|
current = filepath.Join(current, parts[0])
|
||||||
|
|
||||||
|
targets, err := r.readSymlink(current, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p = ""
|
||||||
|
if len(parts) == 2 {
|
||||||
|
p = parts[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
if p == "" || targets != nil {
|
||||||
|
if _, ok := r.resolved[current]; ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if targets != nil {
|
||||||
|
r.resolved[current] = struct{}{}
|
||||||
|
for _, target := range targets {
|
||||||
|
if err := r.append(filepath.Join(target, p)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if p == "" {
|
||||||
|
r.resolved[current] = struct{}{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *symlinkResolver) readSymlink(p string, allowWildcard bool) ([]string, error) {
|
||||||
|
realPath := filepath.Join(r.root, p)
|
||||||
|
base := filepath.Base(p)
|
||||||
|
if allowWildcard && containsWildcards(base) {
|
||||||
|
fis, err := ioutil.ReadDir(filepath.Dir(realPath))
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return nil, errors.Wrapf(err, "failed to read dir %s", filepath.Dir(realPath))
|
||||||
|
}
|
||||||
|
var out []string
|
||||||
|
for _, f := range fis {
|
||||||
|
if ok, _ := filepath.Match(base, f.Name()); ok {
|
||||||
|
res, err := r.readSymlink(filepath.Join(filepath.Dir(p), f.Name()), false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out = append(out, res...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fi, err := os.Lstat(realPath)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return nil, errors.Wrapf(err, "failed to lstat %s", realPath)
|
||||||
|
}
|
||||||
|
if fi.Mode()&os.ModeSymlink == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
link, err := os.Readlink(realPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "failed to readlink %s", realPath)
|
||||||
|
}
|
||||||
|
link = filepath.Clean(link)
|
||||||
|
if filepath.IsAbs(link) {
|
||||||
|
return []string{link}, nil
|
||||||
|
}
|
||||||
|
return []string{
|
||||||
|
filepath.Join(string(filepath.Separator), filepath.Join(filepath.Dir(p), link)),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func containsWildcards(name string) bool {
|
||||||
|
isWindows := runtime.GOOS == "windows"
|
||||||
|
for i := 0; i < len(name); i++ {
|
||||||
|
ch := name[i]
|
||||||
|
if ch == '\\' && !isWindows {
|
||||||
|
i++
|
||||||
|
} else if ch == '*' || ch == '?' || ch == '[' {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// dedupePaths expects input as a sorted list
|
||||||
|
func dedupePaths(in []string) []string {
|
||||||
|
out := make([]string, 0, len(in))
|
||||||
|
var last string
|
||||||
|
for _, s := range in {
|
||||||
|
// if one of the paths is root there is no filter
|
||||||
|
if s == "." {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(s, last+string(filepath.Separator)) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, s)
|
||||||
|
last = s
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
|
@ -1,12 +1,12 @@
|
||||||
package fsutil
|
package fsutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"golang.org/x/net/context"
|
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -1,13 +1,13 @@
|
||||||
package fsutil
|
package fsutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"golang.org/x/net/context"
|
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package fsutil
|
package fsutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
@ -9,13 +10,15 @@ import (
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/fileutils"
|
"github.com/docker/docker/pkg/fileutils"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type WalkOpt struct {
|
type WalkOpt struct {
|
||||||
IncludePatterns []string
|
IncludePatterns []string
|
||||||
ExcludePatterns []string
|
ExcludePatterns []string
|
||||||
Map func(*Stat) bool
|
// FollowPaths contains symlinks that are resolved into include patterns
|
||||||
|
// before performing the fs walk
|
||||||
|
FollowPaths []string
|
||||||
|
Map func(*Stat) bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error {
|
func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error {
|
||||||
|
@ -39,8 +42,25 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var includePatterns []string
|
||||||
|
if opt != nil && opt.IncludePatterns != nil {
|
||||||
|
includePatterns = make([]string, len(opt.IncludePatterns))
|
||||||
|
for k := range opt.IncludePatterns {
|
||||||
|
includePatterns[k] = filepath.Clean(opt.IncludePatterns[k])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if opt != nil && opt.FollowPaths != nil {
|
||||||
|
targets, err := FollowLinks(p, opt.FollowPaths)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if targets != nil {
|
||||||
|
includePatterns = append(includePatterns, targets...)
|
||||||
|
includePatterns = dedupePaths(includePatterns)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var lastIncludedDir string
|
var lastIncludedDir string
|
||||||
var includePatternPrefixes []string
|
|
||||||
|
|
||||||
seenFiles := make(map[uint64]string)
|
seenFiles := make(map[uint64]string)
|
||||||
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) {
|
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) {
|
||||||
|
@ -66,34 +86,34 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
|
||||||
}
|
}
|
||||||
|
|
||||||
if opt != nil {
|
if opt != nil {
|
||||||
if opt.IncludePatterns != nil {
|
if includePatterns != nil {
|
||||||
if includePatternPrefixes == nil {
|
skip := false
|
||||||
includePatternPrefixes = patternPrefixes(opt.IncludePatterns)
|
|
||||||
}
|
|
||||||
matched := false
|
|
||||||
if lastIncludedDir != "" {
|
if lastIncludedDir != "" {
|
||||||
if strings.HasPrefix(path, lastIncludedDir+string(filepath.Separator)) {
|
if strings.HasPrefix(path, lastIncludedDir+string(filepath.Separator)) {
|
||||||
matched = true
|
skip = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !matched {
|
|
||||||
for _, p := range opt.IncludePatterns {
|
if !skip {
|
||||||
if m, _ := filepath.Match(p, path); m {
|
matched := false
|
||||||
|
partial := true
|
||||||
|
for _, p := range includePatterns {
|
||||||
|
if ok, p := matchPrefix(p, path); ok {
|
||||||
matched = true
|
matched = true
|
||||||
break
|
if !p {
|
||||||
|
partial = false
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if matched && fi.IsDir() {
|
if !matched {
|
||||||
lastIncludedDir = path
|
if fi.IsDir() {
|
||||||
}
|
|
||||||
}
|
|
||||||
if !matched {
|
|
||||||
if !fi.IsDir() {
|
|
||||||
return nil
|
|
||||||
} else {
|
|
||||||
if noPossiblePrefixMatch(path, includePatternPrefixes) {
|
|
||||||
return filepath.SkipDir
|
return filepath.SkipDir
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !partial && fi.IsDir() {
|
||||||
|
lastIncludedDir = path
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -131,13 +151,13 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
|
||||||
stat := &Stat{
|
stat := &Stat{
|
||||||
Path: path,
|
Path: path,
|
||||||
Mode: uint32(fi.Mode()),
|
Mode: uint32(fi.Mode()),
|
||||||
Size_: fi.Size(),
|
|
||||||
ModTime: fi.ModTime().UnixNano(),
|
ModTime: fi.ModTime().UnixNano(),
|
||||||
}
|
}
|
||||||
|
|
||||||
setUnixOpt(fi, stat, path, seenFiles)
|
setUnixOpt(fi, stat, path, seenFiles)
|
||||||
|
|
||||||
if !fi.IsDir() {
|
if !fi.IsDir() {
|
||||||
|
stat.Size_ = fi.Size()
|
||||||
if fi.Mode()&os.ModeSymlink != 0 {
|
if fi.Mode()&os.ModeSymlink != 0 {
|
||||||
link, err := os.Readlink(origpath)
|
link, err := os.Readlink(origpath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -199,29 +219,28 @@ func (s *StatInfo) Sys() interface{} {
|
||||||
return s.Stat
|
return s.Stat
|
||||||
}
|
}
|
||||||
|
|
||||||
func patternPrefixes(patterns []string) []string {
|
func matchPrefix(pattern, name string) (bool, bool) {
|
||||||
pfxs := make([]string, 0, len(patterns))
|
count := strings.Count(name, string(filepath.Separator))
|
||||||
for _, ptrn := range patterns {
|
partial := false
|
||||||
idx := strings.IndexFunc(ptrn, func(ch rune) bool {
|
if strings.Count(pattern, string(filepath.Separator)) > count {
|
||||||
return ch == '*' || ch == '?' || ch == '[' || ch == '\\'
|
pattern = trimUntilIndex(pattern, string(filepath.Separator), count)
|
||||||
})
|
partial = true
|
||||||
if idx == -1 {
|
|
||||||
idx = len(ptrn)
|
|
||||||
}
|
|
||||||
pfxs = append(pfxs, ptrn[:idx])
|
|
||||||
}
|
}
|
||||||
return pfxs
|
m, _ := filepath.Match(pattern, name)
|
||||||
|
return m, partial
|
||||||
}
|
}
|
||||||
|
|
||||||
func noPossiblePrefixMatch(p string, pfxs []string) bool {
|
func trimUntilIndex(str, sep string, count int) string {
|
||||||
for _, pfx := range pfxs {
|
s := str
|
||||||
chk := p
|
i := 0
|
||||||
if len(pfx) < len(p) {
|
c := 0
|
||||||
chk = p[:len(pfx)]
|
for {
|
||||||
}
|
idx := strings.Index(s, sep)
|
||||||
if strings.HasPrefix(pfx, chk) {
|
s = s[idx+len(sep):]
|
||||||
return false
|
i += idx + len(sep)
|
||||||
|
c++
|
||||||
|
if c > count {
|
||||||
|
return str[:i-len(sep)]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,21 @@
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2017 Tõnis Tiigi
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
|
@ -0,0 +1,117 @@
|
||||||
|
/*
|
||||||
|
Simple byte size formatting.
|
||||||
|
|
||||||
|
This package implements types that can be used in stdlib formatting functions
|
||||||
|
like `fmt.Printf` to control the output of the expected printed string.
|
||||||
|
|
||||||
|
|
||||||
|
Floating point flags %f and %g print the value in using the correct unit
|
||||||
|
suffix. Decimal units are default, # switches to binary units. If a value is
|
||||||
|
best represented as full bytes, integer bytes are printed instead.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
fmt.Printf("%.2f", 123 * B) => "123B"
|
||||||
|
fmt.Printf("%.2f", 1234 * B) => "1.23kB"
|
||||||
|
fmt.Printf("%g", 1200 * B) => "1.2kB"
|
||||||
|
fmt.Printf("%#g", 1024 * B) => "1KiB"
|
||||||
|
|
||||||
|
|
||||||
|
Integer flag %d always prints the value in bytes. # flag adds an unit prefix.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
fmt.Printf("%d", 1234 * B) => "1234"
|
||||||
|
fmt.Printf("%#d", 1234 * B) => "1234B"
|
||||||
|
|
||||||
|
%v is equal to %g
|
||||||
|
|
||||||
|
*/
|
||||||
|
package units
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Bytes int64
|
||||||
|
|
||||||
|
const (
|
||||||
|
B Bytes = 1 << (10 * iota)
|
||||||
|
KiB
|
||||||
|
MiB
|
||||||
|
GiB
|
||||||
|
TiB
|
||||||
|
PiB
|
||||||
|
EiB
|
||||||
|
|
||||||
|
KB = 1e3 * B
|
||||||
|
MB = 1e3 * KB
|
||||||
|
GB = 1e3 * MB
|
||||||
|
TB = 1e3 * GB
|
||||||
|
PB = 1e3 * TB
|
||||||
|
EB = 1e3 * PB
|
||||||
|
)
|
||||||
|
|
||||||
|
var units = map[bool][]string{
|
||||||
|
false: []string{
|
||||||
|
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB",
|
||||||
|
},
|
||||||
|
true: []string{
|
||||||
|
"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b Bytes) Format(f fmt.State, c rune) {
|
||||||
|
switch c {
|
||||||
|
case 'f', 'g':
|
||||||
|
fv, unit, ok := b.floatValue(f.Flag('#'))
|
||||||
|
if !ok {
|
||||||
|
b.formatInt(f, 'd', true)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
big.NewFloat(fv).Format(f, c)
|
||||||
|
io.WriteString(f, unit)
|
||||||
|
case 'd':
|
||||||
|
b.formatInt(f, c, f.Flag('#'))
|
||||||
|
default:
|
||||||
|
if f.Flag('#') {
|
||||||
|
fmt.Fprintf(f, "bytes(%d)", int64(b))
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(f, "%g", b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b Bytes) formatInt(f fmt.State, c rune, withUnit bool) {
|
||||||
|
big.NewInt(int64(b)).Format(f, c)
|
||||||
|
if withUnit {
|
||||||
|
io.WriteString(f, "B")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b Bytes) floatValue(binary bool) (float64, string, bool) {
|
||||||
|
i := 0
|
||||||
|
var baseUnit Bytes = 1
|
||||||
|
if b < 0 {
|
||||||
|
baseUnit *= -1
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
next := baseUnit
|
||||||
|
if binary {
|
||||||
|
next *= 1 << 10
|
||||||
|
} else {
|
||||||
|
next *= 1e3
|
||||||
|
}
|
||||||
|
if (baseUnit > 0 && b >= next) || (baseUnit < 0 && b <= next) {
|
||||||
|
i++
|
||||||
|
baseUnit = next
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
return 0, "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
return float64(b) / math.Abs(float64(baseUnit)), units[binary][i], true
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,29 @@
|
||||||
|
#### Simple byte size formatting.
|
||||||
|
|
||||||
|
This package implements types that can be used in stdlib formatting functions
|
||||||
|
like `fmt.Printf` to control the output of the expected printed string.
|
||||||
|
|
||||||
|
Floating point flags `%f` and %g print the value in using the correct unit
|
||||||
|
suffix. Decimal units are default, `#` switches to binary units. If a value is
|
||||||
|
best represented as full bytes, integer bytes are printed instead.
|
||||||
|
|
||||||
|
##### Examples:
|
||||||
|
|
||||||
|
```
|
||||||
|
fmt.Printf("%.2f", 123 * B) => "123B"
|
||||||
|
fmt.Printf("%.2f", 1234 * B) => "1.23kB"
|
||||||
|
fmt.Printf("%g", 1200 * B) => "1.2kB"
|
||||||
|
fmt.Printf("%#g", 1024 * B) => "1KiB"
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
Integer flag `%d` always prints the value in bytes. `#` flag adds an unit prefix.
|
||||||
|
|
||||||
|
##### Examples:
|
||||||
|
|
||||||
|
```
|
||||||
|
fmt.Printf("%d", 1234 * B) => "1234"
|
||||||
|
fmt.Printf("%#d", 1234 * B) => "1234B"
|
||||||
|
```
|
||||||
|
|
||||||
|
`%v` is equal to `%g`
|
Loading…
Reference in New Issue