vendor: update buildkit to 62e55427

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Author: Tonis Tiigi
Date: 2019-04-02 23:23:23 -07:00
Parent: f40f9c240a
Commit: 198407c56b

99 changed files with 15519 additions and 1677 deletions


@@ -662,13 +662,22 @@ func parseOutputs(inp []string) ([]types.ImageBuildOutput, error) {
if err != nil {
return nil, err
}
if len(fields) == 1 && fields[0] == s {
outs = append(outs, types.ImageBuildOutput{
Type: "local",
Attrs: map[string]string{
"dest": s,
},
})
if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") {
if s == "-" {
outs = append(outs, types.ImageBuildOutput{
Type: "tar",
Attrs: map[string]string{
"dest": s,
},
})
} else {
outs = append(outs, types.ImageBuildOutput{
Type: "local",
Attrs: map[string]string{
"dest": s,
},
})
}
continue
}
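For the record, the rule this hunk adds can be sketched in isolation. `parseShorthandOutput` below is a hypothetical helper standing in for the real csv-driven parser: one csv field that equals the raw input and does not start with `type=` is treated as a bare destination, with `-` selecting the tar exporter and any other path the local directory exporter.

```go
package main

import (
	"fmt"
	"strings"
)

// parseShorthandOutput mirrors the shorthand branch above (hypothetical
// helper, not part of the CLI).
func parseShorthandOutput(s string) (typ, dest string, ok bool) {
	if strings.HasPrefix(s, "type=") || strings.Contains(s, ",") {
		return "", "", false // handled by the regular key=value parser
	}
	if s == "-" {
		return "tar", s, true // tarball streamed to stdout
	}
	return "local", s, true // files written under the given directory
}

func main() {
	for _, s := range []string{"-", "out/", "type=local,dest=out"} {
		typ, dest, ok := parseShorthandOutput(s)
		fmt.Printf("%q => type=%s dest=%s shorthand=%v\n", s, typ, dest, ok)
	}
}
```

In CLI terms, `--output -` streams the build result as a tarball to stdout, while `--output out/` exports the files into `out/`.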


@@ -67,6 +67,8 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
contextDir string
)
stdoutUsed := false
switch {
case options.contextFromStdin():
if options.dockerfileFromStdin() {
@@ -123,7 +125,8 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
}
for _, out := range outputs {
if out.Type == "local" {
switch out.Type {
case "local":
// dest is handled on client side for local exporter
outDir, ok := out.Attrs["dest"]
if !ok {
@@ -131,6 +134,27 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
}
delete(out.Attrs, "dest")
s.Allow(filesync.NewFSSyncTargetDir(outDir))
case "tar":
// dest is handled on client side for tar exporter
outFile, ok := out.Attrs["dest"]
if !ok {
return errors.Errorf("dest is required for tar output")
}
var w io.WriteCloser
if outFile == "-" {
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
return errors.Errorf("refusing to write output to console")
}
w = os.Stdout
stdoutUsed = true
} else {
f, err := os.Create(outFile)
if err != nil {
return errors.Wrapf(err, "failed to open %s", outFile)
}
w = f
}
s.Allow(filesync.NewFSSyncTarget(w))
}
}
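The `-` branch above refuses to dump a raw tarball onto an interactive terminal. A rough standalone equivalent of that guard, substituting `golang.org/x/term` for containerd's `console` package:

```go
package example

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/term"
)

// tarDest mirrors the dest handling above: "-" means stdout, but only
// when stdout is not a terminal; anything else is created as a file.
func tarDest(outFile string) (io.WriteCloser, error) {
	if outFile == "-" {
		if term.IsTerminal(int(os.Stdout.Fd())) {
			return nil, fmt.Errorf("refusing to write output to console")
		}
		return os.Stdout, nil
	}
	f, err := os.Create(outFile)
	if err != nil {
		return nil, fmt.Errorf("failed to open %s: %w", outFile, err)
	}
	return f, nil
}
```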
@@ -200,14 +224,14 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
buildOptions.SessionID = s.ID()
buildOptions.BuildID = buildID
buildOptions.Outputs = outputs
return doBuild(ctx, eg, dockerCli, options, buildOptions)
return doBuild(ctx, eg, dockerCli, stdoutUsed, options, buildOptions)
})
return eg.Wait()
}
//nolint: gocyclo
func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, options buildOptions, buildOptions types.ImageBuildOptions) (finalErr error) {
func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, stdoutUsed bool, options buildOptions, buildOptions types.ImageBuildOptions) (finalErr error) {
response, err := dockerCli.Client().ImageBuild(context.Background(), nil, buildOptions)
if err != nil {
return err
@@ -265,7 +289,7 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
return nil
})
} else {
displayStatus(os.Stdout, t.displayCh)
displayStatus(os.Stderr, t.displayCh)
}
defer close(t.displayCh)
@@ -300,7 +324,7 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
//
// TODO: we may want to use Aux messages with ID "moby.image.id" regardless of options.quiet (i.e. don't send HTTP param q=1)
// instead of assuming that output is image ID if options.quiet.
if options.quiet {
if options.quiet && !stdoutUsed {
imageID = buf.String()
fmt.Fprint(dockerCli.Out(), imageID)
}
@@ -317,7 +341,7 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
return err
}
func resetUIDAndGID(s *fsutiltypes.Stat) bool {
func resetUIDAndGID(_ string, s *fsutiltypes.Stat) bool {
s.Uid = 0
s.Gid = 0
return true


@@ -4,7 +4,7 @@ github.com/asaskevich/govalidator f9ffefc3facfbe0caee3fea233cbb6e8208f4541
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/beorn7/perks 3a771d992973f24aa725d07868b467d1ddfceafb
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
github.com/containerd/containerd a15b6e2097c48b632dbdc63254bad4c62b69e709
github.com/containerd/containerd ceba56893a76f22cf0126c46d835c80fb3833408
github.com/containerd/continuity bd77b46c8352f74eb12c85bdc01f4b90f69d66b4
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
@@ -53,7 +53,7 @@ github.com/Microsoft/hcsshim v0.8.6
github.com/Microsoft/go-winio v0.4.12
github.com/miekg/pkcs11 6120d95c0e9576ccf4a78ba40855809dca31a9ed
github.com/mitchellh/mapstructure f15292f7a699fcc1a38a80977f80a046874ba8ac
github.com/moby/buildkit 520201006c9dc676da9cf9655337ac711f7f127d
github.com/moby/buildkit 62e5542790fe1d1cf9ca6302d5a8b988df99159a
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
@@ -76,7 +76,9 @@ github.com/spf13/cobra v0.0.3
github.com/spf13/pflag 4cb166e4f25ac4e8016a3595bbf7ea2e9aa85a2c https://github.com/thaJeztah/pflag.git
github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
github.com/theupdateframework/notary v0.6.1
github.com/tonistiigi/fsutil 1ec1983587cde7e8ac2978e354ff5360af622464
github.com/tonistiigi/fsutil 3bbb99cdbd76619ab717299830c60f6f2a533a6b
github.com/jaguilar/vt100 ad4c4a5743050fb7f88ce968dca9422f72a0e3f2 git://github.com/tonistiigi/vt100.git
github.com/gofrs/flock 7f43ea2e6a643ad441fc12d0ecc0d3388b300c53 # v0.7.0
github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b


@@ -20,8 +20,8 @@ import (
"context"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"sync"
"github.com/containerd/containerd/defaults"
@@ -222,46 +222,76 @@ type DirectIO struct {
cio
}
var _ IO = &DirectIO{}
var (
_ IO = &DirectIO{}
_ IO = &logURI{}
)
// LogFile creates a file on disk that logs the task's STDOUT,STDERR.
// If the log file already exists, the logs will be appended to the file.
func LogFile(path string) Creator {
// LogURI provides the raw logging URI
func LogURI(uri *url.URL) Creator {
return func(_ string) (IO, error) {
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return nil, err
}
f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
f.Close()
return &logIO{
return &logURI{
config: Config{
Stdout: path,
Stderr: path,
Stdout: uri.String(),
Stderr: uri.String(),
},
}, nil
}
}
type logIO struct {
// BinaryIO forwards container STDOUT|STDERR directly to a logging binary
func BinaryIO(binary string, args map[string]string) Creator {
return func(_ string) (IO, error) {
uri := &url.URL{
Scheme: "binary",
Host: binary,
}
// url.Values is a copy; mutate it, then write it back to the URL
q := uri.Query()
for k, v := range args {
q.Set(k, v)
}
uri.RawQuery = q.Encode()
return &logURI{
config: Config{
Stdout: uri.String(),
Stderr: uri.String(),
},
}, nil
}
}
// LogFile creates a file on disk that logs the task's STDOUT,STDERR.
// If the log file already exists, the logs will be appended to the file.
func LogFile(path string) Creator {
return func(_ string) (IO, error) {
uri := &url.URL{
Scheme: "file",
Host: path,
}
return &logURI{
config: Config{
Stdout: uri.String(),
Stderr: uri.String(),
},
}, nil
}
}
type logURI struct {
config Config
}
func (l *logIO) Config() Config {
func (l *logURI) Config() Config {
return l.config
}
func (l *logIO) Cancel() {
func (l *logURI) Cancel() {
}
func (l *logIO) Wait() {
func (l *logURI) Wait() {
}
func (l *logIO) Close() error {
func (l *logURI) Close() error {
return nil
}
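On the client side these creators plug straight into task creation. A hedged usage sketch; the logging binary path and its argument are assumptions:

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
)

// startWithBinaryLogger starts a task whose STDOUT/STDERR are handed to a
// logging binary; the shim resolves the binary://... URI that
// cio.BinaryIO encodes into the task's IO config.
func startWithBinaryLogger(ctx context.Context, container containerd.Container) error {
	task, err := container.NewTask(ctx,
		cio.BinaryIO("/usr/local/bin/journald-logger", map[string]string{"tag": "web"}))
	if err != nil {
		return err
	}
	return task.Start(ctx)
}
```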


@@ -300,6 +300,10 @@ type RemoteContext struct {
// MaxConcurrentDownloads is the max concurrent content downloads for each pull.
MaxConcurrentDownloads int
// AppendDistributionSourceLabel allows fetcher to add distribute source
// label for each blob content, which doesn't work for legacy schema1.
AppendDistributionSourceLabel bool
}
func defaultRemoteContext() *RemoteContext {


@@ -194,3 +194,12 @@ func WithMaxConcurrentDownloads(max int) RemoteOpt {
return nil
}
}
// WithAppendDistributionSourceLabel allows fetcher to add distribute source
// label for each blob content, which doesn't work for legacy schema1.
func WithAppendDistributionSourceLabel() RemoteOpt {
return func(_ *Client, c *RemoteContext) error {
c.AppendDistributionSourceLabel = true
return nil
}
}
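Enabling the option is one extra argument to `Pull`. A sketch assuming a connected client; the image ref is illustrative:

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
)

// pullWithSourceLabels pulls with distribution-source labelling enabled,
// so each fetched blob ends up labelled, e.g.
// containerd.io/distribution.source.docker.io=library/alpine.
func pullWithSourceLabels(ctx context.Context, client *containerd.Client) (containerd.Image, error) {
	return client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack,
		containerd.WithAppendDistributionSourceLabel(),
	)
}
```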


@@ -0,0 +1,51 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"sync"
"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
)
// Handles locking references
var (
// locks lets us lock in process
locks = map[string]struct{}{}
locksMu sync.Mutex
)
func tryLock(ref string) error {
locksMu.Lock()
defer locksMu.Unlock()
if _, ok := locks[ref]; ok {
return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", ref)
}
locks[ref] = struct{}{}
return nil
}
func unlock(ref string) {
locksMu.Lock()
defer locksMu.Unlock()
delete(locks, ref)
}


@@ -0,0 +1,40 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"os"
)
// sizeReaderAt implements content.ReaderAt over an already-opened file of
// known size, sharing the file handle across calls to ReadAt.
type sizeReaderAt struct {
size int64
fp *os.File
}
func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) {
return ra.fp.ReadAt(p, offset)
}
func (ra sizeReaderAt) Size() int64 {
return ra.size
}
func (ra sizeReaderAt) Close() error {
return ra.fp.Close()
}


@@ -0,0 +1,656 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/filters"
"github.com/containerd/containerd/log"
"github.com/containerd/continuity"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
var bufPool = sync.Pool{
New: func() interface{} {
buffer := make([]byte, 1<<20)
return &buffer
},
}
// LabelStore is used to store mutable labels for digests
type LabelStore interface {
// Get returns all the labels for the given digest
Get(digest.Digest) (map[string]string, error)
// Set sets all the labels for a given digest
Set(digest.Digest, map[string]string) error
// Update replaces the given labels for a digest,
// a key with an empty value removes a label.
Update(digest.Digest, map[string]string) (map[string]string, error)
}
// Store is a digest-keyed store for content. All data written into the store is
// stored under a verifiable digest.
//
// Store can generally support multi-reader, single-writer ingest of data,
// including resumable ingest.
type store struct {
root string
ls LabelStore
}
// NewStore returns a local content store
func NewStore(root string) (content.Store, error) {
return NewLabeledStore(root, nil)
}
// NewLabeledStore returns a new content store using the provided label store
//
// Note: content stores which are used underneath a metadata store may not
// require labels and should use `NewStore`. `NewLabeledStore` is primarily
// useful for tests or standalone implementations.
func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
return nil, err
}
return &store{
root: root,
ls: ls,
}, nil
}
func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
p := s.blobPath(dgst)
fi, err := os.Stat(p)
if err != nil {
if os.IsNotExist(err) {
err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
}
return content.Info{}, err
}
var labels map[string]string
if s.ls != nil {
labels, err = s.ls.Get(dgst)
if err != nil {
return content.Info{}, err
}
}
return s.info(dgst, fi, labels), nil
}
func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info {
return content.Info{
Digest: dgst,
Size: fi.Size(),
CreatedAt: fi.ModTime(),
UpdatedAt: getATime(fi),
Labels: labels,
}
}
// ReaderAt returns an io.ReaderAt for the blob.
func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
p := s.blobPath(desc.Digest)
fi, err := os.Stat(p)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p)
}
fp, err := os.Open(p)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p)
}
return sizeReaderAt{size: fi.Size(), fp: fp}, nil
}
// Delete removes a blob by its digest.
//
// While this is safe to do concurrently, safe exist-removal logic must hold
// some global lock on the store.
func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
if err := os.RemoveAll(s.blobPath(dgst)); err != nil {
if !os.IsNotExist(err) {
return err
}
return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
}
return nil
}
func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
if s.ls == nil {
return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
}
p := s.blobPath(info.Digest)
fi, err := os.Stat(p)
if err != nil {
if os.IsNotExist(err) {
err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest)
}
return content.Info{}, err
}
var (
all bool
labels map[string]string
)
if len(fieldpaths) > 0 {
for _, path := range fieldpaths {
if strings.HasPrefix(path, "labels.") {
if labels == nil {
labels = map[string]string{}
}
key := strings.TrimPrefix(path, "labels.")
labels[key] = info.Labels[key]
continue
}
switch path {
case "labels":
all = true
labels = info.Labels
default:
return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
}
}
} else {
all = true
labels = info.Labels
}
if all {
err = s.ls.Set(info.Digest, labels)
} else {
labels, err = s.ls.Update(info.Digest, labels)
}
if err != nil {
return content.Info{}, err
}
info = s.info(info.Digest, fi, labels)
info.UpdatedAt = time.Now()
if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil {
log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest)
}
return info, nil
}
func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
// TODO: Support filters
root := filepath.Join(s.root, "blobs")
var alg digest.Algorithm
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if !fi.IsDir() && !alg.Available() {
return nil
}
// TODO(stevvooe): There are few more cases with subdirs that should be
// handled in case the layout gets corrupted. This isn't strict enough
// and may spew bad data.
if path == root {
return nil
}
if filepath.Dir(path) == root {
alg = digest.Algorithm(filepath.Base(path))
if !alg.Available() {
alg = ""
return filepath.SkipDir
}
// descending into a hash directory
return nil
}
dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
if err := dgst.Validate(); err != nil {
// log error but don't report
log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
// if we see this, it could mean some sort of corruption of the
// store or extra paths not expected previously.
}
var labels map[string]string
if s.ls != nil {
labels, err = s.ls.Get(dgst)
if err != nil {
return err
}
}
return fn(s.info(dgst, fi, labels))
})
}
func (s *store) Status(ctx context.Context, ref string) (content.Status, error) {
return s.status(s.ingestRoot(ref))
}
func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
fp, err := os.Open(filepath.Join(s.root, "ingest"))
if err != nil {
return nil, err
}
defer fp.Close()
fis, err := fp.Readdir(-1)
if err != nil {
return nil, err
}
filter, err := filters.ParseAll(fs...)
if err != nil {
return nil, err
}
var active []content.Status
for _, fi := range fis {
p := filepath.Join(s.root, "ingest", fi.Name())
stat, err := s.status(p)
if err != nil {
if !os.IsNotExist(err) {
return nil, err
}
// TODO(stevvooe): This is a common error if uploads are being
// completed while making this listing. Need to consider taking a
// lock on the whole store to coordinate this aspect.
//
// Another option is to cleanup downloads asynchronously and
// coordinate this method with the cleanup process.
//
// For now, we just skip them, as they really don't exist.
continue
}
if filter.Match(adaptStatus(stat)) {
active = append(active, stat)
}
}
return active, nil
}
// WalkStatusRefs is used to walk all status references
// Failed status reads will be logged and ignored, if
// this function is called while references are being altered,
// these error messages may be produced.
func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error {
fp, err := os.Open(filepath.Join(s.root, "ingest"))
if err != nil {
return err
}
defer fp.Close()
fis, err := fp.Readdir(-1)
if err != nil {
return err
}
for _, fi := range fis {
rf := filepath.Join(s.root, "ingest", fi.Name(), "ref")
ref, err := readFileString(rf)
if err != nil {
log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref")
continue
}
if err := fn(ref); err != nil {
return err
}
}
return nil
}
// status works like stat above, except that it uses the path to the ingest.
func (s *store) status(ingestPath string) (content.Status, error) {
dp := filepath.Join(ingestPath, "data")
fi, err := os.Stat(dp)
if err != nil {
if os.IsNotExist(err) {
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
}
return content.Status{}, err
}
ref, err := readFileString(filepath.Join(ingestPath, "ref"))
if err != nil {
if os.IsNotExist(err) {
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
}
return content.Status{}, err
}
startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
if err != nil {
return content.Status{}, errors.Wrapf(err, "could not read startedat")
}
updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
if err != nil {
return content.Status{}, errors.Wrapf(err, "could not read updatedat")
}
// because we don't write updatedat on every write, the mod time may
// actually be more up to date.
if fi.ModTime().After(updatedAt) {
updatedAt = fi.ModTime()
}
return content.Status{
Ref: ref,
Offset: fi.Size(),
Total: s.total(ingestPath),
UpdatedAt: updatedAt,
StartedAt: startedAt,
}, nil
}
func adaptStatus(status content.Status) filters.Adaptor {
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "ref":
return status.Ref, true
}
return "", false
})
}
// total attempts to resolve the total expected size for the write.
func (s *store) total(ingestPath string) int64 {
totalS, err := readFileString(filepath.Join(ingestPath, "total"))
if err != nil {
return 0
}
total, err := strconv.ParseInt(totalS, 10, 64)
if err != nil {
// represents a corrupted file, should probably remove.
return 0
}
return total
}
// Writer begins or resumes the active writer identified by ref. If the writer
// is already in use, an error is returned. Only one writer may be in use per
// ref at a time.
//
// The argument `ref` is used to uniquely identify a long-lived writer transaction.
func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
var wOpts content.WriterOpts
for _, opt := range opts {
if err := opt(&wOpts); err != nil {
return nil, err
}
}
// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
if wOpts.Ref == "" {
return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
}
var lockErr error
for count := uint64(0); count < 10; count++ {
time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
if err := tryLock(wOpts.Ref); err != nil {
if !errdefs.IsUnavailable(err) {
return nil, err
}
lockErr = err
} else {
lockErr = nil
break
}
}
if lockErr != nil {
return nil, lockErr
}
w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
if err != nil {
unlock(wOpts.Ref)
return nil, err
}
return w, nil // lock is now held by w.
}
// writer provides the main implementation of the Writer method. The caller
// must hold the lock correctly and release on error if there is a problem.
func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
// TODO(stevvooe): Need to actually store expected here. We have
// code in the service that shouldn't be dealing with this.
if expected != "" {
p := s.blobPath(expected)
if _, err := os.Stat(p); err == nil {
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
}
}
path, refp, data := s.ingestPaths(ref)
var (
digester = digest.Canonical.Digester()
offset int64
startedAt time.Time
updatedAt time.Time
)
// ensure that the ingest path has been created.
if err := os.Mkdir(path, 0755); err != nil {
if !os.IsExist(err) {
return nil, err
}
status, err := s.status(path)
if err != nil {
return nil, errors.Wrap(err, "failed reading status of resume write")
}
if ref != status.Ref {
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
// layout corruption or a hash collision for the ref key.
return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
}
if total > 0 && status.Total > 0 && total != status.Total {
return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
}
// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
fp, err := os.Open(data)
if err != nil {
return nil, err
}
p := bufPool.Get().(*[]byte)
offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
bufPool.Put(p)
fp.Close()
if err != nil {
return nil, err
}
updatedAt = status.UpdatedAt
startedAt = status.StartedAt
total = status.Total
} else {
startedAt = time.Now()
updatedAt = startedAt
// the ingest is new, we need to setup the target location.
// write the ref to a file for later use
if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
return nil, err
}
if writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
return nil, err
}
if writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
return nil, err
}
if total > 0 {
if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
return nil, err
}
}
}
fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
return nil, errors.Wrap(err, "failed to open data file")
}
if _, err := fp.Seek(offset, io.SeekStart); err != nil {
return nil, errors.Wrap(err, "could not seek to current write offset")
}
return &writer{
s: s,
fp: fp,
ref: ref,
path: path,
offset: offset,
total: total,
digester: digester,
startedAt: startedAt,
updatedAt: updatedAt,
}, nil
}
// Abort an active transaction keyed by ref. If the ingest is active, it will
// be cancelled. Any resources associated with the ingest will be cleaned.
func (s *store) Abort(ctx context.Context, ref string) error {
root := s.ingestRoot(ref)
if err := os.RemoveAll(root); err != nil {
if os.IsNotExist(err) {
return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
}
return err
}
return nil
}
func (s *store) blobPath(dgst digest.Digest) string {
return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
}
func (s *store) ingestRoot(ref string) string {
dgst := digest.FromString(ref)
return filepath.Join(s.root, "ingest", dgst.Hex())
}
// ingestPaths returns the set of paths used by an ingest. The paths are the following:
//
// - root: entire ingest directory
// - ref: name of the starting ref, must be unique
// - data: file where data is written
//
func (s *store) ingestPaths(ref string) (string, string, string) {
var (
fp = s.ingestRoot(ref)
rp = filepath.Join(fp, "ref")
dp = filepath.Join(fp, "data")
)
return fp, rp, dp
}
func readFileString(path string) (string, error) {
p, err := ioutil.ReadFile(path)
return string(p), err
}
// readFileTimestamp reads a file with just a timestamp present.
func readFileTimestamp(p string) (time.Time, error) {
b, err := ioutil.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
}
return time.Time{}, err
}
var t time.Time
if err := t.UnmarshalText(b); err != nil {
return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
}
return t, nil
}
func writeTimestampFile(p string, t time.Time) error {
b, err := t.MarshalText()
if err != nil {
return err
}
return continuity.AtomicWriteFile(p, b, 0666)
}
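Tying this file together, a minimal round trip through the store: open it, ingest a blob under a ref, commit it by digest, and stat it back. A sketch under the assumption that `/tmp/content-example` is writable:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	ctx := context.Background()

	// NewStore creates the ingest/ directory under the root
	cs, err := local.NewStore("/tmp/content-example")
	if err != nil {
		panic(err)
	}

	blob := []byte("hello world")
	desc := ocispec.Descriptor{
		MediaType: "application/octet-stream",
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
	}

	// the ref serializes concurrent writers through tryLock above
	w, err := cs.Writer(ctx, content.WithRef("example-ref"), content.WithDescriptor(desc))
	if err != nil {
		panic(err)
	}
	defer w.Close()
	if _, err := w.Write(blob); err != nil {
		panic(err)
	}
	// Commit verifies size and digest, then renames ingest/.../data into blobs/
	if err := w.Commit(ctx, desc.Size, desc.Digest); err != nil {
		panic(err)
	}

	info, err := cs.Info(ctx, desc.Digest)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Digest, info.Size)
}
```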


@@ -0,0 +1,35 @@
// +build linux solaris darwin freebsd
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"os"
"syscall"
"time"
"github.com/containerd/containerd/sys"
)
func getATime(fi os.FileInfo) time.Time {
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
return sys.StatATimeAsTime(st)
}
return fi.ModTime()
}


@@ -0,0 +1,26 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"os"
"time"
)
func getATime(fi os.FileInfo) time.Time {
return fi.ModTime()
}


@@ -0,0 +1,206 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"io"
"os"
"path/filepath"
"runtime"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// writer represents a write transaction against the blob store.
type writer struct {
s *store
fp *os.File // opened data file
path string // path to writer dir
ref string // ref key
offset int64
total int64
digester digest.Digester
startedAt time.Time
updatedAt time.Time
}
func (w *writer) Status() (content.Status, error) {
return content.Status{
Ref: w.ref,
Offset: w.offset,
Total: w.total,
StartedAt: w.startedAt,
UpdatedAt: w.updatedAt,
}, nil
}
// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *writer) Digest() digest.Digest {
return w.digester.Digest()
}
// Write p to the transaction.
//
// Note that writes are unbuffered to the backing file. When writing, it is
// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
func (w *writer) Write(p []byte) (n int, err error) {
n, err = w.fp.Write(p)
w.digester.Hash().Write(p[:n])
w.offset += int64(n)
w.updatedAt = time.Now()
return n, err
}
func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
var base content.Info
for _, opt := range opts {
if err := opt(&base); err != nil {
return err
}
}
// Ensure even on error the writer is fully closed
defer unlock(w.ref)
fp := w.fp
w.fp = nil
if fp == nil {
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
}
if err := fp.Sync(); err != nil {
fp.Close()
return errors.Wrap(err, "sync failed")
}
fi, err := fp.Stat()
closeErr := fp.Close()
if err != nil {
return errors.Wrap(err, "stat on ingest file failed")
}
if closeErr != nil {
return errors.Wrap(err, "failed to close ingest file")
}
if size > 0 && size != fi.Size() {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size)
}
dgst := w.digester.Digest()
if expected != "" && expected != dgst {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
}
var (
ingest = filepath.Join(w.path, "data")
target = w.s.blobPath(dgst)
)
// make sure parent directories of blob exist
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
return err
}
if _, err := os.Stat(target); err == nil {
// collision with the target file!
if err := os.RemoveAll(w.path); err != nil {
log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
}
return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
}
if err := os.Rename(ingest, target); err != nil {
return err
}
// Ingest has now been made available in the content store, attempt to complete
// setting metadata but errors should only be logged and not returned since
// the content store cannot be cleanly rolled back.
commitTime := time.Now()
if err := os.Chtimes(target, commitTime, commitTime); err != nil {
log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time")
}
// clean up!!
if err := os.RemoveAll(w.path); err != nil {
log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
}
if w.s.ls != nil && base.Labels != nil {
if err := w.s.ls.Set(dgst, base.Labels); err != nil {
log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels")
}
}
// change to readonly, more important for read, but provides _some_
// protection from this point on. We use the existing perms with a mask
// only allowing reads honoring the umask on creation.
//
// This removes write and exec, only allowing read per the creation umask.
//
// NOTE: Windows does not support this operation
if runtime.GOOS != "windows" {
if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly")
}
}
return nil
}
// Close the writer, flushing any unwritten data and leaving the progress
// intact.
//
// If one needs to resume the transaction, a new writer can be obtained from
// `Ingester.Writer` using the same key. The write can then be continued
// from where it was left off.
//
// To abandon a transaction completely, first call Close, then `IngestManager.Abort`
// to clean up the associated resources.
func (w *writer) Close() (err error) {
if w.fp != nil {
w.fp.Sync()
err = w.fp.Close()
writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt)
w.fp = nil
unlock(w.ref)
return
}
return nil
}
func (w *writer) Truncate(size int64) error {
if size != 0 {
return errors.New("Truncate: unsupported size")
}
w.offset = 0
w.digester.Hash().Reset()
if _, err := w.fp.Seek(0, io.SeekStart); err != nil {
return err
}
return w.fp.Truncate(0)
}


@@ -25,14 +25,16 @@ import (
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/archive"
"github.com/containerd/containerd/platforms"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
type importOpts struct {
indexName string
imageRefT func(string) string
dgstRefT func(digest.Digest) string
indexName string
imageRefT func(string) string
dgstRefT func(digest.Digest) string
allPlatforms bool
}
// ImportOpt allows the caller to specify import specific options
@@ -64,6 +66,14 @@ func WithIndexName(name string) ImportOpt {
}
}
// WithAllPlatforms is used to import content for all platforms.
func WithAllPlatforms(allPlatforms bool) ImportOpt {
return func(c *importOpts) error {
c.allPlatforms = allPlatforms
return nil
}
}
// Import imports an image from a Tar stream using reader.
// Caller needs to specify importer. Future version may use oci.v1 as the default.
// Note that unreferenced blobs may be imported to the content store as well.
@@ -98,6 +108,10 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
Target: index,
})
}
var platformMatcher = platforms.All
if !iopts.allPlatforms {
platformMatcher = platforms.Default()
}
var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
// Only save images at top level
@@ -141,6 +155,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
return idx.Manifests, nil
}
handler = images.FilterPlatforms(handler, platformMatcher)
handler = images.SetChildrenLabels(cs, handler)
if err := images.Walk(ctx, handler, index); err != nil {
return nil, err
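A short usage sketch of the new option; the archive name is an assumption:

```go
package example

import (
	"context"
	"os"

	"github.com/containerd/containerd"
)

// importAllPlatforms loads every platform from a saved image archive
// instead of only the host's default platform.
func importAllPlatforms(ctx context.Context, client *containerd.Client) error {
	f, err := os.Open("images.tar")
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = client.Import(ctx, f, containerd.WithAllPlatforms(true))
	return err
}
```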


@@ -0,0 +1,37 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package labels
import (
"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
)
const (
maxSize = 4096
)
// Validate a label's key and value are under 4096 bytes
func Validate(k, v string) error {
if (len(k) + len(v)) > maxSize {
if len(k) > 10 {
k = k[:10]
}
return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k)
}
return nil
}
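A small sketch of the failure mode, deliberately pushing key plus value past the 4096-byte limit:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/labels"
)

func main() {
	key := "containerd.io/distribution.source.docker.io"
	val := strings.Repeat("x", 4096) // key+value now exceeds maxSize
	if err := labels.Validate(key, val); err != nil {
		// the error wraps errdefs.ErrInvalidArgument and echoes only the
		// first 10 bytes of the key to keep the message short
		fmt.Println(err)
	}
}
```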


@@ -33,7 +33,7 @@ import (
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/continuity/fs"
"github.com/opencontainers/image-spec/specs-go/v1"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/user"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -741,9 +741,11 @@ func WithCapabilities(caps []string) SpecOpts {
}
// WithAllCapabilities sets all linux capabilities for the process
var WithAllCapabilities = WithCapabilities(getAllCapabilities())
var WithAllCapabilities = WithCapabilities(GetAllCapabilities())
func getAllCapabilities() []string {
// GetAllCapabilities returns all caps up to CAP_LAST_CAP
// or CAP_BLOCK_SUSPEND on RHEL6
func GetAllCapabilities() []string {
last := capability.CAP_LAST_CAP
// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
if last == capability.Cap(63) {
@@ -759,6 +761,61 @@ func getAllCapabilities() []string {
return caps
}
func capsContain(caps []string, s string) bool {
for _, c := range caps {
if c == s {
return true
}
}
return false
}
// removeCap removes every occurrence of s. The slice is rebuilt rather
// than mutated while ranging over it, which could otherwise skip entries.
func removeCap(caps *[]string, s string) {
var kept []string
for _, c := range *caps {
if c != s {
kept = append(kept, c)
}
}
*caps = kept
}
// WithAddedCapabilities adds the provided capabilities
func WithAddedCapabilities(caps []string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setCapabilities(s)
for _, c := range caps {
for _, cl := range []*[]string{
&s.Process.Capabilities.Bounding,
&s.Process.Capabilities.Effective,
&s.Process.Capabilities.Permitted,
&s.Process.Capabilities.Inheritable,
} {
if !capsContain(*cl, c) {
*cl = append(*cl, c)
}
}
}
return nil
}
}
// WithDroppedCapabilities removes the provided capabilities
func WithDroppedCapabilities(caps []string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setCapabilities(s)
for _, c := range caps {
for _, cl := range []*[]string{
&s.Process.Capabilities.Bounding,
&s.Process.Capabilities.Effective,
&s.Process.Capabilities.Permitted,
&s.Process.Capabilities.Inheritable,
} {
removeCap(cl, c)
}
}
return nil
}
}
// WithAmbientCapabilities set the Linux ambient capabilities for the process
// Ambient capabilities should only be set for non-root users or the caller should
// understand how these capabilities are used and set
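These options compose with the other `oci` spec helpers at spec-generation time. A hedged sketch; the image and the capability choices are illustrative:

```go
package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// buildSpec starts from the image defaults, then adds one capability and
// drops another across the bounding/effective/permitted/inheritable sets.
func buildSpec(ctx context.Context, client *containerd.Client, c *containers.Container, image containerd.Image) (*specs.Spec, error) {
	return oci.GenerateSpec(ctx, client, c,
		oci.WithImageConfig(image),
		oci.WithAddedCapabilities([]string{"CAP_NET_ADMIN"}),
		oci.WithDroppedCapabilities([]string{"CAP_SYS_ADMIN"}),
	)
}
```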


@@ -112,8 +112,9 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
childrenHandler := images.ChildrenHandler(store)
// Set any children labels for that content
childrenHandler = images.SetChildrenLabels(store, childrenHandler)
// Filter children by platforms
childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher)
// Filter manifests by platforms but allow to handle manifest
// and configuration for not-target platforms
childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher)
// Sort and limit manifests if a finite number is needed
if limit > 0 {
childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
@@ -130,11 +131,23 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
},
)
handler = images.Handlers(append(rCtx.BaseHandlers,
handlers := append(rCtx.BaseHandlers,
remotes.FetchHandler(store, fetcher),
convertibleHandler,
childrenHandler,
)...)
)
// append distribution source label to blob data
if rCtx.AppendDistributionSourceLabel {
appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref)
if err != nil {
return images.Image{}, err
}
handlers = append(handlers, appendDistSrcLabelHandler)
}
handler = images.Handlers(handlers...)
converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
return docker.ConvertManifest(ctx, store, desc)
@@ -148,6 +161,7 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
if rCtx.MaxConcurrentDownloads > 0 {
limiter = semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads))
}
if err := images.Dispatch(ctx, handler, limiter, desc); err != nil {
return images.Image{}, err
}


@@ -0,0 +1,112 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"fmt"
"net/url"
"strings"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/labels"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/reference"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
var (
// labelDistributionSource describes the source a blob comes from.
labelDistributionSource = "containerd.io/distribution.source"
)
// AppendDistributionSourceLabel updates the label of a blob with its distribution source.
func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) {
refspec, err := reference.Parse(ref)
if err != nil {
return nil, err
}
u, err := url.Parse("dummy://" + refspec.Locator)
if err != nil {
return nil, err
}
source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/")
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
info, err := manager.Info(ctx, desc.Digest)
if err != nil {
return nil, err
}
key := distributionSourceLabelKey(source)
originLabel := ""
if info.Labels != nil {
originLabel = info.Labels[key]
}
value := appendDistributionSourceLabel(originLabel, repo)
// Repo names are limited to 256 characters, but the distribution
// label can still exceed the maximum label size when a blob is
// shared by a very large number of repositories.
if err := labels.Validate(key, value); err != nil {
log.G(ctx).Warnf("skip to append distribution label: %s", err)
return nil, nil
}
info = content.Info{
Digest: desc.Digest,
Labels: map[string]string{
key: value,
},
}
_, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key))
return nil, err
}, nil
}
func appendDistributionSourceLabel(originLabel, repo string) string {
repos := []string{}
if originLabel != "" {
repos = strings.Split(originLabel, ",")
}
repos = append(repos, repo)
// use the empty string to mark duplicate items
for i := 1; i < len(repos); i++ {
tmp, j := repos[i], i-1
for ; j >= 0 && repos[j] >= tmp; j-- {
if repos[j] == tmp {
tmp = ""
}
repos[j+1] = repos[j]
}
repos[j+1] = tmp
}
i := 0
for ; i < len(repos) && repos[i] == ""; i++ {
}
return strings.Join(repos[i:], ",")
}
func distributionSourceLabelKey(source string) string {
return fmt.Sprintf("%s.%s", labelDistributionSource, source)
}
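The merge keeps the label value sorted and deduplicated. A worked example against the two helpers above (unexported, so only callable inside this package):

```go
key := distributionSourceLabelKey("docker.io")
// key == "containerd.io/distribution.source.docker.io"

v := appendDistributionSourceLabel("library/busybox", "library/alpine")
// v == "library/alpine,library/busybox" — insertion-sorted

v = appendDistributionSourceLabel(v, "library/alpine")
// v is unchanged: the duplicate is blanked to "" during the sort and the
// leading empty entries are skipped before joining
```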


@@ -206,3 +206,38 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, pr
return nil
}
// FilterManifestByPlatformHandler allows Handler to handle non-target
// platform's manifest and configuration data.
func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
children, err := f(ctx, desc)
if err != nil {
return nil, err
}
// no platform information
if desc.Platform == nil || m == nil {
return children, nil
}
var descs []ocispec.Descriptor
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
if m.Match(*desc.Platform) {
descs = children
} else {
for _, child := range children {
if child.MediaType == images.MediaTypeDockerSchema2Config ||
child.MediaType == ocispec.MediaTypeImageConfig {
descs = append(descs, child)
}
}
}
default:
descs = children
}
return descs, nil
}
}
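Wired into a fetch pipeline, this handler takes the spot `images.FilterPlatforms` held before, as the client change earlier in this commit shows. A condensed sketch:

```go
package example

import (
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/remotes"
)

// childrenFor fetches manifests and configs for every platform, but only
// descends into layers for manifests matching the default platform.
func childrenFor(store content.Store) images.HandlerFunc {
	ch := images.ChildrenHandler(store)
	return remotes.FilterManifestByPlatformHandler(ch, platforms.Default())
}
```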


@@ -173,6 +173,68 @@ The Runtime v2 supports an async event model. In order for the an upstream calle
| `runtime.TaskExitEventTopic` | MUST (follow `TaskExecStartedEventTopic`) | When an exec (other than the init exec) exits expected or unexpected |
| `runtime.TaskDeleteEventTopic` | SHOULD (follow `TaskExitEventTopic` or `TaskExecAddedEventTopic` if never started) | When an exec is removed from a shim |
#### Logging
Shims may support pluggable logging via STDIO URIs.
Current supported schemes for logging are:
* fifo - Linux
* binary - Linux & Windows
* file - Linux & Windows
* npipe - Windows
Binary logging has the ability to forward a container's STDIO to an external binary for consumption.
A sample logging driver that forwards the container's STDOUT and STDERR to `journald` is:
```go
package main
import (
"bufio"
"context"
"fmt"
"io"
"sync"
"github.com/containerd/containerd/runtime/v2/logging"
"github.com/coreos/go-systemd/journal"
)
func main() {
logging.Run(log)
}
func log(ctx context.Context, config *logging.Config, ready func() error) error {
// construct any log metadata for the container
vars := map[string]string{
"SYSLOG_IDENTIFIER": fmt.Sprintf("%s:%s", config.Namespace, config.ID),
}
var wg sync.WaitGroup
wg.Add(2)
// forward both stdout and stderr to the journal
go copy(&wg, config.Stdout, journal.PriInfo, vars)
go copy(&wg, config.Stderr, journal.PriErr, vars)
// signal that we are ready and setup for the container to be started
if err := ready(); err != nil {
return err
}
wg.Wait()
return nil
}
func copy(wg *sync.WaitGroup, r io.Reader, pri journal.Priority, vars map[string]string) {
defer wg.Done()
s := bufio.NewScanner(r)
for s.Scan() {
if s.Err() != nil {
return
}
journal.Send(s.Text(), pri, vars)
}
}
```
### Other
#### Unsupported rpcs


@@ -0,0 +1,463 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package contentserver
import (
"context"
"io"
"sync"
api "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
ptypes "github.com/gogo/protobuf/types"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type service struct {
store content.Store
}
var bufPool = sync.Pool{
New: func() interface{} {
buffer := make([]byte, 1<<20)
return &buffer
},
}
// New returns the content GRPC server
func New(cs content.Store) api.ContentServer {
return &service{store: cs}
}
func (s *service) Register(server *grpc.Server) error {
api.RegisterContentServer(server, s)
return nil
}
func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
if err := req.Digest.Validate(); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
}
bi, err := s.store.Info(ctx, req.Digest)
if err != nil {
return nil, errdefs.ToGRPC(err)
}
return &api.InfoResponse{
Info: infoToGRPC(bi),
}, nil
}
func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
if err := req.Info.Digest.Validate(); err != nil {
return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest)
}
info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...)
if err != nil {
return nil, errdefs.ToGRPC(err)
}
return &api.UpdateResponse{
Info: infoToGRPC(info),
}, nil
}
func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
var (
buffer []api.Info
sendBlock = func(block []api.Info) error {
// send last block
return session.Send(&api.ListContentResponse{
Info: block,
})
}
)
if err := s.store.Walk(session.Context(), func(info content.Info) error {
buffer = append(buffer, api.Info{
Digest: info.Digest,
Size_: info.Size,
CreatedAt: info.CreatedAt,
Labels: info.Labels,
})
if len(buffer) >= 100 {
if err := sendBlock(buffer); err != nil {
return err
}
buffer = buffer[:0]
}
return nil
}, req.Filters...); err != nil {
return err
}
if len(buffer) > 0 {
// send last block
if err := sendBlock(buffer); err != nil {
return err
}
}
return nil
}
func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) {
log.G(ctx).WithField("digest", req.Digest).Debugf("delete content")
if err := req.Digest.Validate(); err != nil {
return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
if err := s.store.Delete(ctx, req.Digest); err != nil {
return nil, errdefs.ToGRPC(err)
}
return &ptypes.Empty{}, nil
}
func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
if err := req.Digest.Validate(); err != nil {
return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
}
oi, err := s.store.Info(session.Context(), req.Digest)
if err != nil {
return errdefs.ToGRPC(err)
}
ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest})
if err != nil {
return errdefs.ToGRPC(err)
}
defer ra.Close()
var (
offset = req.Offset
// size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of.
// offset+size can be larger than oi.Size.
size = req.Size_
// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably
// little inefficient for work over a fast network. We can tune this later.
p = bufPool.Get().(*[]byte)
)
defer bufPool.Put(p)
if offset < 0 {
offset = 0
}
if offset > oi.Size {
return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
}
if size <= 0 || offset+size > oi.Size {
size = oi.Size - offset
}
_, err = io.CopyBuffer(
&readResponseWriter{session: session},
io.NewSectionReader(ra, offset, size), *p)
return errdefs.ToGRPC(err)
}
// readResponseWriter is a writer that places the output into ReadContentRequest messages.
//
// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
// into the buffer size.
type readResponseWriter struct {
offset int64
session api.Content_ReadServer
}
func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
if err := rw.session.Send(&api.ReadContentResponse{
Offset: rw.offset,
Data: p,
}); err != nil {
return 0, err
}
rw.offset += int64(len(p))
return len(p), nil
}
func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
status, err := s.store.Status(ctx, req.Ref)
if err != nil {
return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
}
var resp api.StatusResponse
resp.Status = &api.Status{
StartedAt: status.StartedAt,
UpdatedAt: status.UpdatedAt,
Ref: status.Ref,
Offset: status.Offset,
Total: status.Total,
Expected: status.Expected,
}
return &resp, nil
}
func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
statuses, err := s.store.ListStatuses(ctx, req.Filters...)
if err != nil {
return nil, errdefs.ToGRPC(err)
}
var resp api.ListStatusesResponse
for _, status := range statuses {
resp.Statuses = append(resp.Statuses, api.Status{
StartedAt: status.StartedAt,
UpdatedAt: status.UpdatedAt,
Ref: status.Ref,
Offset: status.Offset,
Total: status.Total,
Expected: status.Expected,
})
}
return &resp, nil
}
func (s *service) Write(session api.Content_WriteServer) (err error) {
var (
ctx = session.Context()
msg api.WriteContentResponse
req *api.WriteContentRequest
ref string
total int64
expected digest.Digest
)
defer func(msg *api.WriteContentResponse) {
// pump through the last message if no error was encountered
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
// TODO(stevvooe): Really need a log line here to track which
// errors are actually causing failure on the server side. May want
// to configure the service with an interceptor to make this work
// identically across all GRPC methods.
//
// This is pretty noisy, so we can remove it but leave it for now.
log.G(ctx).WithError(err).Error("(*service).Write failed")
}
return
}
err = session.Send(msg)
}(&msg)
// handle the very first request!
req, err = session.Recv()
if err != nil {
return err
}
ref = req.Ref
if ref == "" {
return status.Errorf(codes.InvalidArgument, "first message must have a reference")
}
fields := logrus.Fields{
"ref": ref,
}
total = req.Total
expected = req.Expected
if total > 0 {
fields["total"] = total
}
if expected != "" {
fields["expected"] = expected
}
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
log.G(ctx).Debug("(*service).Write started")
// this action locks the writer for the session.
wr, err := s.store.Writer(ctx,
content.WithRef(ref),
content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected}))
if err != nil {
return errdefs.ToGRPC(err)
}
defer wr.Close()
for {
msg.Action = req.Action
ws, err := wr.Status()
if err != nil {
return errdefs.ToGRPC(err)
}
msg.Offset = ws.Offset // always set the offset.
// NOTE(stevvooe): In general, there are two cases under which a remote
// writer is used.
//
// For pull, we almost always have this before fetching large content,
// through descriptors. We allow predeclaration of the expected size
// and digest.
//
// For push, it is more complex. If we want to cut through content into
// storage, we may have no expectation until we are done processing the
// content. The case here is the following:
//
// 1. Start writing content.
// 2. Compress inline.
// 3. Validate digest and size (maybe).
//
// Supporting these two paths is quite awkward but it lets both API
// users use the same writer style for each with a minimum of overhead.
if req.Expected != "" {
if expected != "" && expected != req.Expected {
log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected)
}
expected = req.Expected
if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
if err := wr.Close(); err != nil {
log.G(ctx).WithError(err).Error("failed to close writer")
}
if err := s.store.Abort(session.Context(), ref); err != nil {
log.G(ctx).WithError(err).Error("failed to abort write")
}
return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
}
}
if req.Total > 0 {
// Update the expected total. Typically, this could be seen at
// negotiation time or on a commit message.
if total > 0 && req.Total != total {
log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total)
}
total = req.Total
}
switch req.Action {
case api.WriteActionStat:
msg.Digest = wr.Digest()
msg.StartedAt = ws.StartedAt
msg.UpdatedAt = ws.UpdatedAt
msg.Total = total
case api.WriteActionWrite, api.WriteActionCommit:
if req.Offset > 0 {
// validate the offset if provided
if req.Offset != ws.Offset {
return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
}
}
if req.Offset == 0 && ws.Offset > 0 {
if err := wr.Truncate(req.Offset); err != nil {
return errors.Wrapf(err, "truncate failed")
}
msg.Offset = req.Offset
}
// issue the write if we actually have data.
if len(req.Data) > 0 {
// While this looks like we could use io.WriterAt here, because we
// maintain the offset as append only, we just issue the write.
n, err := wr.Write(req.Data)
if err != nil {
return errdefs.ToGRPC(err)
}
if n != len(req.Data) {
// TODO(stevvooe): Perhaps, we can recover this by including it
// in the offset on the write return.
return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
}
msg.Offset += int64(n)
}
if req.Action == api.WriteActionCommit {
var opts []content.Opt
if req.Labels != nil {
opts = append(opts, content.WithLabels(req.Labels))
}
if err := wr.Commit(ctx, total, expected, opts...); err != nil {
return errdefs.ToGRPC(err)
}
}
msg.Digest = wr.Digest()
}
if err := session.Send(&msg); err != nil {
return err
}
req, err = session.Recv()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
}
}
func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) {
if err := s.store.Abort(ctx, req.Ref); err != nil {
return nil, errdefs.ToGRPC(err)
}
return &ptypes.Empty{}, nil
}
func infoToGRPC(info content.Info) api.Info {
return api.Info{
Digest: info.Digest,
Size_: info.Size,
CreatedAt: info.CreatedAt,
UpdatedAt: info.UpdatedAt,
Labels: info.Labels,
}
}
func infoFromGRPC(info api.Info) content.Info {
return content.Info{
Digest: info.Digest,
Size: info.Size_,
CreatedAt: info.CreatedAt,
UpdatedAt: info.UpdatedAt,
Labels: info.Labels,
}
}
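A minimal client-side sketch of the write session this handler serves: stat to learn the current offset, then write the remaining bytes and commit with the expected digest in one message. This is an illustration, not the containerd client; connection setup, the final Recv, and session teardown are elided, and the request fields mirror the ones the handler reads above.

package main

import (
	"context"

	api "github.com/containerd/containerd/api/services/content/v1"
	"github.com/opencontainers/go-digest"
	"google.golang.org/grpc"
)

func writeBlob(ctx context.Context, cc *grpc.ClientConn, ref string, data []byte) error {
	session, err := api.NewContentClient(cc).Write(ctx)
	if err != nil {
		return err
	}
	// Stat first: the response carries the current offset, which lets a
	// partial write resume instead of restarting from zero.
	if err := session.Send(&api.WriteContentRequest{Action: api.WriteActionStat, Ref: ref}); err != nil {
		return err
	}
	resp, err := session.Recv()
	if err != nil {
		return err
	}
	// Send the remaining bytes and commit in a single message; the server
	// validates the offset, total, and digest as in the handler above.
	return session.Send(&api.WriteContentRequest{
		Action:   api.WriteActionCommit,
		Ref:      ref,
		Offset:   resp.Offset,
		Data:     data[resp.Offset:],
		Total:    int64(len(data)),
		Expected: digest.FromBytes(data),
	})
}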

673
vendor/github.com/containerd/continuity/context.go generated vendored Normal file
View File

@ -0,0 +1,673 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"bytes"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"github.com/containerd/continuity/devices"
driverpkg "github.com/containerd/continuity/driver"
"github.com/containerd/continuity/pathdriver"
"github.com/opencontainers/go-digest"
)
var (
// ErrNotFound represents the resource not found
ErrNotFound = fmt.Errorf("not found")
// ErrNotSupported represents the resource not supported
ErrNotSupported = fmt.Errorf("not supported")
)
// Context represents a file system context for accessing resources. The
// responsibility of the context is to convert system specific resources to
// generic Resource objects. Most of this is safe path manipulation, as well
// as extraction of resource details.
type Context interface {
Apply(Resource) error
Verify(Resource) error
Resource(string, os.FileInfo) (Resource, error)
Walk(filepath.WalkFunc) error
}
// SymlinkPath is intended to give the symlink target value
// in a root context. Target and linkname are absolute paths
// not under the given root.
type SymlinkPath func(root, linkname, target string) (string, error)
// ContextOptions represents options to create a new context.
type ContextOptions struct {
Digester Digester
Driver driverpkg.Driver
PathDriver pathdriver.PathDriver
Provider ContentProvider
}
// context represents a file system context for accessing resources.
// Generally, all path qualified access and system considerations should land
// here.
type context struct {
driver driverpkg.Driver
pathDriver pathdriver.PathDriver
root string
digester Digester
provider ContentProvider
}
// NewContext returns a Context associated with root. The default driver will
// be used, as returned by NewDriver.
func NewContext(root string) (Context, error) {
return NewContextWithOptions(root, ContextOptions{})
}
// NewContextWithOptions returns a Context associated with the root.
func NewContextWithOptions(root string, options ContextOptions) (Context, error) {
// normalize to absolute path
pathDriver := options.PathDriver
if pathDriver == nil {
pathDriver = pathdriver.LocalPathDriver
}
root = pathDriver.FromSlash(root)
root, err := pathDriver.Abs(pathDriver.Clean(root))
if err != nil {
return nil, err
}
driver := options.Driver
if driver == nil {
driver, err = driverpkg.NewSystemDriver()
if err != nil {
return nil, err
}
}
digester := options.Digester
if digester == nil {
digester = simpleDigester{digest.Canonical}
}
// Check the root directory. Need to be a little careful here. We are
// allowing a link for now, but this may have odd behavior when
// canonicalizing paths. As long as all files are opened through the link
// path, this should be okay.
fi, err := driver.Stat(root)
if err != nil {
return nil, err
}
if !fi.IsDir() {
return nil, &os.PathError{Op: "NewContext", Path: root, Err: os.ErrInvalid}
}
return &context{
root: root,
driver: driver,
pathDriver: pathDriver,
digester: digester,
provider: options.Provider,
}, nil
}
// Resource returns the resource at path p, populating the entry with info
// from fi. The path p should be the path of the resource in the context,
// typically obtained through Walk or from the value of Resource.Path(). If fi
// is nil, it will be resolved.
func (c *context) Resource(p string, fi os.FileInfo) (Resource, error) {
fp, err := c.fullpath(p)
if err != nil {
return nil, err
}
if fi == nil {
fi, err = c.driver.Lstat(fp)
if err != nil {
return nil, err
}
}
base, err := newBaseResource(p, fi)
if err != nil {
return nil, err
}
base.xattrs, err = c.resolveXAttrs(fp, fi, base)
if err == ErrNotSupported {
log.Printf("resolving xattrs on %s not supported", fp)
} else if err != nil {
return nil, err
}
// TODO(stevvooe): Handle windows alternate data streams.
if fi.Mode().IsRegular() {
dgst, err := c.digest(p)
if err != nil {
return nil, err
}
return newRegularFile(*base, base.paths, fi.Size(), dgst)
}
if fi.Mode().IsDir() {
return newDirectory(*base)
}
if fi.Mode()&os.ModeSymlink != 0 {
// We handle relative links vs absolute links by including a
// beginning slash for absolute links. Effectively, the bundle's
// root is treated as the absolute link anchor.
target, err := c.driver.Readlink(fp)
if err != nil {
return nil, err
}
return newSymLink(*base, target)
}
if fi.Mode()&os.ModeNamedPipe != 0 {
return newNamedPipe(*base, base.paths)
}
if fi.Mode()&os.ModeDevice != 0 {
deviceDriver, ok := c.driver.(driverpkg.DeviceInfoDriver)
if !ok {
log.Printf("device extraction not supported %s", fp)
return nil, ErrNotSupported
}
// character and block devices merely need to recover the
// major/minor device number.
major, minor, err := deviceDriver.DeviceInfo(fi)
if err != nil {
return nil, err
}
return newDevice(*base, base.paths, major, minor)
}
log.Printf("%q (%v) is not supported", fp, fi.Mode())
return nil, ErrNotFound
}
func (c *context) verifyMetadata(resource, target Resource) error {
if target.Mode() != resource.Mode() {
return fmt.Errorf("resource %q has incorrect mode: %v != %v", target.Path(), target.Mode(), resource.Mode())
}
if target.UID() != resource.UID() {
return fmt.Errorf("unexpected uid for %q: %v != %v", target.Path(), target.UID(), resource.UID())
}
if target.GID() != resource.GID() {
return fmt.Errorf("unexpected gid for %q: %v != %v", target.Path(), target.GID(), resource.GID())
}
if xattrer, ok := resource.(XAttrer); ok {
txattrer, tok := target.(XAttrer)
if !tok {
return fmt.Errorf("resource %q has xattrs but target does not support them", resource.Path())
}
// For xattrs, only ensure that we have those defined in the resource
// and their values match. We can ignore other xattrs. In other words,
// we only verify that target has the subset defined by resource.
txattrs := txattrer.XAttrs()
for attr, value := range xattrer.XAttrs() {
tvalue, ok := txattrs[attr]
if !ok {
return fmt.Errorf("resource %q target missing xattr %q", resource.Path(), attr)
}
if !bytes.Equal(value, tvalue) {
return fmt.Errorf("xattr %q value differs for resource %q", attr, resource.Path())
}
}
}
switch r := resource.(type) {
case RegularFile:
// TODO(stevvooe): Another reason to use a record-based approach. We
// have to do another type switch to get this to work. This could be
// fixed with an Equal function, but let's study this a little more to
// be sure.
t, ok := target.(RegularFile)
if !ok {
return fmt.Errorf("resource %q target not a regular file", r.Path())
}
if t.Size() != r.Size() {
return fmt.Errorf("resource %q target has incorrect size: %v != %v", t.Path(), t.Size(), r.Size())
}
case Directory:
if _, ok := target.(Directory); !ok {
return fmt.Errorf("resource %q target not a directory", r.Path())
}
case SymLink:
t, ok := target.(SymLink)
if !ok {
return fmt.Errorf("resource %q target not a symlink", t.Path())
}
if t.Target() != r.Target() {
return fmt.Errorf("resource %q target has mismatched target: %q != %q", t.Path(), t.Target(), r.Target())
}
case Device:
t, ok := target.(Device)
if !ok {
return fmt.Errorf("resource %q is not a device", t.Path())
}
if t.Major() != r.Major() || t.Minor() != r.Minor() {
return fmt.Errorf("resource %q has mismatched major/minor numbers: %d,%d != %d,%d", t.Path(), t.Major(), t.Minor(), r.Major(), r.Minor())
}
case NamedPipe:
if _, ok := target.(NamedPipe); !ok {
return fmt.Errorf("resource %q is not a named pipe", r.Path())
}
default:
return fmt.Errorf("cannot verify resource: %v", resource)
}
return nil
}
// Verify the resource in the context. An error will be returned if a
// discrepancy is found.
func (c *context) Verify(resource Resource) error {
fp, err := c.fullpath(resource.Path())
if err != nil {
return err
}
fi, err := c.driver.Lstat(fp)
if err != nil {
return err
}
target, err := c.Resource(resource.Path(), fi)
if err != nil {
return err
}
if target.Path() != resource.Path() {
return fmt.Errorf("resource paths do not match: %q != %q", target.Path(), resource.Path())
}
if err := c.verifyMetadata(resource, target); err != nil {
return err
}
if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable {
hardlinkKey, err := newHardlinkKey(fi)
if err == errNotAHardLink {
if len(h.Paths()) > 1 {
return fmt.Errorf("%q is not a hardlink to %q", h.Paths()[1], resource.Path())
}
} else if err != nil {
return err
}
for _, path := range h.Paths()[1:] {
fpLink, err := c.fullpath(path)
if err != nil {
return err
}
fiLink, err := c.driver.Lstat(fpLink)
if err != nil {
return err
}
targetLink, err := c.Resource(path, fiLink)
if err != nil {
return err
}
hardlinkKeyLink, err := newHardlinkKey(fiLink)
if err != nil {
return err
}
if hardlinkKeyLink != hardlinkKey {
return fmt.Errorf("%q is not a hardlink to %q", path, resource.Path())
}
if err := c.verifyMetadata(resource, targetLink); err != nil {
return err
}
}
}
switch r := resource.(type) {
case RegularFile:
t, ok := target.(RegularFile)
if !ok {
return fmt.Errorf("resource %q target not a regular file", r.Path())
}
// TODO(stevvooe): This may need to get a little more sophisticated
// for digest comparison. We may want to actually calculate the
// provided digests, rather than the implementations having an
// overlap.
if !digestsMatch(t.Digests(), r.Digests()) {
return fmt.Errorf("digests for resource %q do not match: %v != %v", t.Path(), t.Digests(), r.Digests())
}
}
return nil
}
func (c *context) checkoutFile(fp string, rf RegularFile) error {
if c.provider == nil {
return fmt.Errorf("no file provider")
}
var (
r io.ReadCloser
err error
)
for _, dgst := range rf.Digests() {
r, err = c.provider.Reader(dgst)
if err == nil {
break
}
}
if err != nil {
return fmt.Errorf("file content could not be provided: %v", err)
}
defer r.Close()
return atomicWriteFile(fp, r, rf.Size(), rf.Mode())
}
// Apply the resource to the context. An error will be returned if the
// operation fails. Depending on the resource type, the resource may be
// created. For resources that cannot be resolved, an error will be returned.
func (c *context) Apply(resource Resource) error {
fp, err := c.fullpath(resource.Path())
if err != nil {
return err
}
if !strings.HasPrefix(fp, c.root) {
return fmt.Errorf("resource %v escapes root", resource)
}
var chmod = true
fi, err := c.driver.Lstat(fp)
if err != nil {
if !os.IsNotExist(err) {
return err
}
}
switch r := resource.(type) {
case RegularFile:
if fi == nil {
if err := c.checkoutFile(fp, r); err != nil {
return fmt.Errorf("error checking out file %q: %v", resource.Path(), err)
}
chmod = false
} else {
if !fi.Mode().IsRegular() {
return fmt.Errorf("file %q should be a regular file, but is not", resource.Path())
}
if fi.Size() != r.Size() {
if err := c.checkoutFile(fp, r); err != nil {
return fmt.Errorf("error checking out file %q: %v", resource.Path(), err)
}
} else {
for _, dgst := range r.Digests() {
f, err := os.Open(fp)
if err != nil {
return fmt.Errorf("failure opening file for read %q: %v", resource.Path(), err)
}
compared, err := dgst.Algorithm().FromReader(f)
if err == nil && dgst != compared {
if err := c.checkoutFile(fp, r); err != nil {
return fmt.Errorf("error checking out file %q: %v", resource.Path(), err)
}
break
}
if err1 := f.Close(); err == nil {
err = err1
}
if err != nil {
return fmt.Errorf("error checking digest for %q: %v", resource.Path(), err)
}
}
}
}
case Directory:
if fi == nil {
if err := c.driver.Mkdir(fp, resource.Mode()); err != nil {
return err
}
} else if !fi.Mode().IsDir() {
return fmt.Errorf("%q should be a directory, but is not", resource.Path())
}
case SymLink:
var target string // only possibly set if target resource is a symlink
if fi != nil {
if fi.Mode()&os.ModeSymlink != 0 {
target, err = c.driver.Readlink(fp)
if err != nil {
return err
}
}
}
if target != r.Target() {
if fi != nil {
if err := c.driver.Remove(fp); err != nil { // RemoveAll in case of directory?
return err
}
}
if err := c.driver.Symlink(r.Target(), fp); err != nil {
return err
}
}
case Device:
if fi == nil {
if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil {
return err
}
} else if (fi.Mode() & os.ModeDevice) == 0 {
return fmt.Errorf("%q should be a device, but is not", resource.Path())
} else {
major, minor, err := devices.DeviceInfo(fi)
if err != nil {
return err
}
if major != r.Major() || minor != r.Minor() {
if err := c.driver.Remove(fp); err != nil {
return err
}
if err := c.driver.Mknod(fp, resource.Mode(), int(r.Major()), int(r.Minor())); err != nil {
return err
}
}
}
case NamedPipe:
if fi == nil {
if err := c.driver.Mkfifo(fp, resource.Mode()); err != nil {
return err
}
} else if (fi.Mode() & os.ModeNamedPipe) == 0 {
return fmt.Errorf("%q should be a named pipe, but is not", resource.Path())
}
}
if h, isHardlinkable := resource.(Hardlinkable); isHardlinkable {
for _, path := range h.Paths() {
if path == resource.Path() {
continue
}
lp, err := c.fullpath(path)
if err != nil {
return err
}
// remove any existing file at the link path before relinking
if _, err := c.driver.Lstat(lp); err == nil {
c.driver.Remove(lp)
}
if err := c.driver.Link(fp, lp); err != nil {
return err
}
}
}
// Update filemode if file was not created
if chmod {
if err := c.driver.Lchmod(fp, resource.Mode()); err != nil {
return err
}
}
if err := c.driver.Lchown(fp, resource.UID(), resource.GID()); err != nil {
return err
}
if xattrer, ok := resource.(XAttrer); ok {
// For xattrs, only ensure that we have those defined in the resource
// and their values are set. We can ignore other xattrs. In other words,
// we only set xattrs defined by the resource but never remove any.
if _, ok := resource.(SymLink); ok {
lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver)
if !ok {
return fmt.Errorf("unsupported symlink xattr for resource %q", resource.Path())
}
if err := lxattrDriver.LSetxattr(fp, xattrer.XAttrs()); err != nil {
return err
}
} else {
xattrDriver, ok := c.driver.(driverpkg.XAttrDriver)
if !ok {
return fmt.Errorf("unsupported xattr for resource %q", resource.Path())
}
if err := xattrDriver.Setxattr(fp, xattrer.XAttrs()); err != nil {
return err
}
}
}
return nil
}
// Walk provides a convenience function to call filepath.Walk correctly for
// the context. It is otherwise identical to filepath.Walk, except that the
// path argument is corrected to be contained within the context.
func (c *context) Walk(fn filepath.WalkFunc) error {
root := c.root
fi, err := c.driver.Lstat(c.root)
if err == nil && fi.Mode()&os.ModeSymlink != 0 {
root, err = c.driver.Readlink(c.root)
if err != nil {
return err
}
}
return c.pathDriver.Walk(root, func(p string, fi os.FileInfo, err error) error {
contained, err := c.containWithRoot(p, root)
return fn(contained, fi, err)
})
}
// fullpath returns the system path for the resource, joined with the context
// root. The path p must be a part of the context.
func (c *context) fullpath(p string) (string, error) {
p = c.pathDriver.Join(c.root, p)
if !strings.HasPrefix(p, c.root) {
return "", fmt.Errorf("invalid context path")
}
return p, nil
}
// contain cleans and sanitizes the filesystem path p to be an absolute path,
// effectively relative to the context root.
func (c *context) contain(p string) (string, error) {
return c.containWithRoot(p, c.root)
}
// containWithRoot cleans and sanitizes the filesystem path p to be an absolute path,
// effectively relative to the passed root. Extra care should be used when calling this
// instead of contain. This is needed for Walk: if the context root is a symlink,
// it must be evaluated prior to the Walk.
func (c *context) containWithRoot(p string, root string) (string, error) {
sanitized, err := c.pathDriver.Rel(root, p)
if err != nil {
return "", err
}
// ZOMBIES(stevvooe): In certain cases, we may want to remap these to a
// "containment error", so the caller can decide what to do.
return c.pathDriver.Join("/", c.pathDriver.Clean(sanitized)), nil
}
// digest returns the digest of the file at path p, relative to the root.
func (c *context) digest(p string) (digest.Digest, error) {
f, err := c.driver.Open(c.pathDriver.Join(c.root, p))
if err != nil {
return "", err
}
defer f.Close()
return c.digester.Digest(f)
}
// resolveXAttrs attempts to resolve the extended attributes for the resource
// at the path fp, which is the full path to the resource. If the resource
// cannot have xattrs, nil will be returned.
func (c *context) resolveXAttrs(fp string, fi os.FileInfo, base *resource) (map[string][]byte, error) {
if fi.Mode().IsRegular() || fi.Mode().IsDir() {
xattrDriver, ok := c.driver.(driverpkg.XAttrDriver)
if !ok {
log.Println("xattr extraction not supported")
return nil, ErrNotSupported
}
return xattrDriver.Getxattr(fp)
}
if fi.Mode()&os.ModeSymlink != 0 {
lxattrDriver, ok := c.driver.(driverpkg.LXAttrDriver)
if !ok {
log.Println("xattr extraction for symlinks not supported")
return nil, ErrNotSupported
}
return lxattrDriver.LGetxattr(fp)
}
return nil, nil
}
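A minimal sketch of driving the Context API defined above: open a root, walk it, capture each resource, and verify it. The root path is a placeholder; a real caller would typically feed the resources into a manifest instead of verifying them in place.

package main

import (
	"log"
	"os"

	"github.com/containerd/continuity"
)

func main() {
	fsCtx, err := continuity.NewContext("/tmp/bundle") // hypothetical root
	if err != nil {
		log.Fatal(err)
	}
	err = fsCtx.Walk(func(p string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if p == string(os.PathSeparator) {
			return nil // skip the root itself
		}
		resource, err := fsCtx.Resource(p, fi)
		if err == continuity.ErrNotFound {
			return nil // unsupported resource type, skip
		}
		if err != nil {
			return err
		}
		// Re-check the freshly captured resource; the same Verify call
		// detects drift when the resource comes from a stored manifest.
		return fsCtx.Verify(resource)
	})
	if err != nil {
		log.Fatal(err)
	}
}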

View File

@ -0,0 +1,21 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devices
import "fmt"
var ErrNotSupported = fmt.Errorf("not supported")

View File

@ -0,0 +1,74 @@
// +build linux darwin freebsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devices
import (
"fmt"
"os"
"syscall"
"golang.org/x/sys/unix"
)
func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo")
}
dev := uint64(sys.Rdev)
return uint64(unix.Major(dev)), uint64(unix.Minor(dev)), nil
}
// Mknod provides a shortcut for unix.Mknod
func Mknod(p string, mode os.FileMode, maj, min int) error {
var (
m = syscallMode(mode.Perm())
dev uint64
)
if mode&os.ModeDevice != 0 {
dev = unix.Mkdev(uint32(maj), uint32(min))
if mode&os.ModeCharDevice != 0 {
m |= unix.S_IFCHR
} else {
m |= unix.S_IFBLK
}
} else if mode&os.ModeNamedPipe != 0 {
m |= unix.S_IFIFO
}
return unix.Mknod(p, m, int(dev))
}
// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
func syscallMode(i os.FileMode) (o uint32) {
o |= uint32(i.Perm())
if i&os.ModeSetuid != 0 {
o |= unix.S_ISUID
}
if i&os.ModeSetgid != 0 {
o |= unix.S_ISGID
}
if i&os.ModeSticky != 0 {
o |= unix.S_ISVTX
}
return
}
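A small sketch of the unix-only Mknod above in its unprivileged use: creating a FIFO by passing a mode with the named-pipe bit and zero major/minor, which is what the driver's Mkfifo does. The path is a placeholder.

package main

import (
	"log"
	"os"

	"github.com/containerd/continuity/devices"
)

func main() {
	// ModeNamedPipe without ModeDevice makes Mknod emit S_IFIFO.
	if err := devices.Mknod("/tmp/example.fifo", os.ModeNamedPipe|0644, 0, 0); err != nil {
		log.Fatal(err)
	}
}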

View File

@ -0,0 +1,27 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devices
import (
"os"
"github.com/pkg/errors"
)
func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows")
}

104
vendor/github.com/containerd/continuity/digests.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"io"
"sort"
"github.com/opencontainers/go-digest"
)
// Digester produces a digest for a given read stream
type Digester interface {
Digest(io.Reader) (digest.Digest, error)
}
// ContentProvider produces a read stream for a given digest
type ContentProvider interface {
Reader(digest.Digest) (io.ReadCloser, error)
}
type simpleDigester struct {
algorithm digest.Algorithm
}
func (sd simpleDigester) Digest(r io.Reader) (digest.Digest, error) {
digester := sd.algorithm.Digester()
if _, err := io.Copy(digester.Hash(), r); err != nil {
return "", err
}
return digester.Digest(), nil
}
// uniqifyDigests sorts and uniqifies the provided digests, ensuring that the
// digests are not repeated and no two digests with the same algorithm have
// different values. Because a stable sort is used, this has the effect of
// "zipping" digest collections from multiple resources.
func uniqifyDigests(digests ...digest.Digest) ([]digest.Digest, error) {
sort.Stable(digestSlice(digests)) // stable sort is important for the behavior here.
seen := map[digest.Digest]struct{}{}
algs := map[digest.Algorithm][]digest.Digest{} // detect different digests.
var out []digest.Digest
// uniqify the digests
for _, d := range digests {
if _, ok := seen[d]; ok {
continue
}
seen[d] = struct{}{}
algs[d.Algorithm()] = append(algs[d.Algorithm()], d)
if len(algs[d.Algorithm()]) > 1 {
return nil, fmt.Errorf("conflicting digests for %v found", d.Algorithm())
}
out = append(out, d)
}
return out, nil
}
// digestsMatch compares the two sets of digests to see if they match.
func digestsMatch(as, bs []digest.Digest) bool {
all := append(as, bs...)
uniqified, err := uniqifyDigests(all...)
if err != nil {
// the only error uniqifyDigests returns is when the digests disagree.
return false
}
disjoint := len(as) + len(bs)
if len(uniqified) == disjoint {
// if the uniqified set has the same cardinality as the combined
// input, the two sides shared no digests.
return false
}
return true
}
type digestSlice []digest.Digest
func (p digestSlice) Len() int { return len(p) }
func (p digestSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p digestSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
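The helpers above are unexported, but their semantics are easy to illustrate with go-digest directly: two digest sets match only if, per algorithm, every digest they advertise agrees. A sketch:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	a := digest.Canonical.FromString("hello")
	b := digest.Canonical.FromString("hello")
	c := digest.Canonical.FromString("world")

	// Same algorithm, same value: uniqifies to one entry, so the sets match.
	fmt.Println(a == b) // true
	// Same algorithm, different values: uniqifyDigests would report a
	// conflict, and digestsMatch would return false.
	fmt.Println(a == c) // false
}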

View File

@ -0,0 +1,174 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"fmt"
"io"
"os"
)
var ErrNotSupported = fmt.Errorf("not supported")
// Driver provides all of the system-level functions in a common interface.
// The context should call these with full paths and should never use the `os`
// package or any other package to access resources on the filesystem. This
// mechanism lets us carefully control access to the context and maintain
// path and resource integrity. It also gives us an interface to reason about
// direct resource access.
//
// Implementations don't need to do much other than meet the interface. For
// example, it is not required to wrap os.FileInfo to return correct paths for
// the call to Name().
type Driver interface {
// Note that Open() returns a File interface instead of *os.File. This
// is because os.File is a struct, so if Open were to return *os.File,
// the only way to fulfill the interface would be to call os.Open()
Open(path string) (File, error)
OpenFile(path string, flag int, perm os.FileMode) (File, error)
Stat(path string) (os.FileInfo, error)
Lstat(path string) (os.FileInfo, error)
Readlink(p string) (string, error)
Mkdir(path string, mode os.FileMode) error
Remove(path string) error
Link(oldname, newname string) error
Lchmod(path string, mode os.FileMode) error
Lchown(path string, uid, gid int64) error
Symlink(oldname, newname string) error
MkdirAll(path string, perm os.FileMode) error
RemoveAll(path string) error
// TODO(aaronl): These methods might move outside the main Driver
// interface in the future as more platforms are added.
Mknod(path string, mode os.FileMode, major int, minor int) error
Mkfifo(path string, mode os.FileMode) error
}
// File is the interface for interacting with files returned by continuity's Open
// This is needed since os.File is a struct, instead of an interface, so it can't
// be used.
type File interface {
io.ReadWriteCloser
io.Seeker
Readdir(n int) ([]os.FileInfo, error)
}
func NewSystemDriver() (Driver, error) {
// TODO(stevvooe): Consider having this take a "hint" path argument, which
// would be the context root. The hint could be used to resolve required
// filesystem support when assembling the driver to use.
return &driver{}, nil
}
// XAttrDriver should be implemented on operating systems and filesystems that
// have xattr support for regular files and directories.
type XAttrDriver interface {
// Getxattr returns all of the extended attributes for the file at path.
// Typically, this requires syscalls to Listxattr and Getxattr.
Getxattr(path string) (map[string][]byte, error)
// Setxattr sets all of the extended attributes on file at path, following
// any symbolic links, if necessary. All attributes on the target are
// replaced by the values from attr. If the operation fails to set any
// attribute, those already applied will not be rolled back.
Setxattr(path string, attr map[string][]byte) error
}
// LXAttrDriver should be implemented by drivers on operating systems and
// filesystems that support setting and getting extended attributes on
// symbolic links. If this is not implemented, extended attributes will be
// ignored on symbolic links.
type LXAttrDriver interface {
// LGetxattr returns all of the extended attributes for the file at path
// and does not follow symlinks. Typically, this takes a syscall call to
// Llistxattr and Lgetxattr.
LGetxattr(path string) (map[string][]byte, error)
// LSetxattr sets all of the extended attributes on file at path, without
// following symbolic links. All attributes on the target are replaced by
// the values from attr. If the operation fails to set any attribute,
// those already applied will not be rolled back.
LSetxattr(path string, attr map[string][]byte) error
}
type DeviceInfoDriver interface {
DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error)
}
// driver is a simple default implementation that sends calls out to the "os"
// package. Extend the "driver" type in system-specific files to add support,
// such as xattrs, which can add support at compile time.
type driver struct{}
var _ File = &os.File{}
// LocalDriver is the exported Driver struct for convenience.
var LocalDriver Driver = &driver{}
func (d *driver) Open(p string) (File, error) {
return os.Open(p)
}
func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) {
return os.OpenFile(path, flag, perm)
}
func (d *driver) Stat(p string) (os.FileInfo, error) {
return os.Stat(p)
}
func (d *driver) Lstat(p string) (os.FileInfo, error) {
return os.Lstat(p)
}
func (d *driver) Mkdir(p string, mode os.FileMode) error {
return os.Mkdir(p, mode)
}
// Remove is used to unlink files and remove directories.
// This follows the Go os package API, which combines both
// operations into a single higher-level Remove function. If
// explicit unlinking or directory removal mirroring the system
// calls is ever required, the operations can be split up then.
func (d *driver) Remove(path string) error {
return os.Remove(path)
}
func (d *driver) Link(oldname, newname string) error {
return os.Link(oldname, newname)
}
func (d *driver) Lchown(name string, uid, gid int64) error {
// TODO: error out if uid exceeds int bit width?
return os.Lchown(name, int(uid), int(gid))
}
func (d *driver) Symlink(oldname, newname string) error {
return os.Symlink(oldname, newname)
}
func (d *driver) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func (d *driver) RemoveAll(path string) error {
return os.RemoveAll(path)
}
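Because Driver is an interface, callers can wrap LocalDriver to intercept individual operations while inheriting the rest through embedding. A minimal tracing sketch; the path is a placeholder:

package main

import (
	"log"

	driverpkg "github.com/containerd/continuity/driver"
)

type tracingDriver struct {
	driverpkg.Driver // embed to inherit every other method
}

func (d tracingDriver) Open(p string) (driverpkg.File, error) {
	log.Printf("open %s", p)
	return d.Driver.Open(p)
}

func main() {
	var d driverpkg.Driver = tracingDriver{Driver: driverpkg.LocalDriver}
	f, err := d.Open("/etc/hostname") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	f.Close()
}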

View File

@ -0,0 +1,138 @@
// +build linux darwin freebsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"errors"
"fmt"
"os"
"sort"
"github.com/containerd/continuity/devices"
"github.com/containerd/continuity/sysx"
)
func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
err := devices.Mknod(path, mode, major, minor)
if err != nil {
err = &os.PathError{Op: "mknod", Path: path, Err: err}
}
return err
}
func (d *driver) Mkfifo(path string, mode os.FileMode) error {
if mode&os.ModeNamedPipe == 0 {
return errors.New("mode passed to Mkfifo does not have the named pipe bit set")
}
// mknod with a mode that has ModeNamedPipe set creates a fifo, not a
// device.
err := devices.Mknod(path, mode, 0, 0)
if err != nil {
err = &os.PathError{Op: "mkfifo", Path: path, Err: err}
}
return err
}
// Getxattr returns all of the extended attributes for the file at path p.
func (d *driver) Getxattr(p string) (map[string][]byte, error) {
xattrs, err := sysx.Listxattr(p)
if err != nil {
return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
}
sort.Strings(xattrs)
m := make(map[string][]byte, len(xattrs))
for _, attr := range xattrs {
value, err := sysx.Getxattr(p, attr)
if err != nil {
return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
}
// NOTE(stevvooe): This append/copy trick relies on unique
// xattrs. Break this out into an alloc/copy if xattrs are no
// longer unique.
m[attr] = append(m[attr], value...)
}
return m, nil
}
// Setxattr sets all of the extended attributes on file at path, following
// any symbolic links, if necessary. All attributes on the target are
// replaced by the values from attr. If the operation fails to set any
// attribute, those already applied will not be rolled back.
func (d *driver) Setxattr(path string, attrMap map[string][]byte) error {
for attr, value := range attrMap {
if err := sysx.Setxattr(path, attr, value, 0); err != nil {
return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
}
}
return nil
}
// LGetxattr returns all of the extended attributes for the file at path p
// not following symbolic links.
func (d *driver) LGetxattr(p string) (map[string][]byte, error) {
xattrs, err := sysx.LListxattr(p)
if err != nil {
return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
}
sort.Strings(xattrs)
m := make(map[string][]byte, len(xattrs))
for _, attr := range xattrs {
value, err := sysx.LGetxattr(p, attr)
if err != nil {
return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
}
// NOTE(stevvooe): This append/copy trick relies on unique
// xattrs. Break this out into an alloc/copy if xattrs are no
// longer unique.
m[attr] = append(m[attr], value...)
}
return m, nil
}
// LSetxattr sets all of the extended attributes on file at path, not
// following any symbolic links. All attributes on the target are
// replaced by the values from attr. If the operation fails to set any
// attribute, those already applied will not be rolled back.
func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error {
for attr, value := range attrMap {
if err := sysx.LSetxattr(path, attr, value, 0); err != nil {
return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
}
}
return nil
}
func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) {
return devices.DeviceInfo(fi)
}
// Readlink was forked on Windows to fix a Golang bug, use the "os" package here
func (d *driver) Readlink(p string) (string, error) {
return os.Readlink(p)
}
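Xattr support is optional, so consumers type-assert the driver against XAttrDriver before use, as the context's resolveXAttrs does. A sketch for platforms where the unix file above is compiled in; the path is a placeholder:

package main

import (
	"fmt"
	"log"

	driverpkg "github.com/containerd/continuity/driver"
)

func main() {
	xd, ok := driverpkg.LocalDriver.(driverpkg.XAttrDriver)
	if !ok {
		log.Fatal("xattrs not supported on this platform")
	}
	attrs, err := xd.Getxattr("/tmp") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	for name, value := range attrs {
		fmt.Printf("%s=%q\n", name, value)
	}
}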

View File

@ -0,0 +1,43 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"os"
"github.com/containerd/continuity/sysx"
)
func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
return &os.PathError{Op: "mknod", Path: path, Err: ErrNotSupported}
}
func (d *driver) Mkfifo(path string, mode os.FileMode) error {
return &os.PathError{Op: "mkfifo", Path: path, Err: ErrNotSupported}
}
// Lchmod changes the mode of a file, not following symlinks.
func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
// TODO: Use Window's equivalent
return os.Chmod(path, mode)
}
// Readlink is forked in order to support Volume paths which are used
// in container layers.
func (d *driver) Readlink(p string) (string, error) {
return sysx.Readlink(p)
}

View File

@ -0,0 +1,39 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"os"
"golang.org/x/sys/unix"
)
// Lchmod changes the mode of a file not following symlinks.
func (d *driver) Lchmod(path string, mode os.FileMode) error {
// On Linux, file mode is not supported for symlinks,
// and fchmodat() does not support AT_SYMLINK_NOFOLLOW,
// so symlinks need to be skipped entirely.
if st, err := os.Stat(path); err == nil && st.Mode()&os.ModeSymlink != 0 {
return nil
}
err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), 0)
if err != nil {
err = &os.PathError{Op: "lchmod", Path: path, Err: err}
}
return err
}

View File

@ -0,0 +1,34 @@
// +build darwin freebsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"os"
"golang.org/x/sys/unix"
)
// Lchmod changes the mode of a file not following symlinks.
func (d *driver) Lchmod(path string, mode os.FileMode) error {
err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
err = &os.PathError{Op: "lchmod", Path: path, Err: err}
}
return err
}

View File

@ -0,0 +1,90 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"io"
"io/ioutil"
"os"
"sort"
)
// ReadFile works the same as ioutil.ReadFile with the Driver abstraction
func ReadFile(r Driver, filename string) ([]byte, error) {
f, err := r.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
return data, nil
}
// WriteFile works the same as ioutil.WriteFile with the Driver abstraction
func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error {
f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
defer f.Close()
n, err := f.Write(data)
if err != nil {
return err
} else if n != len(data) {
return io.ErrShortWrite
}
return nil
}
// ReadDir works the same as ioutil.ReadDir with the Driver abstraction
func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) {
f, err := r.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
dirs, err := f.Readdir(-1)
if err != nil {
return nil, err
}
sort.Sort(fileInfos(dirs))
return dirs, nil
}
// fileInfos is a simple implementation of sort.Interface for os.FileInfo slices.
type fileInfos []os.FileInfo
func (fis fileInfos) Len() int {
return len(fis)
}
func (fis fileInfos) Less(i, j int) bool {
return fis[i].Name() < fis[j].Name()
}
func (fis fileInfos) Swap(i, j int) {
fis[i], fis[j] = fis[j], fis[i]
}
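The helpers above mirror ioutil but route every operation through a Driver, so the same code runs against LocalDriver or any custom implementation. A sketch with placeholder paths:

package main

import (
	"fmt"
	"log"

	driverpkg "github.com/containerd/continuity/driver"
)

func main() {
	d := driverpkg.LocalDriver
	if err := driverpkg.WriteFile(d, "/tmp/example.txt", []byte("hello\n"), 0644); err != nil {
		log.Fatal(err)
	}
	data, err := driverpkg.ReadFile(d, "/tmp/example.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", data)

	infos, err := driverpkg.ReadDir(d, "/tmp")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(infos), "entries, sorted by name")
}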

129
vendor/github.com/containerd/continuity/groups_unix.go generated vendored Normal file
View File

@ -0,0 +1,129 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// TODO(stevvooe): This needs a lot of work before we can call it useful.
type groupIndex struct {
byName map[string]*group
byGID map[int]*group
}
func getGroupIndex() (*groupIndex, error) {
f, err := os.Open("/etc/group")
if err != nil {
return nil, err
}
defer f.Close()
groups, err := parseGroups(f)
if err != nil {
return nil, err
}
return newGroupIndex(groups), nil
}
func newGroupIndex(groups []group) *groupIndex {
gi := &groupIndex{
byName: make(map[string]*group),
byGID: make(map[int]*group),
}
for i, group := range groups {
gi.byGID[group.gid] = &groups[i]
gi.byName[group.name] = &groups[i]
}
return gi
}
type group struct {
name string
gid int
members []string
}
func getGroupName(gid int) (string, error) {
f, err := os.Open("/etc/group")
if err != nil {
return "", err
}
defer f.Close()
groups, err := parseGroups(f)
if err != nil {
return "", err
}
for _, group := range groups {
if group.gid == gid {
return group.name, nil
}
}
return "", fmt.Errorf("no group for gid")
}
// parseGroups parses an /etc/group file for group names, ids and membership.
// This is unix specific.
func parseGroups(rd io.Reader) ([]group, error) {
var groups []group
scanner := bufio.NewScanner(rd)
for scanner.Scan() {
if strings.HasPrefix(scanner.Text(), "#") {
continue // skip comment
}
parts := strings.SplitN(scanner.Text(), ":", 4)
if len(parts) != 4 {
return nil, fmt.Errorf("bad entry: %q", scanner.Text())
}
name, _, sgid, smembers := parts[0], parts[1], parts[2], parts[3]
gid, err := strconv.Atoi(sgid)
if err != nil {
return nil, fmt.Errorf("bad gid: %q", gid)
}
members := strings.Split(smembers, ",")
groups = append(groups, group{
name: name,
gid: gid,
members: members,
})
}
if scanner.Err() != nil {
return nil, scanner.Err()
}
return groups, nil
}
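parseGroups is unexported, so the sketch below is written as a hypothetical in-package test to show the /etc/group line format it accepts, name:password:gid:members, with "#" comment lines skipped.

package continuity

import (
	"strings"
	"testing"
)

func TestParseGroupsSketch(t *testing.T) {
	input := "wheel:x:10:root,alice\n# comment lines are skipped\n"
	groups, err := parseGroups(strings.NewReader(input))
	if err != nil {
		t.Fatal(err)
	}
	if len(groups) != 1 || groups[0].name != "wheel" || groups[0].gid != 10 {
		t.Fatalf("unexpected parse: %#v", groups)
	}
}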

73
vendor/github.com/containerd/continuity/hardlinks.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"os"
)
var (
errNotAHardLink = fmt.Errorf("invalid hardlink")
)
type hardlinkManager struct {
hardlinks map[hardlinkKey][]Resource
}
func newHardlinkManager() *hardlinkManager {
return &hardlinkManager{
hardlinks: map[hardlinkKey][]Resource{},
}
}
// Add attempts to add the resource to the hardlink manager. If the resource
// cannot be considered a hardlink candidate, errNotAHardLink is returned.
func (hlm *hardlinkManager) Add(fi os.FileInfo, resource Resource) error {
if _, ok := resource.(Hardlinkable); !ok {
return errNotAHardLink
}
key, err := newHardlinkKey(fi)
if err != nil {
return err
}
hlm.hardlinks[key] = append(hlm.hardlinks[key], resource)
return nil
}
// Merge processes the current state of the hardlink manager and merges any
// shared nodes into hardlinked resources.
func (hlm *hardlinkManager) Merge() ([]Resource, error) {
var resources []Resource
for key, linked := range hlm.hardlinks {
if len(linked) < 1 {
return nil, fmt.Errorf("no hardlink entrys for dev, inode pair: %#v", key)
}
merged, err := Merge(linked...)
if err != nil {
return nil, fmt.Errorf("error merging hardlink: %v", err)
}
resources = append(resources, merged)
}
return resources, nil
}

View File

@ -0,0 +1,52 @@
// +build linux darwin freebsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"os"
"syscall"
)
// hardlinkKey provides a tuple-key for managing hardlinks. This is system-
// specific.
type hardlinkKey struct {
dev uint64
inode uint64
}
// newHardlinkKey returns a hardlink key for the provided file info. If the
// resource does not represent a possible hardlink, errNotAHardLink will be
// returned.
func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return hardlinkKey{}, fmt.Errorf("cannot resolve (*syscall.Stat_t) from os.FileInfo")
}
if sys.Nlink < 2 {
// NOTE(stevvooe): This is not always true for all filesystems. We
// should somehow detect this and provide a slow "polyfill" that
// leverages os.SameFile if we detect a filesystem where link counts
// are not really supported.
return hardlinkKey{}, errNotAHardLink
}
return hardlinkKey{dev: uint64(sys.Dev), inode: uint64(sys.Ino)}, nil
}

View File

@ -0,0 +1,28 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import "os"
type hardlinkKey struct{}
func newHardlinkKey(fi os.FileInfo) (hardlinkKey, error) {
// NOTE(stevvooe): Obviously, this is not yet implemented. However, the
// makings of an implementation are available in src/os/types_windows.go. More
// investigation needs to be done to figure out exactly how to do this.
return hardlinkKey{}, errNotAHardLink
}

63
vendor/github.com/containerd/continuity/ioutils.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
)
// AtomicWriteFile atomically writes data to a file by first writing to a
// temp file and calling rename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
buf := bytes.NewBuffer(data)
return atomicWriteFile(filename, buf, int64(len(data)), perm)
}
// atomicWriteFile writes data to a file by first writing to a temp
// file and calling rename.
func atomicWriteFile(filename string, r io.Reader, dataSize int64, perm os.FileMode) error {
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return err
}
err = os.Chmod(f.Name(), perm)
if err != nil {
f.Close()
return err
}
n, err := io.Copy(f, r)
if err == nil && n < dataSize {
f.Close()
return io.ErrShortWrite
}
if err != nil {
f.Close()
return err
}
if err := f.Sync(); err != nil {
f.Close()
return err
}
if err := f.Close(); err != nil {
return err
}
return os.Rename(f.Name(), filename)
}
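AtomicWriteFile is a drop-in replacement for ioutil.WriteFile when partial writes must never be visible: readers see either the old file or the new one. A sketch with a placeholder path:

package main

import (
	"log"

	"github.com/containerd/continuity"
)

func main() {
	// The temp file lives in the same directory, so the final rename is
	// atomic on POSIX filesystems.
	if err := continuity.AtomicWriteFile("/tmp/config.json", []byte(`{"ok":true}`), 0600); err != nil {
		log.Fatal(err)
	}
}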

160
vendor/github.com/containerd/continuity/manifest.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"io"
"log"
"os"
"sort"
pb "github.com/containerd/continuity/proto"
"github.com/golang/protobuf/proto"
)
// Manifest provides the contents of a manifest. Users of this struct should
// not typically modify any fields directly.
type Manifest struct {
// Resources specifies all the resources for a manifest in order by path.
Resources []Resource
}
func Unmarshal(p []byte) (*Manifest, error) {
var bm pb.Manifest
if err := proto.Unmarshal(p, &bm); err != nil {
return nil, err
}
var m Manifest
for _, b := range bm.Resource {
r, err := fromProto(b)
if err != nil {
return nil, err
}
m.Resources = append(m.Resources, r)
}
return &m, nil
}
func Marshal(m *Manifest) ([]byte, error) {
var bm pb.Manifest
for _, resource := range m.Resources {
bm.Resource = append(bm.Resource, toProto(resource))
}
return proto.Marshal(&bm)
}
func MarshalText(w io.Writer, m *Manifest) error {
var bm pb.Manifest
for _, resource := range m.Resources {
bm.Resource = append(bm.Resource, toProto(resource))
}
return proto.MarshalText(w, &bm)
}
// BuildManifest creates the manifest for the given context
func BuildManifest(ctx Context) (*Manifest, error) {
resourcesByPath := map[string]Resource{}
hardlinks := newHardlinkManager()
if err := ctx.Walk(func(p string, fi os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("error walking %s: %v", p, err)
}
if p == string(os.PathSeparator) {
// skip root
return nil
}
resource, err := ctx.Resource(p, fi)
if err != nil {
if err == ErrNotFound {
return nil
}
log.Printf("error getting resource %q: %v", p, err)
return err
}
// add to the hardlink manager
if err := hardlinks.Add(fi, resource); err == nil {
// Resource has been accepted by hardlink manager so we don't add
// it to the resourcesByPath until we merge at the end.
return nil
} else if err != errNotAHardLink {
// handle any other case where we have a proper error.
return fmt.Errorf("adding hardlink %s: %v", p, err)
}
resourcesByPath[p] = resource
return nil
}); err != nil {
return nil, err
}
// merge and post-process the hardlinks.
hardlinked, err := hardlinks.Merge()
if err != nil {
return nil, err
}
for _, resource := range hardlinked {
resourcesByPath[resource.Path()] = resource
}
var resources []Resource
for _, resource := range resourcesByPath {
resources = append(resources, resource)
}
sort.Stable(ByPath(resources))
return &Manifest{
Resources: resources,
}, nil
}
// VerifyManifest verifies all the resources in a manifest
// against files from the given context.
func VerifyManifest(ctx Context, manifest *Manifest) error {
for _, resource := range manifest.Resources {
if err := ctx.Verify(resource); err != nil {
return err
}
}
return nil
}
// ApplyManifest applies the resources in a manifest to
// the given context.
func ApplyManifest(ctx Context, manifest *Manifest) error {
for _, resource := range manifest.Resources {
if err := ctx.Apply(resource); err != nil {
return err
}
}
return nil
}
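An end-to-end sketch of the manifest API above: capture a directory, marshal the manifest, unmarshal it, and verify the tree still matches. The root path is a placeholder.

package main

import (
	"log"

	"github.com/containerd/continuity"
)

func main() {
	fsCtx, err := continuity.NewContext("/tmp/bundle")
	if err != nil {
		log.Fatal(err)
	}
	manifest, err := continuity.BuildManifest(fsCtx)
	if err != nil {
		log.Fatal(err)
	}
	p, err := continuity.Marshal(manifest)
	if err != nil {
		log.Fatal(err)
	}
	restored, err := continuity.Unmarshal(p)
	if err != nil {
		log.Fatal(err)
	}
	if err := continuity.VerifyManifest(fsCtx, restored); err != nil {
		log.Fatal(err) // a changed file, mode, or owner surfaces here
	}
}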

19
vendor/github.com/containerd/continuity/proto/gen.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proto
//go:generate protoc --go_out=. manifest.proto

View File

@ -0,0 +1,181 @@
// Code generated by protoc-gen-go.
// source: manifest.proto
// DO NOT EDIT!
/*
Package proto is a generated protocol buffer package.
It is generated from these files:
manifest.proto
It has these top-level messages:
Manifest
Resource
XAttr
ADSEntry
*/
package proto
import proto1 "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto1.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package
// Manifest specifies the entries in a container bundle, keyed and sorted by
// path.
type Manifest struct {
Resource []*Resource `protobuf:"bytes,1,rep,name=resource" json:"resource,omitempty"`
}
func (m *Manifest) Reset() { *m = Manifest{} }
func (m *Manifest) String() string { return proto1.CompactTextString(m) }
func (*Manifest) ProtoMessage() {}
func (*Manifest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Manifest) GetResource() []*Resource {
if m != nil {
return m.Resource
}
return nil
}
type Resource struct {
// Path specifies the path from the bundle root. If more than one
// path is present, the entry may represent a hardlink, rather than using
// a link target. The path format is operating system specific.
Path []string `protobuf:"bytes,1,rep,name=path" json:"path,omitempty"`
// Uid specifies the user id for the resource.
Uid int64 `protobuf:"varint,2,opt,name=uid" json:"uid,omitempty"`
// Gid specifies the group id for the resource.
Gid int64 `protobuf:"varint,3,opt,name=gid" json:"gid,omitempty"`
// user and group are not currently used but their field numbers have been
// reserved for future use. As such, they are marked as deprecated.
User string `protobuf:"bytes,4,opt,name=user" json:"user,omitempty"`
Group string `protobuf:"bytes,5,opt,name=group" json:"group,omitempty"`
// Mode defines the file mode and permissions. We've used the same
// bit-packing from Go's os package,
// http://golang.org/pkg/os/#FileMode, since they've done the work of
// creating a cross-platform layout.
Mode uint32 `protobuf:"varint,6,opt,name=mode" json:"mode,omitempty"`
// Size specifies the size in bytes of the resource. This is only valid
// for regular files.
Size uint64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"`
// Digest specifies the content digest of the target file. Only valid for
// regular files. The strings are formatted in OCI style, i.e. <alg>:<encoded>.
// For detailed information about the format, please refer to OCI Image Spec:
// https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification
// The digests are sorted in lexical order and implementations may choose
// which algorithms they prefer.
Digest []string `protobuf:"bytes,8,rep,name=digest" json:"digest,omitempty"`
// Target defines the target of a hard or soft link. Absolute links start
// with a slash and specify the resource relative to the bundle root.
// Relative links do not start with a slash and are relative to the
// resource path.
Target string `protobuf:"bytes,9,opt,name=target" json:"target,omitempty"`
// Major specifies the major device number for character and block devices.
Major uint64 `protobuf:"varint,10,opt,name=major" json:"major,omitempty"`
// Minor specifies the minor device number for character and block devices.
Minor uint64 `protobuf:"varint,11,opt,name=minor" json:"minor,omitempty"`
// Xattr provides storage for extended attributes for the target resource.
Xattr []*XAttr `protobuf:"bytes,12,rep,name=xattr" json:"xattr,omitempty"`
// Ads stores one or more alternate data streams for the target resource.
Ads []*ADSEntry `protobuf:"bytes,13,rep,name=ads" json:"ads,omitempty"`
}
func (m *Resource) Reset() { *m = Resource{} }
func (m *Resource) String() string { return proto1.CompactTextString(m) }
func (*Resource) ProtoMessage() {}
func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Resource) GetXattr() []*XAttr {
if m != nil {
return m.Xattr
}
return nil
}
func (m *Resource) GetAds() []*ADSEntry {
if m != nil {
return m.Ads
}
return nil
}
// XAttr encodes extended attributes for a resource.
type XAttr struct {
// Name specifies the attribute name.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Data specifies the associated data for the attribute.
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *XAttr) Reset() { *m = XAttr{} }
func (m *XAttr) String() string { return proto1.CompactTextString(m) }
func (*XAttr) ProtoMessage() {}
func (*XAttr) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// ADSEntry encodes information for a Windows Alternate Data Stream.
type ADSEntry struct {
// Name specifies the stream name.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Data specifies the stream data.
// See also the description about the digest below.
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
// Digest is a CAS representation of the stream data.
//
// At least one of data or digest MUST be specified, and either one of them
// SHOULD be specified.
//
// How to access the actual data using the digest is implementation-specific,
// and implementations can choose not to implement digest.
// So, digest SHOULD be used only when the stream data is large.
Digest string `protobuf:"bytes,3,opt,name=digest" json:"digest,omitempty"`
}
func (m *ADSEntry) Reset() { *m = ADSEntry{} }
func (m *ADSEntry) String() string { return proto1.CompactTextString(m) }
func (*ADSEntry) ProtoMessage() {}
func (*ADSEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func init() {
proto1.RegisterType((*Manifest)(nil), "proto.Manifest")
proto1.RegisterType((*Resource)(nil), "proto.Resource")
proto1.RegisterType((*XAttr)(nil), "proto.XAttr")
proto1.RegisterType((*ADSEntry)(nil), "proto.ADSEntry")
}
func init() { proto1.RegisterFile("manifest.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 317 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x90, 0x4f, 0x4b, 0xf3, 0x40,
0x10, 0xc6, 0x49, 0x93, 0xf4, 0x4d, 0xa7, 0xed, 0xab, 0x2c, 0x52, 0xe6, 0x18, 0x73, 0x0a, 0x08,
0x15, 0xf4, 0xe0, 0xb9, 0xa2, 0x17, 0xc1, 0xcb, 0x7a, 0xf1, 0xba, 0xba, 0x6b, 0x5c, 0x21, 0xd9,
0xb0, 0xd9, 0x80, 0xfa, 0xe5, 0xfc, 0x6a, 0x32, 0xb3, 0x69, 0xd1, 0x9b, 0xa7, 0x3c, 0xcf, 0x6f,
0xfe, 0x64, 0xf6, 0x81, 0xff, 0xad, 0xea, 0xec, 0x8b, 0x19, 0xc2, 0xb6, 0xf7, 0x2e, 0x38, 0x91,
0xf3, 0xa7, 0xba, 0x82, 0xe2, 0x7e, 0x2a, 0x88, 0x33, 0x28, 0xbc, 0x19, 0xdc, 0xe8, 0x9f, 0x0d,
0x26, 0x65, 0x5a, 0x2f, 0x2f, 0x8e, 0x62, 0xf3, 0x56, 0x4e, 0x58, 0x1e, 0x1a, 0xaa, 0xaf, 0x19,
0x14, 0x7b, 0x2c, 0x04, 0x64, 0xbd, 0x0a, 0xaf, 0x3c, 0xb5, 0x90, 0xac, 0xc5, 0x31, 0xa4, 0xa3,
0xd5, 0x38, 0x2b, 0x93, 0x3a, 0x95, 0x24, 0x89, 0x34, 0x56, 0x63, 0x1a, 0x49, 0x63, 0xb5, 0xd8,
0x40, 0x36, 0x0e, 0xc6, 0x63, 0x56, 0x26, 0xf5, 0xe2, 0x7a, 0x86, 0x89, 0x64, 0x2f, 0x10, 0xf2,
0xc6, 0xbb, 0xb1, 0xc7, 0xfc, 0x50, 0x88, 0x80, 0xfe, 0xd4, 0x3a, 0x6d, 0x70, 0x5e, 0x26, 0xf5,
0x5a, 0xb2, 0x26, 0x36, 0xd8, 0x4f, 0x83, 0xff, 0xca, 0xa4, 0xce, 0x24, 0x6b, 0xb1, 0x81, 0xb9,
0xb6, 0x8d, 0x19, 0x02, 0x16, 0x7c, 0xd3, 0xe4, 0x88, 0x07, 0xe5, 0x1b, 0x13, 0x70, 0x41, 0xab,
0xe5, 0xe4, 0xc4, 0x09, 0xe4, 0xad, 0x7a, 0x73, 0x1e, 0x81, 0x97, 0x44, 0xc3, 0xd4, 0x76, 0xce,
0xe3, 0x72, 0xa2, 0x64, 0x44, 0x05, 0xf9, 0xbb, 0x0a, 0xc1, 0xe3, 0x8a, 0x43, 0x5a, 0x4d, 0x21,
0x3d, 0xee, 0x42, 0xf0, 0x32, 0x96, 0xc4, 0x29, 0xa4, 0x4a, 0x0f, 0xb8, 0xfe, 0x15, 0xe3, 0xee,
0xe6, 0xe1, 0xb6, 0x0b, 0xfe, 0x43, 0x52, 0xad, 0x3a, 0x87, 0x9c, 0x47, 0xe8, 0xfe, 0x4e, 0xb5,
0x94, 0x39, 0x5d, 0xc4, 0x9a, 0x98, 0x56, 0x41, 0x71, 0x7c, 0x2b, 0xc9, 0xba, 0xba, 0x83, 0x62,
0xbf, 0xe1, 0xaf, 0x33, 0x3f, 0x72, 0x48, 0xe3, 0x7b, 0xa3, 0x7b, 0x9a, 0xf3, 0x45, 0x97, 0xdf,
0x01, 0x00, 0x00, 0xff, 0xff, 0xef, 0x27, 0x99, 0xf7, 0x17, 0x02, 0x00, 0x00,
}

View File

@ -0,0 +1,97 @@
syntax = "proto3";
package proto;
// Manifest specifies the entries in a container bundle, keyed and sorted by
// path.
message Manifest {
repeated Resource resource = 1;
}
message Resource {
// Path specifies the path from the bundle root. If more than one
// path is present, the entry may represent a hardlink, rather than using
// a link target. The path format is operating system specific.
repeated string path = 1;
// NOTE(stevvooe): Need to define clear precedence for user/group/uid/gid.
// Uid specifies the user id for the resource.
int64 uid = 2;
// Gid specifies the group id for the resource.
int64 gid = 3;
// user and group are not currently used but their field numbers have been
// reserved for future use. As such, they are marked as deprecated.
string user = 4 [deprecated=true]; // "deprecated" stands for "reserved" here
string group = 5 [deprecated=true]; // "deprecated" stands for "reserved" here
// Mode defines the file mode and permissions. We've used the same
// bit-packing from Go's os package,
// http://golang.org/pkg/os/#FileMode, since they've done the work of
// creating a cross-platform layout.
uint32 mode = 6;
// NOTE(stevvooe): Beyond here, we start defining type specific fields.
// Size specifies the size in bytes of the resource. This is only valid
// for regular files.
uint64 size = 7;
// Digest specifies the content digest of the target file. Only valid for
// regular files. The strings are formatted in OCI style, i.e. <alg>:<encoded>.
// For detailed information about the format, please refer to OCI Image Spec:
// https://github.com/opencontainers/image-spec/blob/master/descriptor.md#digests-and-verification
// The digests are sorted in lexical order and implementations may choose
// which algorithms they prefer.
repeated string digest = 8;
// Target defines the target of a hard or soft link. Absolute links start
// with a slash and specify the resource relative to the bundle root.
// Relative links do not start with a slash and are relative to the
// resource path.
string target = 9;
// Major specifies the major device number for character and block devices.
uint64 major = 10;
// Minor specifies the minor device number for character and block devices.
uint64 minor = 11;
// Xattr provides storage for extended attributes for the target resource.
repeated XAttr xattr = 12;
// Ads stores one or more alternate data streams for the target resource.
repeated ADSEntry ads = 13;
}
// XAttr encodes extended attributes for a resource.
message XAttr {
// Name specifies the attribute name.
string name = 1;
// Data specifies the associated data for the attribute.
bytes data = 2;
}
// ADSEntry encodes information for a Windows Alternate Data Stream.
message ADSEntry {
// Name specifies the stream name.
string name = 1;
// Data specifies the stream data.
// See also the description about the digest below.
bytes data = 2;
// Digest is a CAS representation of the stream data.
//
// At least one of data or digest MUST be specified, and either one of them
// SHOULD be specified.
//
// How to access the actual data using the digest is implementation-specific,
// and implementations can choose not to implement digest.
// So, digest SHOULD be used only when the stream data is large.
string digest = 3;
}
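
As a rough illustration of the message shapes above, here is a minimal Go sketch (hypothetical; it assumes the generated `pb` types from manifest.pb.go and `github.com/opencontainers/go-digest`) that builds a one-entry manifest for a hardlinked regular file:

```Go
package main

import (
	"fmt"

	pb "github.com/containerd/continuity/proto"
	"github.com/opencontainers/go-digest"
)

func main() {
	content := "#!/bin/sh\n"
	// Two paths on one entry encode a hardlink; digests use the OCI
	// <alg>:<encoded> string form documented above.
	r := &pb.Resource{
		Path:   []string{"/bin/busybox", "/bin/sh"},
		Uid:    0,
		Gid:    0,
		Mode:   0755,
		Size:   uint64(len(content)),
		Digest: []string{digest.FromString(content).String()},
	}
	m := &pb.Manifest{Resource: []*pb.Resource{r}}
	fmt.Println(m.String())
}
```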

590
vendor/github.com/containerd/continuity/resource.go generated vendored Normal file
View File

@ -0,0 +1,590 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"errors"
"fmt"
"os"
"reflect"
"sort"
pb "github.com/containerd/continuity/proto"
"github.com/opencontainers/go-digest"
)
// TODO(stevvooe): A record based model, somewhat sketched out at the bottom
// of this file, will be more flexible. Another possibility is to tie the package
// interface directly to the protobuf type. This will have efficiency
// advantages at the cost of coupling the nasty codegen types to the exported
// interface.
type Resource interface {
// Path provides the primary resource path relative to the bundle root. In
// cases where resources have more than one path, such as with hard links,
// this will return the primary path, which is often just the first entry.
Path() string
// Mode returns the file mode and permissions of the resource.
Mode() os.FileMode
UID() int64
GID() int64
}
// ByPath provides the canonical sort order for a set of resources. Use with
// sort.Stable for deterministic sorting.
type ByPath []Resource
func (bp ByPath) Len() int { return len(bp) }
func (bp ByPath) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] }
func (bp ByPath) Less(i, j int) bool { return bp[i].Path() < bp[j].Path() }
type XAttrer interface {
XAttrs() map[string][]byte
}
// Hardlinkable is an interface that a resource type satisfies if it can be a
// hardlink target.
type Hardlinkable interface {
// Paths returns all paths of the resource, including the primary path
// returned by Resource.Path. If len(Paths()) > 1, the resource is a hard
// link.
Paths() []string
}
type RegularFile interface {
Resource
XAttrer
Hardlinkable
Size() int64
Digests() []digest.Digest
}
// Merge two or more Resources into a new file. Typically, this should be
// used to merge regular files as hardlinks. If the files are not identical,
// other than Paths and Digests, the merge will fail and an error will be
// returned.
func Merge(fs ...Resource) (Resource, error) {
if len(fs) < 1 {
return nil, fmt.Errorf("please provide a resource to merge")
}
if len(fs) == 1 {
return fs[0], nil
}
var paths []string
var digests []digest.Digest
bypath := map[string][]Resource{}
// The attributes are all compared against the first to make sure they
// agree before adding to the above collections. If any of these don't
// correctly validate, the merge fails.
prototype := fs[0]
xattrs := make(map[string][]byte)
// initialize xattrs for use below. All files must have same xattrs.
if prototypeXAttrer, ok := prototype.(XAttrer); ok {
for attr, value := range prototypeXAttrer.XAttrs() {
xattrs[attr] = value
}
}
for _, f := range fs {
h, isHardlinkable := f.(Hardlinkable)
if !isHardlinkable {
return nil, errNotAHardLink
}
if f.Mode() != prototype.Mode() {
return nil, fmt.Errorf("modes do not match: %v != %v", f.Mode(), prototype.Mode())
}
if f.UID() != prototype.UID() {
return nil, fmt.Errorf("uid does not match: %v != %v", f.UID(), prototype.UID())
}
if f.GID() != prototype.GID() {
return nil, fmt.Errorf("gid does not match: %v != %v", f.GID(), prototype.GID())
}
if xattrer, ok := f.(XAttrer); ok {
fxattrs := xattrer.XAttrs()
if !reflect.DeepEqual(fxattrs, xattrs) {
return nil, fmt.Errorf("resource %q xattrs do not match: %v != %v", f, fxattrs, xattrs)
}
}
for _, p := range h.Paths() {
pfs, ok := bypath[p]
if !ok {
// ensure paths are unique by only appending on a new path.
paths = append(paths, p)
}
bypath[p] = append(pfs, f)
}
if regFile, isRegFile := f.(RegularFile); isRegFile {
prototypeRegFile, prototypeIsRegFile := prototype.(RegularFile)
if !prototypeIsRegFile {
return nil, errors.New("prototype is not a regular file")
}
if regFile.Size() != prototypeRegFile.Size() {
return nil, fmt.Errorf("size does not match: %v != %v", regFile.Size(), prototypeRegFile.Size())
}
digests = append(digests, regFile.Digests()...)
} else if device, isDevice := f.(Device); isDevice {
prototypeDevice, prototypeIsDevice := prototype.(Device)
if !prototypeIsDevice {
return nil, errors.New("prototype is not a device")
}
if device.Major() != prototypeDevice.Major() {
return nil, fmt.Errorf("major number does not match: %v != %v", device.Major(), prototypeDevice.Major())
}
if device.Minor() != prototypeDevice.Minor() {
return nil, fmt.Errorf("minor number does not match: %v != %v", device.Minor(), prototypeDevice.Minor())
}
} else if _, isNamedPipe := f.(NamedPipe); isNamedPipe {
_, prototypeIsNamedPipe := prototype.(NamedPipe)
if !prototypeIsNamedPipe {
return nil, errors.New("prototype is not a named pipe")
}
} else {
return nil, errNotAHardLink
}
}
sort.Stable(sort.StringSlice(paths))
// Choose a "canonical" file. Really, it is just the first file to sort
// against. We also effectively select the very first digest as the
// "canonical" one for this file.
first := bypath[paths[0]][0]
resource := resource{
paths: paths,
mode: first.Mode(),
uid: first.UID(),
gid: first.GID(),
xattrs: xattrs,
}
switch typedF := first.(type) {
case RegularFile:
var err error
digests, err = uniqifyDigests(digests...)
if err != nil {
return nil, err
}
return &regularFile{
resource: resource,
size: typedF.Size(),
digests: digests,
}, nil
case Device:
return &device{
resource: resource,
major: typedF.Major(),
minor: typedF.Minor(),
}, nil
case NamedPipe:
return &namedPipe{
resource: resource,
}, nil
default:
return nil, errNotAHardLink
}
}
type Directory interface {
Resource
XAttrer
// Directory is a no-op method to identify directory objects by interface.
Directory()
}
type SymLink interface {
Resource
// Target returns the target of the symlink contained in the resource.
Target() string
}
type NamedPipe interface {
Resource
Hardlinkable
XAttrer
// Pipe is a no-op method to allow consistent resolution of NamedPipe
// interface.
Pipe()
}
type Device interface {
Resource
Hardlinkable
XAttrer
Major() uint64
Minor() uint64
}
type resource struct {
paths []string
mode os.FileMode
uid, gid int64
xattrs map[string][]byte
}
var _ Resource = &resource{}
func (r *resource) Path() string {
if len(r.paths) < 1 {
return ""
}
return r.paths[0]
}
func (r *resource) Mode() os.FileMode {
return r.mode
}
func (r *resource) UID() int64 {
return r.uid
}
func (r *resource) GID() int64 {
return r.gid
}
type regularFile struct {
resource
size int64
digests []digest.Digest
}
var _ RegularFile = &regularFile{}
// newRegularFile returns a RegularFile, using the populated base resource
// and one or more digests of the content.
func newRegularFile(base resource, paths []string, size int64, dgsts ...digest.Digest) (RegularFile, error) {
if !base.Mode().IsRegular() {
return nil, fmt.Errorf("not a regular file")
}
base.paths = make([]string, len(paths))
copy(base.paths, paths)
// make our own copy of digests
ds := make([]digest.Digest, len(dgsts))
copy(ds, dgsts)
return &regularFile{
resource: base,
size: size,
digests: ds,
}, nil
}
func (rf *regularFile) Paths() []string {
paths := make([]string, len(rf.paths))
copy(paths, rf.paths)
return paths
}
func (rf *regularFile) Size() int64 {
return rf.size
}
func (rf *regularFile) Digests() []digest.Digest {
digests := make([]digest.Digest, len(rf.digests))
copy(digests, rf.digests)
return digests
}
func (rf *regularFile) XAttrs() map[string][]byte {
xattrs := make(map[string][]byte, len(rf.xattrs))
for attr, value := range rf.xattrs {
xattrs[attr] = append(xattrs[attr], value...)
}
return xattrs
}
type directory struct {
resource
}
var _ Directory = &directory{}
func newDirectory(base resource) (Directory, error) {
if !base.Mode().IsDir() {
return nil, fmt.Errorf("not a directory")
}
return &directory{
resource: base,
}, nil
}
func (d *directory) Directory() {}
func (d *directory) XAttrs() map[string][]byte {
xattrs := make(map[string][]byte, len(d.xattrs))
for attr, value := range d.xattrs {
xattrs[attr] = append(xattrs[attr], value...)
}
return xattrs
}
type symLink struct {
resource
target string
}
var _ SymLink = &symLink{}
func newSymLink(base resource, target string) (SymLink, error) {
if base.Mode()&os.ModeSymlink == 0 {
return nil, fmt.Errorf("not a symlink")
}
return &symLink{
resource: base,
target: target,
}, nil
}
func (l *symLink) Target() string {
return l.target
}
type namedPipe struct {
resource
}
var _ NamedPipe = &namedPipe{}
func newNamedPipe(base resource, paths []string) (NamedPipe, error) {
if base.Mode()&os.ModeNamedPipe == 0 {
return nil, fmt.Errorf("not a namedpipe")
}
base.paths = make([]string, len(paths))
copy(base.paths, paths)
return &namedPipe{
resource: base,
}, nil
}
func (np *namedPipe) Pipe() {}
func (np *namedPipe) Paths() []string {
paths := make([]string, len(np.paths))
copy(paths, np.paths)
return paths
}
func (np *namedPipe) XAttrs() map[string][]byte {
xattrs := make(map[string][]byte, len(np.xattrs))
for attr, value := range np.xattrs {
xattrs[attr] = append(xattrs[attr], value...)
}
return xattrs
}
type device struct {
resource
major, minor uint64
}
var _ Device = &device{}
func newDevice(base resource, paths []string, major, minor uint64) (Device, error) {
if base.Mode()&os.ModeDevice == 0 {
return nil, fmt.Errorf("not a device")
}
base.paths = make([]string, len(paths))
copy(base.paths, paths)
return &device{
resource: base,
major: major,
minor: minor,
}, nil
}
func (d *device) Paths() []string {
paths := make([]string, len(d.paths))
copy(paths, d.paths)
return paths
}
func (d *device) XAttrs() map[string][]byte {
xattrs := make(map[string][]byte, len(d.xattrs))
for attr, value := range d.xattrs {
xattrs[attr] = append(xattrs[attr], value...)
}
return xattrs
}
func (d device) Major() uint64 {
return d.major
}
func (d device) Minor() uint64 {
return d.minor
}
// toProto converts a resource to a protobuf record. We'd like to push this
// to the individual types, but we want to keep this all together during
// prototyping.
func toProto(resource Resource) *pb.Resource {
b := &pb.Resource{
Path: []string{resource.Path()},
Mode: uint32(resource.Mode()),
Uid: resource.UID(),
Gid: resource.GID(),
}
if xattrer, ok := resource.(XAttrer); ok {
// Sorts the XAttrs by name for consistent ordering.
keys := []string{}
xattrs := xattrer.XAttrs()
for k := range xattrs {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
b.Xattr = append(b.Xattr, &pb.XAttr{Name: k, Data: xattrs[k]})
}
}
switch r := resource.(type) {
case RegularFile:
b.Path = r.Paths()
b.Size = uint64(r.Size())
for _, dgst := range r.Digests() {
b.Digest = append(b.Digest, dgst.String())
}
case SymLink:
b.Target = r.Target()
case Device:
b.Major, b.Minor = r.Major(), r.Minor()
b.Path = r.Paths()
case NamedPipe:
b.Path = r.Paths()
}
// enforce a few stability guarantees that may not be provided by the
// resource implementation.
sort.Strings(b.Path)
return b
}
// fromProto converts from a protobuf Resource to a Resource interface.
func fromProto(b *pb.Resource) (Resource, error) {
base := &resource{
paths: b.Path,
mode: os.FileMode(b.Mode),
uid: b.Uid,
gid: b.Gid,
}
base.xattrs = make(map[string][]byte, len(b.Xattr))
for _, attr := range b.Xattr {
base.xattrs[attr.Name] = attr.Data
}
switch {
case base.Mode().IsRegular():
dgsts := make([]digest.Digest, len(b.Digest))
for i, dgst := range b.Digest {
// TODO(stevvooe): Should we be validating at this point?
dgsts[i] = digest.Digest(dgst)
}
return newRegularFile(*base, b.Path, int64(b.Size), dgsts...)
case base.Mode().IsDir():
return newDirectory(*base)
case base.Mode()&os.ModeSymlink != 0:
return newSymLink(*base, b.Target)
case base.Mode()&os.ModeNamedPipe != 0:
return newNamedPipe(*base, b.Path)
case base.Mode()&os.ModeDevice != 0:
return newDevice(*base, b.Path, b.Major, b.Minor)
}
return nil, fmt.Errorf("unknown resource record (%#v): %s", b, base.Mode())
}
// NOTE(stevvooe): An alternative model that supports inline declaration.
// Convenient for unit testing where inline declarations may be desirable but
// creates an awkward API for the standard use case.
// type ResourceKind int
// const (
// ResourceRegularFile = iota + 1
// ResourceDirectory
// ResourceSymLink
// Resource
// )
// type Resource struct {
// Kind ResourceKind
// Paths []string
// Mode os.FileMode
// UID string
// GID string
// Size int64
// Digests []digest.Digest
// Target string
// Major, Minor int
// XAttrs map[string][]byte
// }
// type RegularFile struct {
// Paths []string
// Size int64
// Digests []digest.Digest
// Perm os.FileMode // os.ModePerm + sticky, setuid, setgid
// }
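
To make the merge semantics concrete, a small in-package sketch (hypothetical; it calls the unexported constructors above, so it would have to live inside package continuity, e.g. as a test) that merges two paths of the same file into one hardlinked resource:

```Go
package continuity

import (
	"reflect"
	"testing"

	"github.com/opencontainers/go-digest"
)

func TestMergeHardlink(t *testing.T) {
	content := "#!/bin/sh\n"
	base := resource{
		paths:  []string{"/bin/sh"},
		mode:   0755, // plain permission bits: a regular file
		uid:    0,
		gid:    0,
		xattrs: map[string][]byte{},
	}
	// newRegularFile copies the paths it is given, so base can be reused.
	a, err := newRegularFile(base, []string{"/bin/sh"}, int64(len(content)), digest.FromString(content))
	if err != nil {
		t.Fatal(err)
	}
	b, err := newRegularFile(base, []string{"/bin/busybox"}, int64(len(content)), digest.FromString(content))
	if err != nil {
		t.Fatal(err)
	}

	// Mode, uid, gid, size and xattrs all agree, so only the paths (and
	// identical digests) differ and the merge succeeds.
	merged, err := Merge(a, b)
	if err != nil {
		t.Fatal(err)
	}
	want := []string{"/bin/busybox", "/bin/sh"} // sorted by Merge
	if got := merged.(Hardlinkable).Paths(); !reflect.DeepEqual(got, want) {
		t.Fatalf("paths = %v, want %v", got, want)
	}
}
```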

View File

@ -0,0 +1,53 @@
// +build linux darwin freebsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import (
"fmt"
"os"
"syscall"
)
// newBaseResource returns a *resource, populated with data from p and fi,
// where p will be populated directly.
func newBaseResource(p string, fi os.FileInfo) (*resource, error) {
// TODO(stevvooe): This needs to be resolved for the container's root,
// where here we are really getting the host OS's value. We need to allow
// this to be passed in and fixed up to make these uid/gid mappings portable.
// Either this can be part of the driver or we can achieve it through some
// other mechanism.
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
// TODO(stevvooe): This may not be a hard error for all platforms. We
// may want to move this to the driver.
return nil, fmt.Errorf("unable to resolve syscall.Stat_t from (os.FileInfo).Sys(): %#v", fi)
}
return &resource{
paths: []string{p},
mode: fi.Mode(),
uid: int64(sys.Uid),
gid: int64(sys.Gid),
// NOTE(stevvooe): Population of shared xattrs field is deferred to
// the resource types that populate it. Since they are a property of
// the context, they must be set there.
}, nil
}
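
A short sketch of how this base constructor is meant to be used (hypothetical, in-package, Unix-only): lstat a path, wrap the result, then hand the *resource to one of the typed constructors:

```Go
package continuity

import (
	"fmt"
	"os"
)

// statResource is a hypothetical helper showing the intended call pattern.
func statResource(p string) (*resource, error) {
	fi, err := os.Lstat(p) // Lstat so symlinks are not followed
	if err != nil {
		return nil, err
	}
	r, err := newBaseResource(p, fi)
	if err != nil {
		return nil, err
	}
	fmt.Printf("%s uid=%d gid=%d mode=%v\n", r.Path(), r.UID(), r.GID(), r.Mode())
	return r, nil
}
```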

View File

@ -0,0 +1,28 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package continuity
import "os"
// newBaseResource returns a *resource, populated with data from p and fi,
// where p will be populated directly.
func newBaseResource(p string, fi os.FileInfo) (*resource, error) {
return &resource{
paths: []string{p},
mode: fi.Mode(),
}, nil
}

27
vendor/github.com/gofrs/flock/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2015, Tim Heckman
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of linode-netint nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

40
vendor/github.com/gofrs/flock/README.md generated vendored Normal file
View File

@ -0,0 +1,40 @@
# flock
[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock)
[![GoDoc](https://img.shields.io/badge/godoc-go--flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock)
[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE)
`flock` implements a thread-safe sync.Locker interface for file locking. It also
includes a non-blocking TryLock() function to allow locking without blocking execution.
## License
`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details.
## Go Compatibility
This package makes use of the `context` package that was introduced in Go 1.7. As such, this
package has an implicit dependency on Go 1.7+.
## Installation
```
go get -u github.com/gofrs/flock
```
## Usage
```Go
import "github.com/gofrs/flock"
fileLock := flock.New("/var/lock/go-lock.lock")
locked, err := fileLock.TryLock()
if err != nil {
// handle locking error
}
if locked {
// do work
fileLock.Unlock()
}
```
For more detailed usage information take a look at the package API docs on
[GoDoc](https://godoc.org/github.com/gofrs/flock).

127
vendor/github.com/gofrs/flock/flock.go generated vendored Normal file
View File

@ -0,0 +1,127 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
// Package flock implements a thread-safe sync.Locker interface for file locking.
// It also includes a non-blocking TryLock() function to allow locking
// without blocking execution.
//
// Package flock is released under the BSD 3-Clause License. See the LICENSE file
// for more details.
//
// While using this library, remember that the locking behaviors are not
// guaranteed to be the same on each platform. For example, some UNIX-like
// operating systems will transparently convert a shared lock to an exclusive
// lock. If you Unlock() the flock from a location where you believe that you
// have the shared lock, you may accidentally drop the exclusive lock.
package flock
import (
"context"
"os"
"sync"
"time"
)
// Flock is the struct type to handle file locking. All fields are unexported,
// with access to some of the fields provided by getter methods (Path() and Locked()).
type Flock struct {
path string
m sync.RWMutex
fh *os.File
l bool
r bool
}
// New returns a new instance of *Flock. The only parameter
// it takes is the path to the desired lockfile.
func New(path string) *Flock {
return &Flock{path: path}
}
// NewFlock returns a new instance of *Flock. The only parameter
// it takes is the path to the desired lockfile.
//
// Deprecated: Use New instead.
func NewFlock(path string) *Flock {
return New(path)
}
// Close is equivalent to calling Unlock.
//
// This will release the lock and close the underlying file descriptor.
// It will not remove the file from disk; that's up to your application.
func (f *Flock) Close() error {
return f.Unlock()
}
// Path returns the path as provided in NewFlock().
func (f *Flock) Path() string {
return f.path
}
// Locked returns the lock state (locked: true, unlocked: false).
//
// Warning: by the time you use the returned value, the state may have changed.
func (f *Flock) Locked() bool {
f.m.RLock()
defer f.m.RUnlock()
return f.l
}
// RLocked returns the read lock state (locked: true, unlocked: false).
//
// Warning: by the time you use the returned value, the state may have changed.
func (f *Flock) RLocked() bool {
f.m.RLock()
defer f.m.RUnlock()
return f.r
}
func (f *Flock) String() string {
return f.path
}
// TryLockContext repeatedly tries to take an exclusive lock until one of the
// conditions is met: TryLock succeeds, TryLock fails with error, or Context
// Done channel is closed.
func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
return tryCtx(f.TryLock, ctx, retryDelay)
}
// TryRLockContext repeatedly tries to take a shared lock until one of the
// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context
// Done channel is closed.
func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
return tryCtx(f.TryRLock, ctx, retryDelay)
}
func tryCtx(fn func() (bool, error), ctx context.Context, retryDelay time.Duration) (bool, error) {
if ctx.Err() != nil {
return false, ctx.Err()
}
for {
if ok, err := fn(); ok || err != nil {
return ok, err
}
select {
case <-ctx.Done():
return false, ctx.Err()
case <-time.After(retryDelay):
// try again
}
}
}
func (f *Flock) setFh() error {
// open a new os.File instance
// create it if it doesn't exist, and open the file read-only.
fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600))
if err != nil {
return err
}
// set the filehandle on the struct
f.fh = fh
return nil
}
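
Since TryLockContext and TryRLockContext simply poll the non-blocking variants on a timer, callers bound the total wait with the context. A minimal sketch (the lock path and delays are illustrative):

```Go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

func main() {
	fileLock := flock.New("/var/lock/go-lock.lock")

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Retry every 250ms until the lock is acquired, an error occurs,
	// or the 3s deadline expires (in which case ctx.Err() is returned).
	locked, err := fileLock.TryLockContext(ctx, 250*time.Millisecond)
	if err != nil {
		fmt.Println("could not acquire lock:", err)
		return
	}
	if locked {
		defer fileLock.Unlock()
		// do work while holding the exclusive lock
	}
}
```

Where blocking indefinitely is acceptable, the plain `Lock()`/`defer Unlock()` pair defined in the platform files below serves the same purpose without the polling.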

195
vendor/github.com/gofrs/flock/flock_unix.go generated vendored Normal file
View File

@ -0,0 +1,195 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
// +build !windows
package flock
import (
"os"
"syscall"
)
// Lock is a blocking call to try and take an exclusive file lock. It will wait
// until it is able to obtain the exclusive file lock. It's recommended that
// TryLock() be used over this function. This function may block the ability to
// query the current Locked() or RLocked() status due to a RW-mutex lock.
//
// If we are already exclusive-locked, this function short-circuits and returns
// immediately assuming it can take the mutex lock.
//
// If the *Flock has a shared lock (RLock), this may transparently replace the
// shared lock with an exclusive lock on some UNIX-like operating systems. Be
// careful when using exclusive locks in conjunction with shared locks
// (RLock()), because calling Unlock() may accidentally release the exclusive
// lock that was once a shared lock.
func (f *Flock) Lock() error {
return f.lock(&f.l, syscall.LOCK_EX)
}
// RLock is a blocking call to try and take a shared file lock. It will wait
// until it is able to obtain the shared file lock. It's recommended that
// TryRLock() be used over this function. This function may block the ability to
// query the current Locked() or RLocked() status due to a RW-mutex lock.
//
// If we are already shared-locked, this function short-circuits and returns
// immediately assuming it can take the mutex lock.
func (f *Flock) RLock() error {
return f.lock(&f.r, syscall.LOCK_SH)
}
func (f *Flock) lock(locked *bool, flag int) error {
f.m.Lock()
defer f.m.Unlock()
if *locked {
return nil
}
if f.fh == nil {
if err := f.setFh(); err != nil {
return err
}
}
if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil {
shouldRetry, reopenErr := f.reopenFDOnError(err)
if reopenErr != nil {
return reopenErr
}
if !shouldRetry {
return err
}
if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil {
return err
}
}
*locked = true
return nil
}
// Unlock is a function to unlock the file. It takes a RW-mutex lock, so
// while it is running the Locked() and RLocked() functions will be blocked.
//
// This function short-circuits if we are unlocked already. If not, it calls
// syscall.LOCK_UN on the file and closes the file descriptor. It does not
// remove the file from disk; that's up to your application.
//
// Please note, if your shared lock became an exclusive lock this may
// unintentionally drop the exclusive lock if called by the consumer that
// believes they have a shared lock. Please see Lock() for more details.
func (f *Flock) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
// if we aren't locked or if the lockfile instance is nil
// just return a nil error because we are unlocked
if (!f.l && !f.r) || f.fh == nil {
return nil
}
// mark the file as unlocked
if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil {
return err
}
f.fh.Close()
f.l = false
f.r = false
f.fh = nil
return nil
}
// TryLock is the preferred function for taking an exclusive file lock. This
// function takes an RW-mutex lock before it tries to lock the file, so there is
// the possibility that this function may block for a short time if another
// goroutine is trying to take any action.
//
// The actual file lock is non-blocking. If we are unable to get the exclusive
// file lock, the function will return false instead of waiting for the lock. If
// we get the lock, we also set the *Flock instance as being exclusive-locked.
func (f *Flock) TryLock() (bool, error) {
return f.try(&f.l, syscall.LOCK_EX)
}
// TryRLock is the preferred function for taking a shared file lock. This
// function takes an RW-mutex lock before it tries to lock the file, so there is
// the possibility that this function may block for a short time if another
// goroutine is trying to take any action.
//
// The actual file lock is non-blocking. If we are unable to get the shared file
// lock, the function will return false instead of waiting for the lock. If we
// get the lock, we also set the *Flock instance as being share-locked.
func (f *Flock) TryRLock() (bool, error) {
return f.try(&f.r, syscall.LOCK_SH)
}
func (f *Flock) try(locked *bool, flag int) (bool, error) {
f.m.Lock()
defer f.m.Unlock()
if *locked {
return true, nil
}
if f.fh == nil {
if err := f.setFh(); err != nil {
return false, err
}
}
var retried bool
retry:
err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB)
switch err {
case syscall.EWOULDBLOCK:
return false, nil
case nil:
*locked = true
return true, nil
}
if !retried {
if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil {
return false, reopenErr
} else if shouldRetry {
retried = true
goto retry
}
}
return false, err
}
// reopenFDOnError determines whether we should reopen the file handle
// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c:
// Since Linux 3.4 (commit 55725513)
// Probably NFSv4 where flock() is emulated by fcntl().
func (f *Flock) reopenFDOnError(err error) (bool, error) {
if err != syscall.EIO && err != syscall.EBADF {
return false, nil
}
if st, err := f.fh.Stat(); err == nil {
// if the file is able to be read and written
if st.Mode()&0600 == 0600 {
f.fh.Close()
f.fh = nil
// reopen in read-write mode and set the filehandle
fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600))
if err != nil {
return false, err
}
f.fh = fh
return true, nil
}
}
return false, nil
}

76
vendor/github.com/gofrs/flock/flock_winapi.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
// +build windows
package flock
import (
"syscall"
"unsafe"
)
var (
kernel32, _ = syscall.LoadLibrary("kernel32.dll")
procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx")
procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx")
)
const (
winLockfileFailImmediately = 0x00000001
winLockfileExclusiveLock = 0x00000002
winLockfileSharedLock = 0x00000000
)
// Use of 0x00000000 for the shared lock is a guess based on some of the MS Windows
// `LockFileEx` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
//
// > The function requests an exclusive lock. Otherwise, it requests a shared
// > lock.
//
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
r1, _, errNo := syscall.Syscall6(
uintptr(procLockFileEx),
6,
uintptr(handle),
uintptr(flags),
uintptr(reserved),
uintptr(numberOfBytesToLockLow),
uintptr(numberOfBytesToLockHigh),
uintptr(unsafe.Pointer(offset)))
if r1 != 1 {
if errNo == 0 {
return false, syscall.EINVAL
}
return false, errNo
}
return true, 0
}
func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
r1, _, errNo := syscall.Syscall6(
uintptr(procUnlockFileEx),
5,
uintptr(handle),
uintptr(reserved),
uintptr(numberOfBytesToLockLow),
uintptr(numberOfBytesToLockHigh),
uintptr(unsafe.Pointer(offset)),
0)
if r1 != 1 {
if errNo == 0 {
return false, syscall.EINVAL
}
return false, errNo
}
return true, 0
}

140
vendor/github.com/gofrs/flock/flock_windows.go generated vendored Normal file
View File

@ -0,0 +1,140 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.
package flock
import (
"syscall"
)
// ErrorLockViolation is the error code returned from the Windows syscall when a
// lock would block and you ask to fail immediately.
const ErrorLockViolation syscall.Errno = 0x21 // 33
// Lock is a blocking call to try and take an exclusive file lock. It will wait
// until it is able to obtain the exclusive file lock. It's recommended that
// TryLock() be used over this function. This function may block the ability to
// query the current Locked() or RLocked() status due to a RW-mutex lock.
//
// If we are already locked, this function short-circuits and returns
// immediately assuming it can take the mutex lock.
func (f *Flock) Lock() error {
return f.lock(&f.l, winLockfileExclusiveLock)
}
// RLock is a blocking call to try and take a shared file lock. It will wait
// until it is able to obtain the shared file lock. It's recommended that
// TryRLock() be used over this function. This function may block the ability to
// query the current Locked() or RLocked() status due to a RW-mutex lock.
//
// If we are already locked, this function short-circuits and returns
// immediately assuming it can take the mutex lock.
func (f *Flock) RLock() error {
return f.lock(&f.r, winLockfileSharedLock)
}
func (f *Flock) lock(locked *bool, flag uint32) error {
f.m.Lock()
defer f.m.Unlock()
if *locked {
return nil
}
if f.fh == nil {
if err := f.setFh(); err != nil {
return err
}
}
if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
return errNo
}
*locked = true
return nil
}
// Unlock is a function to unlock the file. It takes a RW-mutex lock, so
// while it is running the Locked() and RLocked() functions will be blocked.
//
// This function short-circuits if we are unlocked already. If not, it calls
// UnlockFileEx() on the file and closes the file descriptor. It does not remove
// the file from disk; that's up to your application.
func (f *Flock) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
// if we aren't locked or if the lockfile instance is nil
// just return a nil error because we are unlocked
if (!f.l && !f.r) || f.fh == nil {
return nil
}
// mark the file as unlocked
if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
return errNo
}
f.fh.Close()
f.l = false
f.r = false
f.fh = nil
return nil
}
// TryLock is the preferred function for taking an exclusive file lock. This
// function does take a RW-mutex lock before it tries to lock the file, so there
// is the possibility that this function may block for a short time if another
// goroutine is trying to take any action.
//
// The actual file lock is non-blocking. If we are unable to get the exclusive
// file lock, the function will return false instead of waiting for the lock. If
// we get the lock, we also set the *Flock instance as being exclusive-locked.
func (f *Flock) TryLock() (bool, error) {
return f.try(&f.l, winLockfileExclusiveLock)
}
// TryRLock is the preferred function for taking a shared file lock. This
// function does take a RW-mutex lock before it tries to lock the file, so there
// is the possibility that this function may block for a short time if another
// goroutine is trying to take any action.
//
// The actual file lock is non-blocking. If we are unable to get the shared file
// lock, the function will return false instead of waiting for the lock. If we
// get the lock, we also set the *Flock instance as being shared-locked.
func (f *Flock) TryRLock() (bool, error) {
return f.try(&f.r, winLockfileSharedLock)
}
func (f *Flock) try(locked *bool, flag uint32) (bool, error) {
f.m.Lock()
defer f.m.Unlock()
if *locked {
return true, nil
}
if f.fh == nil {
if err := f.setFh(); err != nil {
return false, err
}
}
_, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{})
if errNo > 0 {
if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING {
return false, nil
}
return false, errNo
}
*locked = true
return true, nil
}

22
vendor/github.com/jaguilar/vt100/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 James Aguilar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

54
vendor/github.com/jaguilar/vt100/README.md generated vendored Normal file
View File

@ -0,0 +1,54 @@
# VT100
[![Build Status](https://travis-ci.org/jaguilar/vt100.svg?branch=master)](https://travis-ci.org/jaguilar/vt100)
[![GoDoc](https://godoc.org/github.com/jaguilar/vt100?status.svg)](https://godoc.org/github.com/jaguilar/vt100)
This is a vt100 screen reader. It seems to do a pretty
decent job of parsing the nethack input stream, which
is all I want it for anyway.
Here is a screenshot of the HTML-formatted screen data:
![](_readme/screencap.png)
The features we currently support:
* Cursor movement
* Erasing
* Many of the text properties -- underline, inverse, blink, etc.
* Sixteen colors
* Cursor saving and unsaving
* UTF-8
Not currently supported (and no plans to support):
* Scrolling
* Prompts
* Other cooked mode features
The API is not stable! This is a v0 package.
## Demo
Try running the demo! Install nethack:
sudo apt-get install nethack
Get this code:
go get github.com/jaguilar/vt100
cd $GOPATH/src/github.com/jaguilar/vt100
Run this code:
go run demo/demo.go -port=8080 2>/tmp/error.txt
Play some nethack and check out the resulting VT100 terminal status:
# From another terminal . . .
xdg-open http://localhost:8080/debug/vt100
The demo probably assumes Linux (it uses pty-related syscalls). I'll happily
accept pull requests that replicate the pty-spawning functions on OSX and
Windows.

288
vendor/github.com/jaguilar/vt100/command.go generated vendored Normal file
View File

@ -0,0 +1,288 @@
package vt100
import (
"errors"
"expvar"
"fmt"
"image/color"
"regexp"
"strconv"
"strings"
)
// UnsupportedError indicates that we parsed an operation that this
// terminal does not implement. Such errors indicate that the client
// program asked us to perform an action that we don't know how to handle.
// It MAY be safe to continue trying to do additional operations.
// This is a distinct category of errors from things we do know how
// to do, but are badly encoded, or errors from the underlying io.RuneScanner
// that we're reading commands from.
type UnsupportedError struct {
error
}
var (
supportErrors = expvar.NewMap("vt100-unsupported-operations")
)
func supportError(e error) error {
supportErrors.Add(e.Error(), 1)
return UnsupportedError{e}
}
// Command is a type of object that the terminal can process to perform
// an update.
type Command interface {
display(v *VT100) error
}
// runeCommand is a simple command that just writes a rune
// to the current cell and advances the cursor.
type runeCommand rune
func (r runeCommand) display(v *VT100) error {
v.put(rune(r))
return nil
}
// escapeCommand is a control sequence command. It includes a variety
// of control and escape sequences that move and modify the cursor
// or the terminal.
type escapeCommand struct {
cmd rune
args string
}
func (c escapeCommand) String() string {
return fmt.Sprintf("[%q %U](%v)", c.cmd, c.cmd, c.args)
}
type intHandler func(*VT100, []int) error
var (
// intHandlers are handlers for which all arguments are numbers.
// This is most of them -- all the ones that we process. Eventually,
// we may add handlers that support non-int args. Those handlers
// will instead receive []string, and they'll have to choose on their
// own how they might be parsed.
intHandlers = map[rune]intHandler{
's': save,
'7': save,
'u': unsave,
'8': unsave,
'A': relativeMove(-1, 0),
'B': relativeMove(1, 0),
'C': relativeMove(0, 1),
'D': relativeMove(0, -1),
'K': eraseColumns,
'J': eraseLines,
'H': home,
'f': home,
'm': updateAttributes,
}
)
func save(v *VT100, _ []int) error {
v.save()
return nil
}
func unsave(v *VT100, _ []int) error {
v.unsave()
return nil
}
var (
codeColors = []color.RGBA{
Black,
Red,
Green,
Yellow,
Blue,
Magenta,
Cyan,
White,
{}, // Not used.
DefaultColor,
}
)
// A command to update the attributes of the cursor based on the arg list.
func updateAttributes(v *VT100, args []int) error {
f := &v.Cursor.F
var unsupported []int
for _, x := range args {
switch x {
case 0:
*f = Format{}
case 1:
f.Intensity = Bright
case 2:
f.Intensity = Dim
case 22:
f.Intensity = Normal
case 4:
f.Underscore = true
case 24:
f.Underscore = false
case 5, 6:
f.Blink = true // We don't distinguish between blink speeds.
case 25:
f.Blink = false
case 7:
f.Inverse = true
case 27:
f.Inverse = false
case 8:
f.Conceal = true
case 28:
f.Conceal = false
case 30, 31, 32, 33, 34, 35, 36, 37, 39:
f.Fg = codeColors[x-30]
case 40, 41, 42, 43, 44, 45, 46, 47, 49:
f.Bg = codeColors[x-40]
// 38 and 48 not supported. Maybe someday.
default:
unsupported = append(unsupported, x)
}
}
if unsupported != nil {
return supportError(fmt.Errorf("unknown attributes: %v", unsupported))
}
return nil
}
func relativeMove(y, x int) func(*VT100, []int) error {
return func(v *VT100, args []int) error {
c := 1
if len(args) >= 1 {
c = args[0]
}
// home is 1-indexed, because that's what the terminal sends us. We want to
// reuse its sanitization scheme, so we'll just modify our args by that amount.
return home(v, []int{v.Cursor.Y + y*c + 1, v.Cursor.X + x*c + 1})
}
}
func eraseColumns(v *VT100, args []int) error {
d := eraseForward
if len(args) > 0 {
d = eraseDirection(args[0])
}
if d > eraseAll {
return fmt.Errorf("unknown erase direction: %d", d)
}
v.eraseColumns(d)
return nil
}
func eraseLines(v *VT100, args []int) error {
d := eraseForward
if len(args) > 0 {
d = eraseDirection(args[0])
}
if d > eraseAll {
return fmt.Errorf("unknown erase direction: %d", d)
}
v.eraseLines(d)
return nil
}
func sanitize(v *VT100, y, x int) (int, int, error) {
var err error
if y < 0 || y >= v.Height || x < 0 || x >= v.Width {
err = fmt.Errorf("out of bounds (%d, %d)", y, x)
} else {
return y, x, nil
}
if y < 0 {
y = 0
}
if y >= v.Height {
y = v.Height - 1
}
if x < 0 {
x = 0
}
if x >= v.Width {
x = v.Width - 1
}
return y, x, err
}
func home(v *VT100, args []int) error {
var y, x int
if len(args) >= 2 {
y, x = args[0]-1, args[1]-1 // home args are 1-indexed.
}
y, x, err := sanitize(v, y, x) // Clamp y and x to the bounds of the terminal.
v.home(y, x) // Try to do something like what the client asked.
return err
}
func (c escapeCommand) display(v *VT100) error {
f, ok := intHandlers[c.cmd]
if !ok {
return supportError(c.err(errors.New("unsupported command")))
}
args, err := c.argInts()
if err != nil {
return c.err(fmt.Errorf("while parsing int args: %v", err))
}
return f(v, args)
}
// err enhances e with information about the current escape command
func (c escapeCommand) err(e error) error {
return fmt.Errorf("%s: %s", c, e)
}
var csArgsRe = regexp.MustCompile("^([^0-9]*)(.*)$")
// argInts parses c.args as a slice of at least arity ints. If the number
// of ; separated arguments is less than arity, the remaining elements of
// the result will be zero. errors only on integer parsing failure.
func (c escapeCommand) argInts() ([]int, error) {
if len(c.args) == 0 {
return make([]int, 0), nil
}
args := strings.Split(c.args, ";")
out := make([]int, len(args))
for i, s := range args {
x, err := strconv.ParseInt(s, 10, 0)
if err != nil {
return nil, err
}
out[i] = int(x)
}
return out, nil
}
type controlCommand rune
const (
backspace controlCommand = '\b'
_horizontalTab = '\t'
linefeed = '\n'
_verticalTab = '\v'
_formfeed = '\f'
carriageReturn = '\r'
)
func (c controlCommand) display(v *VT100) error {
switch c {
case backspace:
v.backspace()
case linefeed:
v.Cursor.Y++
v.Cursor.X = 0
case carriageReturn:
v.Cursor.X = 0
}
return nil
}

97
vendor/github.com/jaguilar/vt100/scanner.go generated vendored Normal file
View File

@ -0,0 +1,97 @@
package vt100
import (
"bytes"
"fmt"
"io"
"unicode"
)
// Decode decodes one ANSI terminal command from s.
//
// s should be connected to a client program that expects an
// ANSI terminal on the other end. It will push bytes to us that we are meant
// to interpret as terminal control codes, or text to place onto the terminal.
//
// This Command alone does not actually update the terminal. You need to pass
// it to VT100.Process().
//
// You should not share s with any other reader, because it could leave
// the stream in an invalid state.
func Decode(s io.RuneScanner) (Command, error) {
r, size, err := s.ReadRune()
if err != nil {
return nil, err
}
if r == unicode.ReplacementChar && size == 1 {
return nil, fmt.Errorf("non-utf8 data from reader")
}
if r == escape || r == monogramCsi { // At beginning of escape sequence.
s.UnreadRune()
return scanEscapeCommand(s)
}
if unicode.IsControl(r) {
return controlCommand(r), nil
}
return runeCommand(r), nil
}
const (
// There are two ways to begin an escape sequence. One is to put the escape byte.
// The other is to put the single-rune control sequence indicator, which is equivalent
// to putting "\u001b[".
escape = '\u001b'
monogramCsi = '\u009b'
)
var (
csEnd = &unicode.RangeTable{R16: []unicode.Range16{{Lo: 64, Hi: 126, Stride: 1}}}
)
// scanEscapeCommand scans to the end of the current escape sequence. The scanner
// must be positioned at an escape rune (esc or the unicode CSI).
func scanEscapeCommand(s io.RuneScanner) (Command, error) {
csi := false
esc, _, err := s.ReadRune()
if err != nil {
return nil, err
}
if esc != escape && esc != monogramCsi {
return nil, fmt.Errorf("invalid content")
}
if esc == monogramCsi {
csi = true
}
var args bytes.Buffer
quote := false
for i := 0; ; i++ {
r, _, err := s.ReadRune()
if err != nil {
return nil, err
}
if i == 0 && r == '[' {
csi = true
continue
}
if !csi {
return escapeCommand{r, ""}, nil
} else if !quote && unicode.Is(csEnd, r) {
return escapeCommand{r, args.String()}, nil
}
if r == '"' {
quote = !quote
}
// Otherwise, we're still in the args, and this rune is one of those args.
if _, err := args.WriteRune(r); err != nil {
panic(err) // WriteRune cannot return an error from bytes.Buffer.
}
}
}
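
Putting Decode together with the command handlers above, a minimal sketch (hypothetical input; `NewVT100`, `Process`, and the exported state live in vt100.go below) that runs one SGR escape plus text through a terminal:

```Go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/jaguilar/vt100"
)

func main() {
	// "\x1b[1;31m" decodes as an escapeCommand ('m' with args "1;31"):
	// bright red. The trailing "hi" decodes as two runeCommands.
	in := strings.NewReader("\x1b[1;31mhi")
	v := vt100.NewVT100(24, 80)

	for {
		cmd, err := vt100.Decode(in)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if err := v.Process(cmd); err != nil {
			// UnsupportedError is recoverable: skip and continue.
			if _, ok := err.(vt100.UnsupportedError); !ok {
				panic(err)
			}
		}
	}
	fmt.Println(string(v.Content[0][:2]))       // "hi"
	fmt.Println(v.Format[0][0].Fg == vt100.Red) // true
}
```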

435
vendor/github.com/jaguilar/vt100/vt100.go generated vendored Normal file
View File

@ -0,0 +1,435 @@
// Package vt100 implements a quick-and-dirty programmable ANSI terminal emulator.
//
// You could, for example, use it to run a program like nethack that expects
// a terminal as a subprocess. It tracks the position of the cursor,
// colors, and various other aspects of the terminal's state, and
// allows you to inspect them.
//
// We do very much mean the dirty part. It's not that we think it might have
// bugs. It's that we're SURE it does. Currently, we only handle raw mode, with no
// cooked mode features like scrolling. We also misinterpret some of the control
// codes, which may or may not matter for your purpose.
package vt100
import (
"bytes"
"fmt"
"image/color"
"sort"
"strings"
)
type Intensity int
const (
Normal Intensity = 0
Bright = 1
Dim = 2
// TODO(jaguilar): Should this be in a subpackage, since the names are pretty collide-y?
)
var (
// Technically RGBAs are supposed to be premultiplied. But CSS doesn't expect them
// that way, so we won't do it in this file.
DefaultColor = color.RGBA{0, 0, 0, 0}
// Our black has 255 alpha, so it will compare negatively with DefaultColor.
Black = color.RGBA{0, 0, 0, 255}
Red = color.RGBA{255, 0, 0, 255}
Green = color.RGBA{0, 255, 0, 255}
Yellow = color.RGBA{255, 255, 0, 255}
Blue = color.RGBA{0, 0, 255, 255}
Magenta = color.RGBA{255, 0, 255, 255}
Cyan = color.RGBA{0, 255, 255, 255}
White = color.RGBA{255, 255, 255, 255}
)
func (i Intensity) alpha() uint8 {
switch i {
case Bright:
return 255
case Normal:
return 170
case Dim:
return 85
default:
return 170
}
}
// Format represents the display format of text on a terminal.
type Format struct {
// Fg is the foreground color.
Fg color.RGBA
// Bg is the background color.
Bg color.RGBA
// Intensity is the text intensity (bright, normal, dim).
Intensity Intensity
// Various text properties.
Underscore, Conceal, Negative, Blink, Inverse bool
}
func toCss(c color.RGBA) string {
return fmt.Sprintf("rgba(%d, %d, %d, %f)", c.R, c.G, c.B, float32(c.A)/255)
}
func (f Format) css() string {
parts := make([]string, 0)
fg, bg := f.Fg, f.Bg
if f.Inverse {
bg, fg = fg, bg
}
if f.Intensity != Normal {
// Intensity only applies to the text -- i.e., the foreground.
fg.A = f.Intensity.alpha()
}
if fg != DefaultColor {
parts = append(parts, "color:"+toCss(fg))
}
if bg != DefaultColor {
parts = append(parts, "background-color:"+toCss(bg))
}
if f.Underscore {
parts = append(parts, "text-decoration:underline")
}
if f.Conceal {
parts = append(parts, "display:none")
}
if f.Blink {
parts = append(parts, "text-decoration:blink")
}
// We're not in performance sensitive code. Although this sort
// isn't strictly necessary, it gives us the nice property that
// the style of a particular set of attributes will always be
// generated the same way. As a result, we can use the html
// output in tests.
sort.StringSlice(parts).Sort()
return strings.Join(parts, ";")
}
// Cursor represents both the position and text type of the cursor.
type Cursor struct {
// Y and X are the coordinates.
Y, X int
// F is the format that will be displayed.
F Format
}
// VT100 represents a simplified, raw VT100 terminal.
type VT100 struct {
// Height and Width are the dimensions of the terminal.
Height, Width int
// Content is the text in the terminal.
Content [][]rune
// Format is the display properties of each cell.
Format [][]Format
// Cursor is the current state of the cursor.
Cursor Cursor
// savedCursor is the state of the cursor last time save() was called.
savedCursor Cursor
unparsed []byte
}
// NewVT100 creates a new VT100 object with the specified dimensions. y and x
// must both be greater than zero.
//
// Each cell is set to contain a ' ' rune, and all formats are left as the
// default.
func NewVT100(y, x int) *VT100 {
if y == 0 || x == 0 {
panic(fmt.Errorf("invalid dim (%d, %d)", y, x))
}
v := &VT100{
Height: y,
Width: x,
Content: make([][]rune, y),
Format: make([][]Format, y),
}
for row := 0; row < y; row++ {
v.Content[row] = make([]rune, x)
v.Format[row] = make([]Format, x)
for col := 0; col < x; col++ {
v.clear(row, col)
}
}
return v
}
func (v *VT100) UsedHeight() int {
count := 0
for _, l := range v.Content {
for _, r := range l {
if r != ' ' {
count++
break
}
}
}
return count
}
func (v *VT100) Resize(y, x int) {
if y > v.Height {
n := y - v.Height
for row := 0; row < n; row++ {
v.Content = append(v.Content, make([]rune, v.Width))
v.Format = append(v.Format, make([]Format, v.Width))
for col := 0; col < v.Width; col++ {
v.clear(v.Height+row, col)
}
}
v.Height = y
} else if y < v.Height {
v.Content = v.Content[:y]
v.Height = y
}
if x > v.Width {
for i := range v.Content {
row := make([]rune, x)
copy(row, v.Content[i])
v.Content[i] = row
format := make([]Format, x)
copy(format, v.Format[i])
v.Format[i] = format
for j := v.Width; j < x; j++ {
v.clear(i, j)
}
}
v.Width = x
} else if x < v.Width {
for i := range v.Content {
v.Content[i] = v.Content[i][:x]
v.Format[i] = v.Format[i][:x]
}
v.Width = x
}
}
func (v *VT100) Write(dt []byte) (int, error) {
n := len(dt)
if len(v.unparsed) > 0 {
dt = append(v.unparsed, dt...) // this almost never happens
v.unparsed = nil
}
buf := bytes.NewBuffer(dt)
for {
if buf.Len() == 0 {
return n, nil
}
cmd, err := Decode(buf)
if err != nil {
if l := buf.Len(); l > 0 && l < 12 { // on small leftover handle unparsed, otherwise skip
v.unparsed = buf.Bytes()
}
return n, nil
}
v.Process(cmd) // ignore error
}
}
// Process handles a single ANSI terminal command, updating the terminal
// appropriately.
//
// One special kind of error that this can return is an UnsupportedError. It's
// probably best to check for these and skip the offending command, because they
// are likely recoverable. Unsupported commands are exported as expvars, so it is
// possibly not necessary to log them. If you want to check what has failed, start
// a debug HTTP server and examine
// the vt100-unsupported-commands field in /debug/vars.
func (v *VT100) Process(c Command) error {
return c.display(v)
}
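// Illustrative usage sketch (an assumption, not part of the vendored source):
// feed raw terminal output through Write, then snapshot the screen state.
//
// term := NewVT100(24, 80)
// _, _ = term.Write([]byte("\x1b[1;32mok\x1b[0m build finished\r\n"))
// fragment := term.HTML() // HTML rendering of the current screen contents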
// HTML renders v as an HTML fragment. One idea for how to use this is to debug
// the current state of the screen reader.
func (v *VT100) HTML() string {
var buf bytes.Buffer
buf.WriteString(`<pre style="color:white;background-color:black;">`)
// Iterate each row. When the css changes, close the previous span, and open
// a new one. No need to close a span when the css is empty; we won't have
// opened one in the past.
var lastFormat Format
for y, row := range v.Content {
for x, r := range row {
f := v.Format[y][x]
if f != lastFormat {
if lastFormat != (Format{}) {
buf.WriteString("</span>")
}
if f != (Format{}) {
buf.WriteString(`<span style="` + f.css() + `">`)
}
lastFormat = f
}
if s := maybeEscapeRune(r); s != "" {
buf.WriteString(s)
} else {
buf.WriteRune(r)
}
}
buf.WriteRune('\n')
}
buf.WriteString("</pre>")
return buf.String()
}
// maybeEscapeRune potentially escapes a rune for display in an html document.
// It only escapes the things that html.EscapeString does, but it works without allocating
// a string to hold r. Returns an empty string if there is no need to escape.
func maybeEscapeRune(r rune) string {
switch r {
case '&':
return "&amp;"
case '\'':
return "&#39;"
case '<':
return "&lt;"
case '>':
return "&gt;"
case '"':
return "&quot;"
}
return ""
}
// put puts r onto the current cursor's position, then advances the cursor.
func (v *VT100) put(r rune) {
v.scrollIfNeeded()
v.Content[v.Cursor.Y][v.Cursor.X] = r
v.Format[v.Cursor.Y][v.Cursor.X] = v.Cursor.F
v.advance()
}
// advance advances the cursor, wrapping to the next line if need be.
func (v *VT100) advance() {
v.Cursor.X++
if v.Cursor.X >= v.Width {
v.Cursor.X = 0
v.Cursor.Y++
}
// if v.Cursor.Y >= v.Height {
// // TODO(jaguilar): if we implement scroll, this should probably scroll.
// // v.Cursor.Y = 0
// v.scroll()
// }
}
func (v *VT100) scrollIfNeeded() {
if v.Cursor.Y >= v.Height {
first := v.Content[0]
copy(v.Content, v.Content[1:])
for i := range first {
first[i] = ' '
}
v.Content[v.Height-1] = first
v.Cursor.Y = v.Height - 1
}
}
// home moves the cursor to the coordinates y, x. Coordinates are not
// bounds-checked here; callers are expected to pass values within the screen.
func (v *VT100) home(y, x int) {
v.Cursor.Y, v.Cursor.X = y, x
}
// eraseDirection is the logical direction in which an erase command happens,
// from the cursor. For both erase commands, forward is 0, backward is 1,
// and everything is 2.
type eraseDirection int
const (
// From the cursor to the end, inclusive.
eraseForward eraseDirection = iota
// From the beginning to the cursor, inclusive.
eraseBack
// Everything.
eraseAll
)
// eraseColumns erases columns from the current line.
func (v *VT100) eraseColumns(d eraseDirection) {
y, x := v.Cursor.Y, v.Cursor.X // Aliases for simplicity.
switch d {
case eraseBack:
v.eraseRegion(y, 0, y, x)
case eraseForward:
v.eraseRegion(y, x, y, v.Width-1)
case eraseAll:
v.eraseRegion(y, 0, y, v.Width-1)
}
}
// eraseLines erases lines from the current terminal. Note that
// no matter what is selected, the entire current line is erased.
func (v *VT100) eraseLines(d eraseDirection) {
y := v.Cursor.Y // Alias for simplicity.
switch d {
case eraseBack:
v.eraseRegion(0, 0, y, v.Width-1)
case eraseForward:
v.eraseRegion(y, 0, v.Height-1, v.Width-1)
case eraseAll:
v.eraseRegion(0, 0, v.Height-1, v.Width-1)
}
}
func (v *VT100) eraseRegion(y1, x1, y2, x2 int) {
// Do not sanitize or bounds-check these coordinates, since they come from the
// programmer (me). We should panic if any of them are out of bounds.
if y1 > y2 {
y1, y2 = y2, y1
}
if x1 > x2 {
x1, x2 = x2, x1
}
for y := y1; y <= y2; y++ {
for x := x1; x <= x2; x++ {
v.clear(y, x)
}
}
}
func (v *VT100) clear(y, x int) {
if y >= len(v.Content) || x >= len(v.Content[0]) {
return
}
v.Content[y][x] = ' '
v.Format[y][x] = Format{}
}
func (v *VT100) backspace() {
v.Cursor.X--
if v.Cursor.X < 0 {
if v.Cursor.Y == 0 {
v.Cursor.X = 0
} else {
v.Cursor.Y--
v.Cursor.X = v.Width - 1
}
}
}
func (v *VT100) save() {
v.savedCursor = v.Cursor
}
func (v *VT100) unsave() {
v.Cursor = v.savedCursor
}

View File

@ -27,15 +27,19 @@ Read the proposal from https://github.com/moby/moby/issues/32925
Introductory blog post https://blog.mobyproject.org/introducing-buildkit-17e056cc5317
:information_source: If you are visiting this repo for the usage of experimental Dockerfile features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`, please refer to [`frontend/dockerfile/docs/experimental.md`](frontend/dockerfile/docs/experimental.md).
### Used by
[Moby](https://github.com/moby/moby/pull/37151)
BuildKit is used by the following projects:
[img](https://github.com/genuinetools/img)
[OpenFaaS Cloud](https://github.com/openfaas/openfaas-cloud)
[container build interface](https://github.com/containerbuilding/cbi)
- [Moby & Docker](https://github.com/moby/moby/pull/37151)
- [img](https://github.com/genuinetools/img)
- [OpenFaaS Cloud](https://github.com/openfaas/openfaas-cloud)
- [container build interface](https://github.com/containerbuilding/cbi)
- [Knative Build Templates](https://github.com/knative/build-templates)
- [boss](https://github.com/crosbymichael/boss)
- [Rio](https://github.com/rancher/rio) (on roadmap)
### Quick start
@ -79,6 +83,7 @@ See [`solver/pb/ops.proto`](./solver/pb/ops.proto) for the format definition.
Currently, the following high-level languages have been implemented for LLB:
- Dockerfile (See [Exploring Dockerfiles](#exploring-dockerfiles))
- [Buildpacks](https://github.com/tonistiigi/buildkit-pack)
- (open a PR to add your own language)
For understanding the basics of LLB, the `examples/buildkit*` directory contains scripts that define how to build different configurations of BuildKit itself and its dependencies using the `client` package. Running one of these scripts generates a protobuf definition of a build graph. Note that the script itself does not execute any steps of the build.
@ -117,7 +122,7 @@ During development, Dockerfile frontend (dockerfile.v0) is also part of the Buil
```
buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=.
buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. --frontend-opt target=foo --frontend-opt build-arg:foo=bar
buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. --opt target=foo --opt build-arg:foo=bar
```
`--local` exposes local source files from the client to the builder. `context` and `dockerfile` are the names under which the Dockerfile frontend looks for the build context and the Dockerfile location.
@ -136,32 +141,36 @@ build-using-dockerfile -t mybuildkit -f ./hack/dockerfiles/test.Dockerfile .
docker inspect myimage
```
##### Building a Dockerfile using [external frontend](https://hub.docker.com/r/tonistiigi/dockerfile/tags/):
##### Building a Dockerfile using [external frontend](https://hub.docker.com/r/docker/dockerfile/tags/):
During development, an external version of the Dockerfile frontend is pushed to https://hub.docker.com/r/tonistiigi/dockerfile that can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)). For automatic build from master branch of this repository `tonistiigi/dockerfile:master` image can be used.
External versions of the Dockerfile frontend are pushed to https://hub.docker.com/r/docker/dockerfile-upstream and https://hub.docker.com/r/docker/dockerfile and can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)). For automatic builds from the master branch of this repository, the `docker/dockerfile-upstream:master` or `docker/dockerfile-upstream:master-experimental` image can be used.
```
buildctl build --frontend=gateway.v0 --frontend-opt=source=tonistiigi/dockerfile --local context=. --local dockerfile=.
buildctl build --frontend gateway.v0 --frontend-opt=source=tonistiigi/dockerfile --frontend-opt=context=git://github.com/moby/moby --frontend-opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
buildctl build --frontend gateway.v0 --opt source=docker/dockerfile --local context=. --local dockerfile=.
buildctl build --frontend gateway.v0 --opt source=docker/dockerfile --opt context=git://github.com/moby/moby --opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
```
### Exporters
##### Building a Dockerfile with experimental features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`
By default, the build result and intermediate cache will only remain internally in BuildKit. Exporter needs to be specified to retrieve the result.
See [`frontend/dockerfile/docs/experimental.md`](frontend/dockerfile/docs/experimental.md).
### Output
By default, the build result and intermediate cache will only remain internally in BuildKit. An output needs to be specified to retrieve the result.
##### Exporting resulting image to containerd
The containerd worker needs to be used:
```
buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image
buildctl build ... --output type=image,name=docker.io/username/image
ctr --namespace=buildkit images ls
```
##### Push resulting image to registry
```
buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image --exporter-opt push=true
buildctl build ... --output type=image,name=docker.io/username/image,push=true
```
If credentials are required, `buildctl` will attempt to read the Docker configuration file.
@ -172,23 +181,60 @@ If credentials are required, `buildctl` will attempt to read Docker configuratio
The local exporter copies the resulting files directly back to the client. This is useful if BuildKit is being used for building something other than container images.
```
buildctl build ... --exporter=local --exporter-opt output=path/to/output-dir
buildctl build ... --output type=local,dest=path/to/output-dir
```
The tar exporter is similar to the local exporter, but transfers the files as a single tarball.
```
buildctl build ... --output type=tar,dest=out.tar
buildctl build ... --output type=tar > out.tar
```
##### Exporting built image to Docker
```
# exported tarball is also compatible with OCI spec
buildctl build ... --exporter=docker --exporter-opt name=myimage | docker load
buildctl build ... --output type=docker,name=myimage | docker load
```
##### Exporting [OCI Image Format](https://github.com/opencontainers/image-spec) tarball to client
```
buildctl build ... --exporter=oci --exporter-opt output=path/to/output.tar
buildctl build ... --exporter=oci > output.tar
buildctl build ... --output type=oci,dest=path/to/output.tar
buildctl build ... --output type=oci > output.tar
```
### Exporting/Importing build cache (not image itself)
#### To/From registry
```
buildctl build ... --export-cache type=registry,ref=localhost:5000/myrepo:buildcache
buildctl build ... --import-cache type=registry,ref=localhost:5000/myrepo:buildcache
```
#### To/From local filesystem
```
buildctl build ... --export-cache type=local,dest=path/to/output-dir
buildctl build ... --import-cache type=local,src=path/to/input-dir
```
The directory layout conforms to OCI Image Spec v1.0.
#### `--export-cache` options
* `mode=min` (default): only export layers for the resulting image
* `mode=max`: export all the layers of all intermediate steps
* `ref=docker.io/user/image:tag`: reference for `registry` cache exporter
* `dest=path/to/output-dir`: directory for `local` cache exporter
#### `--import-cache` options
* `ref=docker.io/user/image:tag`: reference for `registry` cache importer
* `src=path/to/input-dir`: directory for `local` cache importer
* `digest=sha256:deadbeef`: digest of the manifest list to import for `local` cache importer. Defaults to the digest of the "latest" tag in `index.json`
### Other
#### View build cache
@ -207,15 +253,23 @@ buildctl debug workers -v
BuildKit can also be used by running the `buildkitd` daemon inside a Docker container and accessing it remotely. The client tool `buildctl` is also available for Mac and Windows.
We provide `buildkitd` container images as [`moby/buildkit`](https://hub.docker.com/r/moby/buildkit/tags/):
* `moby/buildkit:latest`: built from the latest regular [release](https://github.com/moby/buildkit/releases)
* `moby/buildkit:rootless`: same as `latest` but runs as an unprivileged user, see [`docs/rootless.md`](docs/rootless.md)
* `moby/buildkit:master`: built from the master branch
* `moby/buildkit:master-rootless`: same as master but runs as an unprivileged user, see [`docs/rootless.md`](docs/rootless.md)
To run the daemon in a container:
```
docker run -d --privileged -p 1234:1234 tonistiigi/buildkit --addr tcp://0.0.0.0:1234
docker run -d --privileged -p 1234:1234 moby/buildkit:latest --addr tcp://0.0.0.0:1234
export BUILDKIT_HOST=tcp://0.0.0.0:1234
buildctl build --help
```
The `tonistiigi/buildkit` image can be built locally using the Dockerfile in `./hack/dockerfiles/test.Dockerfile`.
The images can also be built locally using `./hack/dockerfiles/test.Dockerfile` (or `./hack/dockerfiles/test.buildkit.Dockerfile` if you already have BuildKit).
Run `make images` to build the images as `moby/buildkit:local` and `moby/buildkit:local-rootless`.
### Opentracing support
@ -232,7 +286,7 @@ export JAEGER_TRACE=0.0.0.0:6831
### Supported runc version
During development, BuildKit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/v1.1.3/RUNC.md) for more information.
During development, BuildKit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/v1.2.1/RUNC.md) for more information.
### Running BuildKit without root privileges
@ -240,35 +294,5 @@ Please refer to [`docs/rootless.md`](docs/rootless.md).
### Contributing
Running tests:
```bash
make test
```
This runs all unit and integration tests in a containerized environment. Locally, every package can be tested separately with standard Go tools, but integration tests are skipped if the local user doesn't have enough permissions or the worker binaries are not installed.
```
# test a specific package only
make test TESTPKGS=./client
# run a specific test with all worker combinations
make test TESTPKGS=./client TESTFLAGS="--run /TestCallDiskUsage -v"
# run all integration tests with a specific worker
# supported workers: oci, oci-rootless, containerd, containerd-1.0
make test TESTPKGS=./client TESTFLAGS="--run //worker=containerd -v"
```
Updating vendored dependencies:
```bash
# update vendor.conf
make vendor
```
Validating your updates before submission:
```bash
make validate-all
```
Want to contribute to BuildKit? Awesome! You can find information about
contributing to this project in the [CONTRIBUTING.md](/.github/CONTRIBUTING.md)

File diff suppressed because it is too large

View File

@ -66,9 +66,31 @@ message SolveRequest {
}
message CacheOptions {
string ExportRef = 1;
repeated string ImportRefs = 2;
map<string, string> ExportAttrs = 3;
// ExportRefDeprecated is deprecated in favor of the new Exports since BuildKit v0.4.0.
// When ExportRefDeprecated is set, the solver appends
// {.Type = "registry", .Attrs = ExportAttrs.add("ref", ExportRef)}
// to Exports for compatibility. (planned to be removed)
string ExportRefDeprecated = 1;
// ImportRefsDeprecated is deprecated in favor of the new Imports since BuildKit v0.4.0.
// When ImportRefsDeprecated is set, the solver appends
// {.Type = "registry", .Attrs = {"ref": importRef}}
// for each of the ImportRefs entry to Imports for compatibility. (planned to be removed)
repeated string ImportRefsDeprecated = 2;
// ExportAttrsDeprecated is deprecated since BuildKit v0.4.0.
// See the description of ExportRefDeprecated.
map<string, string> ExportAttrsDeprecated = 3;
// Exports was introduced in BuildKit v0.4.0.
repeated CacheOptionsEntry Exports = 4;
// Imports was introduced in BuildKit v0.4.0.
repeated CacheOptionsEntry Imports = 5;
}
message CacheOptionsEntry {
// Type is like "registry" or "local"
string Type = 1;
// Attrs are like mode=(min,max), ref=example.com:5000/foo/bar.
// See cache importer/exporter implementations' documentation.
map<string, string> Attrs = 2;
}
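// Illustrative example (an assumption, not part of the vendored source):
// a legacy request that sets
//   ExportRefDeprecated = "example.com/user/image:buildcache"
//   ExportAttrsDeprecated = {"mode": "max"}
// is handled as if Exports contained the single entry
//   {Type: "registry", Attrs: {"ref": "example.com/user/image:buildcache", "mode": "max"}}.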
message SolveResponse {
@ -124,4 +146,4 @@ message ListWorkersRequest {
message ListWorkersResponse {
repeated moby.buildkit.v1.types.WorkerRecord record = 1;
}
}

View File

@ -1,16 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: worker.proto
/*
Package moby_buildkit_v1_types is a generated protocol buffer package.
It is generated from these files:
worker.proto
It has these top-level messages:
WorkerRecord
GCPolicy
*/
package moby_buildkit_v1_types
import proto "github.com/gogo/protobuf/proto"
@ -33,16 +23,47 @@ var _ = math.Inf
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type WorkerRecord struct {
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms"`
GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy" json:"GCPolicy,omitempty"`
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms"`
GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WorkerRecord) Reset() { *m = WorkerRecord{} }
func (m *WorkerRecord) String() string { return proto.CompactTextString(m) }
func (*WorkerRecord) ProtoMessage() {}
func (*WorkerRecord) Descriptor() ([]byte, []int) { return fileDescriptorWorker, []int{0} }
func (m *WorkerRecord) Reset() { *m = WorkerRecord{} }
func (m *WorkerRecord) String() string { return proto.CompactTextString(m) }
func (*WorkerRecord) ProtoMessage() {}
func (*WorkerRecord) Descriptor() ([]byte, []int) {
return fileDescriptor_worker_1d0a62be5114ecbf, []int{0}
}
func (m *WorkerRecord) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *WorkerRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_WorkerRecord.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *WorkerRecord) XXX_Merge(src proto.Message) {
xxx_messageInfo_WorkerRecord.Merge(dst, src)
}
func (m *WorkerRecord) XXX_Size() int {
return m.Size()
}
func (m *WorkerRecord) XXX_DiscardUnknown() {
xxx_messageInfo_WorkerRecord.DiscardUnknown(m)
}
var xxx_messageInfo_WorkerRecord proto.InternalMessageInfo
func (m *WorkerRecord) GetID() string {
if m != nil {
@ -73,16 +94,47 @@ func (m *WorkerRecord) GetGCPolicy() []*GCPolicy {
}
type GCPolicy struct {
All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"`
KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"`
KeepBytes int64 `protobuf:"varint,3,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"`
Filters []string `protobuf:"bytes,4,rep,name=filters" json:"filters,omitempty"`
All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"`
KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"`
KeepBytes int64 `protobuf:"varint,3,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"`
Filters []string `protobuf:"bytes,4,rep,name=filters,proto3" json:"filters,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GCPolicy) Reset() { *m = GCPolicy{} }
func (m *GCPolicy) String() string { return proto.CompactTextString(m) }
func (*GCPolicy) ProtoMessage() {}
func (*GCPolicy) Descriptor() ([]byte, []int) { return fileDescriptorWorker, []int{1} }
func (m *GCPolicy) Reset() { *m = GCPolicy{} }
func (m *GCPolicy) String() string { return proto.CompactTextString(m) }
func (*GCPolicy) ProtoMessage() {}
func (*GCPolicy) Descriptor() ([]byte, []int) {
return fileDescriptor_worker_1d0a62be5114ecbf, []int{1}
}
func (m *GCPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GCPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GCPolicy.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GCPolicy) XXX_Merge(src proto.Message) {
xxx_messageInfo_GCPolicy.Merge(dst, src)
}
func (m *GCPolicy) XXX_Size() int {
return m.Size()
}
func (m *GCPolicy) XXX_DiscardUnknown() {
xxx_messageInfo_GCPolicy.DiscardUnknown(m)
}
var xxx_messageInfo_GCPolicy proto.InternalMessageInfo
func (m *GCPolicy) GetAll() bool {
if m != nil {
@ -114,6 +166,7 @@ func (m *GCPolicy) GetFilters() []string {
func init() {
proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord")
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.types.WorkerRecord.LabelsEntry")
proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy")
}
func (m *WorkerRecord) Marshal() (dAtA []byte, err error) {
@ -178,6 +231,9 @@ func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@ -231,6 +287,9 @@ func (m *GCPolicy) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], s)
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@ -244,6 +303,9 @@ func encodeVarintWorker(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *WorkerRecord) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ID)
@ -270,10 +332,16 @@ func (m *WorkerRecord) Size() (n int) {
n += 1 + l + sovWorker(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *GCPolicy) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.All {
@ -291,6 +359,9 @@ func (m *GCPolicy) Size() (n int) {
n += 1 + l + sovWorker(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@ -557,6 +628,7 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@ -694,6 +766,7 @@ func (m *GCPolicy) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@ -808,9 +881,9 @@ var (
ErrIntOverflowWorker = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("worker.proto", fileDescriptorWorker) }
func init() { proto.RegisterFile("worker.proto", fileDescriptor_worker_1d0a62be5114ecbf) }
var fileDescriptorWorker = []byte{
var fileDescriptor_worker_1d0a62be5114ecbf = []byte{
// 355 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40,
0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15,

View File

@ -5,9 +5,12 @@ import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"time"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/connhelper"
"github.com/moby/buildkit/util/appdefaults"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
@ -23,9 +26,8 @@ type ClientOpt interface{}
// New returns a new buildkit client. Address can be empty for the system-default address.
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
gopts := []grpc.DialOption{
grpc.WithDialer(dialer),
}
gopts := []grpc.DialOption{}
needDialer := true
needWithInsecure := true
for _, o := range opts {
if _, ok := o.(*withFailFast); ok {
@ -44,6 +46,19 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())),
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)))
}
if wd, ok := o.(*withDialer); ok {
gopts = append(gopts, grpc.WithDialer(wd.dialer))
needDialer = false
}
}
if needDialer {
dialFn, err := resolveDialer(address)
if err != nil {
return nil, err
}
// TODO(AkihiroSuda): use WithContextDialer (requires grpc 1.19)
// https://github.com/grpc/grpc-go/commit/40cb5618f475e7b9d61aa7920ae4b04ef9bbaf89
gopts = append(gopts, grpc.WithDialer(dialFn))
}
if needWithInsecure {
gopts = append(gopts, grpc.WithInsecure())
@ -75,6 +90,14 @@ func WithFailFast() ClientOpt {
return &withFailFast{}
}
type withDialer struct {
dialer func(string, time.Duration) (net.Conn, error)
}
func WithDialer(df func(string, time.Duration) (net.Conn, error)) ClientOpt {
return &withDialer{dialer: df}
}
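// Illustrative usage sketch (an assumption, not part of the vendored source):
// route the client connection through a custom dialer, e.g. to reuse an
// existing tunnel; the address below is an example only.
//
// c, err := New(ctx, "tcp://127.0.0.1:1234",
//     WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
//         return net.DialTimeout("tcp", "127.0.0.1:1234", timeout)
//     }))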
type withCredentials struct {
ServerName string
CACert string
@ -128,3 +151,19 @@ func WithTracer(t opentracing.Tracer) ClientOpt {
type withTracer struct {
tracer opentracing.Tracer
}
func resolveDialer(address string) (func(string, time.Duration) (net.Conn, error), error) {
ch, err := connhelper.GetConnectionHelper(address)
if err != nil {
return nil, err
}
if ch != nil {
f := func(a string, _ time.Duration) (net.Conn, error) {
ctx := context.Background()
return ch.ContextDialer(ctx, a)
}
return f, nil
}
// basic dialer
return dialer, nil
}

View File

@ -16,7 +16,7 @@ func dialer(address string, timeout time.Duration) (net.Conn, error) {
}
switch addrParts[0] {
case "npipe":
address = strings.Replace(addrParts[1], "/", "\\", 0)
address = strings.Replace(addrParts[1], "/", "\\", -1)
return winio.DialPipe(address, &timeout)
default:
return net.DialTimeout(addrParts[0], addrParts[1], timeout)

View File

@ -0,0 +1,37 @@
// Package connhelper provides helpers for connecting to a remote daemon host with custom logic.
package connhelper
import (
"context"
"net"
"net/url"
)
var helpers = map[string]func(*url.URL) (*ConnectionHelper, error){}
// ConnectionHelper allows connecting to a remote host via a custom stream-provider binary.
type ConnectionHelper struct {
// ContextDialer can be passed to grpc.WithContextDialer
ContextDialer func(ctx context.Context, addr string) (net.Conn, error)
}
// GetConnectionHelper returns BuildKit-specific connection helper for the given URL.
// GetConnectionHelper returns nil without error when no helper is registered for the scheme.
func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) {
u, err := url.Parse(daemonURL)
if err != nil {
return nil, err
}
fn, ok := helpers[u.Scheme]
if !ok {
return nil, nil
}
return fn(u)
}
// Register registers a new ConnectionHelper for the given scheme.
func Register(scheme string, fn func(*url.URL) (*ConnectionHelper, error)) {
helpers[scheme] = fn
}
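// Illustrative usage sketch (an assumption, not part of the vendored source):
// a package registers a helper for a custom scheme in its init function; the
// "ssh" scheme and dialSSH helper below are hypothetical.
//
// func init() {
//     Register("ssh", func(u *url.URL) (*ConnectionHelper, error) {
//         return &ConnectionHelper{
//             ContextDialer: func(ctx context.Context, addr string) (net.Conn, error) {
//                 return dialSSH(ctx, u) // hypothetical stream provider
//             },
//         }, nil
//     })
// }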

View File

@ -3,6 +3,7 @@ package client
const (
ExporterImage = "image"
ExporterLocal = "local"
ExporterTar = "tar"
ExporterOCI = "oci"
ExporterDocker = "docker"
)

View File

@ -41,5 +41,6 @@ type SolveStatus struct {
}
type SolveResponse struct {
// ExporterResponse is also used for CacheExporter
ExporterResponse map[string]string
}

View File

@ -20,6 +20,7 @@ type Meta struct {
ProxyEnv *ProxyEnv
ExtraHosts []HostIP
Network pb.NetMode
Security pb.SecurityMode
}
func NewExecOp(root Output, meta Meta, readOnly bool, c Constraints) *ExecOp {
@ -52,7 +53,7 @@ type mount struct {
cacheID string
tmpfs bool
cacheSharing CacheMountSharingMode
// hasOutput bool
noOutput bool
}
type ExecOp struct {
@ -79,6 +80,8 @@ func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Outp
m.output = source
} else if m.tmpfs {
m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)}
} else if m.noOutput {
m.output = &output{vertex: e, err: errors.Errorf("mount marked no-output and %s can't be used as a parent", target)}
} else {
o := &output{vertex: e, getIndex: e.getMountIndexFn(m)}
if p := e.constraints.Platform; p != nil {
@ -166,13 +169,18 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
}
peo := &pb.ExecOp{
Meta: meta,
Network: e.meta.Network,
Meta: meta,
Network: e.meta.Network,
Security: e.meta.Security,
}
if e.meta.Network != NetModeSandbox {
addCap(&e.constraints, pb.CapExecMetaNetwork)
}
if e.meta.Security != SecurityModeInsecure {
addCap(&e.constraints, pb.CapExecMetaSecurity)
}
if p := e.meta.ProxyEnv; p != nil {
peo.Meta.ProxyEnv = &pb.ProxyEnv{
HttpProxy: p.HttpProxy,
@ -242,7 +250,7 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
}
outputIndex := pb.OutputIndex(-1)
if !m.readonly && m.cacheID == "" && !m.tmpfs {
if !m.noOutput && !m.readonly && m.cacheID == "" && !m.tmpfs {
outputIndex = pb.OutputIndex(outIndex)
outIndex++
}
@ -338,7 +346,7 @@ func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
i := 0
for _, m2 := range e.mounts {
if m2.readonly || m2.cacheID != "" {
if m2.noOutput || m2.readonly || m2.cacheID != "" {
continue
}
if m == m2 {
@ -379,6 +387,10 @@ func SourcePath(src string) MountOption {
}
}
func ForceNoOutput(m *mount) {
m.noOutput = true
}
func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption {
return func(m *mount) {
m.cacheID = id
@ -408,6 +420,12 @@ func Network(n pb.NetMode) RunOption {
})
}
func Security(s pb.SecurityMode) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = security(s)(ei.State)
})
}
func Shlex(str string) RunOption {
return Shlexf(str)
}
@ -623,3 +641,8 @@ const (
NetModeHost = pb.NetMode_HOST
NetModeNone = pb.NetMode_NONE
)
const (
SecurityModeInsecure = pb.SecurityMode_INSECURE
SecurityModeSandbox = pb.SecurityMode_SANDBOX
)
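// Illustrative usage sketch (an assumption, not part of the vendored source):
// request an insecure (privileged) exec step; the daemon must also allow the
// matching entitlement for the solve to succeed.
//
// st := Image("docker.io/library/alpine:latest").
//     Run(Shlex("mount -t tmpfs none /mnt"), Security(SecurityModeInsecure)).
//     Root()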

727
vendor/github.com/moby/buildkit/client/llb/fileop.go generated vendored Normal file
View File

@ -0,0 +1,727 @@
package llb
import (
_ "crypto/sha256"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/moby/buildkit/solver/pb"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// Examples:
// local := llb.Local(...)
// llb.Image().Dir("/abc").File(Mkdir("./foo").Mkfile("/abc/foo/bar", []byte("data")))
// llb.Image().File(Mkdir("/foo").Mkfile("/foo/bar", []byte("data")))
// llb.Image().File(Copy(local, "/foo", "/bar")).File(Copy(local, "/foo2", "/bar2"))
//
// a := Mkdir("./foo") // *FileAction /ced/foo
// b := Mkdir("./bar") // /abc/bar
// c := b.Copy(a.WithState(llb.Scratch().Dir("/ced")), "./foo", "./baz") // /abc/baz
// llb.Image().Dir("/abc").File(c)
//
// In the future this can be extended to multiple outputs with:
// a := Mkdir("./foo")
// b, id := a.GetSelector()
// c := b.Mkdir("./bar")
// filestate = state.File(c)
// filestate.GetOutput(id).Exec()
func NewFileOp(s State, action *FileAction, c Constraints) *FileOp {
action = action.bind(s)
f := &FileOp{
action: action,
constraints: c,
}
f.output = &output{vertex: f, getIndex: func() (pb.OutputIndex, error) {
return pb.OutputIndex(0), nil
}}
return f
}
// CopyInput is either llb.State or *fileActionWithState (from FileAction.WithState)
type CopyInput interface {
isFileOpCopyInput()
}
type subAction interface {
toProtoAction(string, pb.InputIndex) pb.IsFileAction
}
type FileAction struct {
state *State
prev *FileAction
action subAction
err error
}
func (fa *FileAction) Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction {
a := Mkdir(p, m, opt...)
a.prev = fa
return a
}
func (fa *FileAction) Mkfile(p string, m os.FileMode, dt []byte, opt ...MkfileOption) *FileAction {
a := Mkfile(p, m, dt, opt...)
a.prev = fa
return a
}
func (fa *FileAction) Rm(p string, opt ...RmOption) *FileAction {
a := Rm(p, opt...)
a.prev = fa
return a
}
func (fa *FileAction) Copy(input CopyInput, src, dest string, opt ...CopyOption) *FileAction {
a := Copy(input, src, dest, opt...)
a.prev = fa
return a
}
func (fa *FileAction) allOutputs(m map[Output]struct{}) {
if fa == nil {
return
}
if fa.state != nil && fa.state.Output() != nil {
m[fa.state.Output()] = struct{}{}
}
if a, ok := fa.action.(*fileActionCopy); ok {
if a.state != nil {
if out := a.state.Output(); out != nil {
m[out] = struct{}{}
}
} else if a.fas != nil {
a.fas.allOutputs(m)
}
}
fa.prev.allOutputs(m)
}
func (fa *FileAction) bind(s State) *FileAction {
if fa == nil {
return nil
}
fa2 := *fa
fa2.prev = fa.prev.bind(s)
fa2.state = &s
return &fa2
}
func (fa *FileAction) WithState(s State) CopyInput {
return &fileActionWithState{FileAction: fa.bind(s)}
}
type fileActionWithState struct {
*FileAction
}
func (fas *fileActionWithState) isFileOpCopyInput() {}
func Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction {
var mi MkdirInfo
for _, o := range opt {
o.SetMkdirOption(&mi)
}
return &FileAction{
action: &fileActionMkdir{
file: p,
mode: m,
info: mi,
},
}
}
type fileActionMkdir struct {
file string
mode os.FileMode
info MkdirInfo
}
func (a *fileActionMkdir) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction {
return &pb.FileAction_Mkdir{
Mkdir: &pb.FileActionMkDir{
Path: normalizePath(parent, a.file, false),
Mode: int32(a.mode & 0777),
MakeParents: a.info.MakeParents,
Owner: a.info.ChownOpt.marshal(base),
Timestamp: marshalTime(a.info.CreatedTime),
},
}
}
type MkdirOption interface {
SetMkdirOption(*MkdirInfo)
}
type ChownOption interface {
MkdirOption
MkfileOption
CopyOption
}
type mkdirOptionFunc func(*MkdirInfo)
func (fn mkdirOptionFunc) SetMkdirOption(mi *MkdirInfo) {
fn(mi)
}
var _ MkdirOption = &MkdirInfo{}
func WithParents(b bool) MkdirOption {
return mkdirOptionFunc(func(mi *MkdirInfo) {
mi.MakeParents = b
})
}
type MkdirInfo struct {
MakeParents bool
ChownOpt *ChownOpt
CreatedTime *time.Time
}
func (mi *MkdirInfo) SetMkdirOption(mi2 *MkdirInfo) {
*mi2 = *mi
}
func WithUser(name string) ChownOption {
opt := ChownOpt{}
parts := strings.SplitN(name, ":", 2)
for i, v := range parts {
switch i {
case 0:
uid, err := parseUID(v)
if err != nil {
opt.User = &UserOpt{Name: v}
} else {
opt.User = &UserOpt{UID: uid}
}
case 1:
gid, err := parseUID(v)
if err != nil {
opt.Group = &UserOpt{Name: v}
} else {
opt.Group = &UserOpt{UID: gid}
}
}
}
return opt
}
func parseUID(str string) (int, error) {
if str == "root" {
return 0, nil
}
uid, err := strconv.ParseInt(str, 10, 32)
if err != nil {
return 0, err
}
return int(uid), nil
}
func WithUIDGID(uid, gid int) ChownOption {
return ChownOpt{
User: &UserOpt{UID: uid},
Group: &UserOpt{UID: gid},
}
}
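// Illustrative usage sketch (an assumption, not part of the vendored source):
// chown options combined with file actions; WithUser accepts "name", "uid",
// or "user:group" forms as parsed above. Paths and IDs are examples only.
//
// st := Image("docker.io/library/alpine:latest").File(
//     Mkdir("/data", 0755, WithParents(true), WithUIDGID(1000, 1000)).
//         Mkfile("/data/hello", 0644, []byte("hi"), WithUser("1000:1000")))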
type ChownOpt struct {
User *UserOpt
Group *UserOpt
}
func (co ChownOpt) SetMkdirOption(mi *MkdirInfo) {
mi.ChownOpt = &co
}
func (co ChownOpt) SetMkfileOption(mi *MkfileInfo) {
mi.ChownOpt = &co
}
func (co ChownOpt) SetCopyOption(mi *CopyInfo) {
mi.ChownOpt = &co
}
func (cp *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt {
if cp == nil {
return nil
}
return &pb.ChownOpt{
User: cp.User.marshal(base),
Group: cp.Group.marshal(base),
}
}
type UserOpt struct {
UID int
Name string
}
func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt {
if up == nil {
return nil
}
if up.Name != "" {
return &pb.UserOpt{User: &pb.UserOpt_ByName{ByName: &pb.NamedUserOpt{
Name: up.Name, Input: base}}}
}
return &pb.UserOpt{User: &pb.UserOpt_ByID{ByID: uint32(up.UID)}}
}
func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction {
var mi MkfileInfo
for _, o := range opts {
o.SetMkfileOption(&mi)
}
return &FileAction{
action: &fileActionMkfile{
file: p,
mode: m,
dt: dt,
info: mi,
},
}
}
type MkfileOption interface {
SetMkfileOption(*MkfileInfo)
}
type MkfileInfo struct {
ChownOpt *ChownOpt
CreatedTime *time.Time
}
func (mi *MkfileInfo) SetMkfileOption(mi2 *MkfileInfo) {
*mi2 = *mi
}
var _ MkfileOption = &MkfileInfo{}
type fileActionMkfile struct {
file string
mode os.FileMode
dt []byte
info MkfileInfo
}
func (a *fileActionMkfile) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction {
return &pb.FileAction_Mkfile{
Mkfile: &pb.FileActionMkFile{
Path: normalizePath(parent, a.file, false),
Mode: int32(a.mode & 0777),
Data: a.dt,
Owner: a.info.ChownOpt.marshal(base),
Timestamp: marshalTime(a.info.CreatedTime),
},
}
}
func Rm(p string, opts ...RmOption) *FileAction {
var mi RmInfo
for _, o := range opts {
o.SetRmOption(&mi)
}
return &FileAction{
action: &fileActionRm{
file: p,
info: mi,
},
}
}
type RmOption interface {
SetRmOption(*RmInfo)
}
type rmOptionFunc func(*RmInfo)
func (fn rmOptionFunc) SetRmOption(mi *RmInfo) {
fn(mi)
}
type RmInfo struct {
AllowNotFound bool
AllowWildcard bool
}
func (mi *RmInfo) SetRmOption(mi2 *RmInfo) {
*mi2 = *mi
}
var _ RmOption = &RmInfo{}
func WithAllowNotFound(b bool) RmOption {
return rmOptionFunc(func(mi *RmInfo) {
mi.AllowNotFound = b
})
}
func WithAllowWildcard(b bool) RmOption {
return rmOptionFunc(func(mi *RmInfo) {
mi.AllowWildcard = b
})
}
type fileActionRm struct {
file string
info RmInfo
}
func (a *fileActionRm) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction {
return &pb.FileAction_Rm{
Rm: &pb.FileActionRm{
Path: normalizePath(parent, a.file, false),
AllowNotFound: a.info.AllowNotFound,
AllowWildcard: a.info.AllowWildcard,
},
}
}
func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction {
var state *State
var fas *fileActionWithState
var err error
if st, ok := input.(State); ok {
state = &st
} else if v, ok := input.(*fileActionWithState); ok {
fas = v
} else {
err = errors.Errorf("invalid input type %T for copy", input)
}
var mi CopyInfo
for _, o := range opts {
o.SetCopyOption(&mi)
}
return &FileAction{
action: &fileActionCopy{
state: state,
fas: fas,
src: src,
dest: dest,
info: mi,
},
err: err,
}
}
type CopyOption interface {
SetCopyOption(*CopyInfo)
}
type CopyInfo struct {
Mode *os.FileMode
FollowSymlinks bool
CopyDirContentsOnly bool
AttemptUnpack bool
CreateDestPath bool
AllowWildcard bool
AllowEmptyWildcard bool
ChownOpt *ChownOpt
CreatedTime *time.Time
}
func (mi *CopyInfo) SetCopyOption(mi2 *CopyInfo) {
*mi2 = *mi
}
var _ CopyOption = &CopyInfo{}
type fileActionCopy struct {
state *State
fas *fileActionWithState
src string
dest string
info CopyInfo
}
func (a *fileActionCopy) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction {
c := &pb.FileActionCopy{
Src: a.sourcePath(),
Dest: normalizePath(parent, a.dest, true),
Owner: a.info.ChownOpt.marshal(base),
AllowWildcard: a.info.AllowWildcard,
AllowEmptyWildcard: a.info.AllowEmptyWildcard,
FollowSymlink: a.info.FollowSymlinks,
DirCopyContents: a.info.CopyDirContentsOnly,
AttemptUnpackDockerCompatibility: a.info.AttemptUnpack,
CreateDestPath: a.info.CreateDestPath,
Timestamp: marshalTime(a.info.CreatedTime),
}
if a.info.Mode != nil {
c.Mode = int32(*a.info.Mode)
} else {
c.Mode = -1
}
return &pb.FileAction_Copy{
Copy: c,
}
}
func (c *fileActionCopy) sourcePath() string {
p := path.Clean(c.src)
if !path.IsAbs(p) {
if c.state != nil {
p = path.Join("/", c.state.GetDir(), p)
} else if c.fas != nil {
p = path.Join("/", c.fas.state.GetDir(), p)
}
}
return p
}
type CreatedTime time.Time
func WithCreatedTime(t time.Time) CreatedTime {
return CreatedTime(t)
}
func (c CreatedTime) SetMkdirOption(mi *MkdirInfo) {
mi.CreatedTime = (*time.Time)(&c)
}
func (c CreatedTime) SetMkfileOption(mi *MkfileInfo) {
mi.CreatedTime = (*time.Time)(&c)
}
func (c CreatedTime) SetCopyOption(mi *CopyInfo) {
mi.CreatedTime = (*time.Time)(&c)
}
func marshalTime(t *time.Time) int64 {
if t == nil {
return -1
}
return t.UnixNano()
}
type FileOp struct {
MarshalCache
action *FileAction
output Output
constraints Constraints
isValidated bool
}
func (f *FileOp) Validate() error {
if f.isValidated {
return nil
}
if f.action == nil {
return errors.Errorf("action is required")
}
f.isValidated = true
return nil
}
type marshalState struct {
visited map[*FileAction]*fileActionState
inputs []*pb.Input
actions []*fileActionState
}
func newMarshalState() *marshalState {
return &marshalState{
visited: map[*FileAction]*fileActionState{},
}
}
type fileActionState struct {
base pb.InputIndex
input pb.InputIndex
inputRelative *int
input2 pb.InputIndex
input2Relative *int
target int
action subAction
fa *FileAction
}
func (ms *marshalState) addInput(st *fileActionState, c *Constraints, o Output) (pb.InputIndex, error) {
inp, err := o.ToInput(c)
if err != nil {
return 0, err
}
for i, inp2 := range ms.inputs {
if *inp == *inp2 {
return pb.InputIndex(i), nil
}
}
i := pb.InputIndex(len(ms.inputs))
ms.inputs = append(ms.inputs, inp)
return i, nil
}
func (ms *marshalState) add(fa *FileAction, c *Constraints) (*fileActionState, error) {
if st, ok := ms.visited[fa]; ok {
return st, nil
}
if fa.err != nil {
return nil, fa.err
}
var prevState *fileActionState
if parent := fa.prev; parent != nil {
var err error
prevState, err = ms.add(parent, c)
if err != nil {
return nil, err
}
}
st := &fileActionState{
action: fa.action,
input: -1,
input2: -1,
base: -1,
fa: fa,
}
if source := fa.state.Output(); source != nil {
inp, err := ms.addInput(st, c, source)
if err != nil {
return nil, err
}
st.base = inp
}
if fa.prev == nil {
st.input = st.base
} else {
st.inputRelative = &prevState.target
}
if a, ok := fa.action.(*fileActionCopy); ok {
if a.state != nil {
if out := a.state.Output(); out != nil {
inp, err := ms.addInput(st, c, out)
if err != nil {
return nil, err
}
st.input2 = inp
}
} else if a.fas != nil {
src, err := ms.add(a.fas.FileAction, c)
if err != nil {
return nil, err
}
st.input2Relative = &src.target
} else {
return nil, errors.Errorf("invalid empty source for copy")
}
}
st.target = len(ms.actions)
ms.visited[fa] = st
ms.actions = append(ms.actions, st)
return st, nil
}
func (f *FileOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
if f.Cached(c) {
return f.Load()
}
if err := f.Validate(); err != nil {
return "", nil, nil, err
}
addCap(&f.constraints, pb.CapFileBase)
pfo := &pb.FileOp{}
pop, md := MarshalConstraints(c, &f.constraints)
pop.Op = &pb.Op_File{
File: pfo,
}
state := newMarshalState()
_, err := state.add(f.action, c)
if err != nil {
return "", nil, nil, err
}
pop.Inputs = state.inputs
for i, st := range state.actions {
output := pb.OutputIndex(-1)
if i+1 == len(state.actions) {
output = 0
}
var parent string
if st.fa.state != nil {
parent = st.fa.state.GetDir()
}
pfo.Actions = append(pfo.Actions, &pb.FileAction{
Input: getIndex(st.input, len(state.inputs), st.inputRelative),
SecondaryInput: getIndex(st.input2, len(state.inputs), st.input2Relative),
Output: output,
Action: st.action.toProtoAction(parent, st.base),
})
}
dt, err := pop.Marshal()
if err != nil {
return "", nil, nil, err
}
f.Store(dt, md, c)
return f.Load()
}
func normalizePath(parent, p string, keepSlash bool) string {
origPath := p
p = path.Clean(p)
if !path.IsAbs(p) {
p = path.Join("/", parent, p)
}
if keepSlash {
if strings.HasSuffix(origPath, "/") && !strings.HasSuffix(p, "/") {
p += "/"
} else if strings.HasSuffix(origPath, "/.") {
if p != "/" {
p += "/"
}
p += "."
}
}
return p
}
func (f *FileOp) Output() Output {
return f.output
}
func (f *FileOp) Inputs() (inputs []Output) {
mm := map[Output]struct{}{}
f.action.allOutputs(mm)
for o := range mm {
inputs = append(inputs, o)
}
return inputs
}
func getIndex(input pb.InputIndex, len int, relative *int) pb.InputIndex {
if relative != nil {
return pb.InputIndex(len + *relative)
}
return input
}

View File

@ -21,6 +21,7 @@ var (
keyExtraHost = contextKeyT("llb.exec.extrahost")
keyPlatform = contextKeyT("llb.platform")
keyNetwork = contextKeyT("llb.network")
keySecurity = contextKeyT("llb.security")
)
func addEnvf(key, value string, v ...interface{}) StateOption {
@ -148,7 +149,6 @@ func network(v pb.NetMode) StateOption {
return s.WithValue(keyNetwork, v)
}
}
func getNetwork(s State) pb.NetMode {
v := s.Value(keyNetwork)
if v != nil {
@ -158,6 +158,20 @@ func getNetwork(s State) pb.NetMode {
return NetModeSandbox
}
func security(v pb.SecurityMode) StateOption {
return func(s State) State {
return s.WithValue(keySecurity, v)
}
}
func getSecurity(s State) pb.SecurityMode {
v := s.Value(keySecurity)
if v != nil {
n := v.(pb.SecurityMode)
return n
}
return SecurityModeSandbox
}
type EnvList []KeyValue
type KeyValue struct {

View File

@ -126,30 +126,11 @@ func Image(ref string, opts ...ImageOption) State {
if err != nil {
src.err = err
} else {
var img struct {
Config struct {
Env []string `json:"Env,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty"`
User string `json:"User,omitempty"`
} `json:"config,omitempty"`
}
if err := json.Unmarshal(dt, &img); err != nil {
src.err = err
} else {
st := NewState(src.Output())
for _, env := range img.Config.Env {
parts := strings.SplitN(env, "=", 2)
if len(parts[0]) > 0 {
var v string
if len(parts) > 1 {
v = parts[1]
}
st = st.AddEnv(parts[0], v)
}
}
st = st.Dir(img.Config.WorkingDir)
st, err := NewState(src.Output()).WithImageConfig(dt)
if err == nil {
return st
}
src.err = err
}
}
return NewState(src.Output())

View File

@ -2,8 +2,10 @@ package llb
import (
"context"
"encoding/json"
"fmt"
"net"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/identity"
@ -171,6 +173,31 @@ func (s State) WithOutput(o Output) State {
return s
}
func (s State) WithImageConfig(c []byte) (State, error) {
var img struct {
Config struct {
Env []string `json:"Env,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty"`
User string `json:"User,omitempty"`
} `json:"config,omitempty"`
}
if err := json.Unmarshal(c, &img); err != nil {
return State{}, err
}
for _, env := range img.Config.Env {
parts := strings.SplitN(env, "=", 2)
if len(parts[0]) > 0 {
var v string
if len(parts) > 1 {
v = parts[1]
}
s = s.AddEnv(parts[0], v)
}
}
s = s.Dir(img.Config.WorkingDir)
return s, nil
}
func (s State) Run(ro ...RunOption) ExecState {
ei := &ExecInfo{State: s}
if p := s.GetPlatform(); p != nil {
@ -187,6 +214,7 @@ func (s State) Run(ro ...RunOption) ExecState {
ProxyEnv: ei.ProxyEnv,
ExtraHosts: getExtraHosts(ei.State),
Network: getNetwork(ei.State),
Security: getSecurity(ei.State),
}
exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Constraints)
@ -202,6 +230,15 @@ func (s State) Run(ro ...RunOption) ExecState {
}
}
func (s State) File(a *FileAction, opts ...ConstraintsOpt) State {
var c Constraints
for _, o := range opts {
o.SetConstraintsOption(&c)
}
return s.WithOutput(NewFileOp(s, a, c).Output())
}
func (s State) AddEnv(key, value string) State {
return s.AddEnvf(key, value)
}
@ -256,6 +293,13 @@ func (s State) Network(n pb.NetMode) State {
func (s State) GetNetwork() pb.NetMode {
return getNetwork(s)
}
func (s State) Security(n pb.SecurityMode) State {
return security(n)(s)
}
func (s State) GetSecurity() pb.SecurityMode {
return getSecurity(s)
}
func (s State) With(so ...StateOption) State {
for _, o := range so {
@ -268,6 +312,8 @@ func (s State) AddExtraHost(host string, ip net.IP) State {
return extraHost(host, ip)(s)
}
func (s State) isFileOpCopyInput() {}
type output struct {
vertex Vertex
getIndex func() (pb.OutputIndex, error)
@ -378,12 +424,16 @@ func WithDescription(m map[string]string) ConstraintsOpt {
})
}
func WithCustomName(name string, a ...interface{}) ConstraintsOpt {
func WithCustomName(name string) ConstraintsOpt {
return WithDescription(map[string]string{
"llb.customname": fmt.Sprintf(name, a...),
"llb.customname": name,
})
}
func WithCustomNamef(name string, a ...interface{}) ConstraintsOpt {
return WithCustomName(fmt.Sprintf(name, a...))
}
// WithExportCache forces results for this vertex to be exported with the cache
func WithExportCache() ConstraintsOpt {
return constraintsOptFunc(func(c *Constraints) {

View File

@ -0,0 +1,113 @@
package ociindex
import (
"encoding/json"
"io/ioutil"
"os"
"github.com/gofrs/flock"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
const (
// IndexJSONLockFileSuffix is the suffix of the lock file
IndexJSONLockFileSuffix = ".lock"
)
// PutDescToIndex puts desc to index with tag.
// Existing manifests with the same tag will be removed from the index.
func PutDescToIndex(index *v1.Index, desc v1.Descriptor, tag string) error {
if index == nil {
index = &v1.Index{}
}
if index.SchemaVersion == 0 {
index.SchemaVersion = 2
}
if tag != "" {
if desc.Annotations == nil {
desc.Annotations = make(map[string]string)
}
desc.Annotations[v1.AnnotationRefName] = tag
// remove existing manifests with the same tag
var manifests []v1.Descriptor
for _, m := range index.Manifests {
if m.Annotations[v1.AnnotationRefName] != tag {
manifests = append(manifests, m)
}
}
index.Manifests = manifests
}
index.Manifests = append(index.Manifests, desc)
return nil
}
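// Illustrative usage sketch (an assumption, not part of the vendored source):
// tagging a freshly exported manifest as "latest" replaces any previous
// "latest" entry in the index; the digest below is a hypothetical placeholder.
//
// var idx v1.Index
// desc := v1.Descriptor{
//     MediaType: v1.MediaTypeImageManifest,
//     Digest:    "sha256:0123...", // hypothetical digest
// }
// _ = PutDescToIndex(&idx, desc, "latest")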
func PutDescToIndexJSONFileLocked(indexJSONPath string, desc v1.Descriptor, tag string) error {
lockPath := indexJSONPath + IndexJSONLockFileSuffix
lock := flock.New(lockPath)
locked, err := lock.TryLock()
if err != nil {
return errors.Wrapf(err, "could not lock %s", lockPath)
}
if !locked {
return errors.Errorf("could not lock %s", lockPath)
}
defer func() {
lock.Unlock()
os.RemoveAll(lockPath)
}()
f, err := os.OpenFile(indexJSONPath, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return errors.Wrapf(err, "could not open %s", indexJSONPath)
}
defer f.Close()
var idx v1.Index
b, err := ioutil.ReadAll(f)
if err != nil {
return errors.Wrapf(err, "could not read %s", indexJSONPath)
}
if len(b) > 0 {
if err := json.Unmarshal(b, &idx); err != nil {
return errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b))
}
}
if err = PutDescToIndex(&idx, desc, tag); err != nil {
return err
}
b, err = json.Marshal(idx)
if err != nil {
return err
}
if _, err = f.WriteAt(b, 0); err != nil {
return err
}
if err = f.Truncate(int64(len(b))); err != nil {
return err
}
return nil
}
func ReadIndexJSONFileLocked(indexJSONPath string) (*v1.Index, error) {
lockPath := indexJSONPath + IndexJSONLockFileSuffix
lock := flock.New(lockPath)
locked, err := lock.TryRLock()
if err != nil {
return nil, errors.Wrapf(err, "could not lock %s", lockPath)
}
if !locked {
return nil, errors.Errorf("could not lock %s", lockPath)
}
defer func() {
lock.Unlock()
os.RemoveAll(lockPath)
}()
b, err := ioutil.ReadFile(indexJSONPath)
if err != nil {
return nil, errors.Wrapf(err, "could not read %s", indexJSONPath)
}
var idx v1.Index
if err := json.Unmarshal(b, &idx); err != nil {
return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b))
}
return &idx, nil
}

View File

@ -2,20 +2,26 @@ package client
import (
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/containerd/containerd/content"
contentlocal "github.com/containerd/containerd/content/local"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/ociindex"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
sessioncontent "github.com/moby/buildkit/session/content"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/entitlements"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -24,21 +30,29 @@ import (
)
type SolveOpt struct {
Exporter string
ExporterAttrs map[string]string
ExporterOutput io.WriteCloser // for ExporterOCI and ExporterDocker
ExporterOutputDir string // for ExporterLocal
Exports []ExportEntry
LocalDirs map[string]string
SharedKey string
Frontend string
FrontendAttrs map[string]string
ExportCache string
ExportCacheAttrs map[string]string
ImportCache []string
CacheExports []CacheOptionsEntry
CacheImports []CacheOptionsEntry
Session []session.Attachable
AllowedEntitlements []entitlements.Entitlement
}
type ExportEntry struct {
Type string
Attrs map[string]string
Output io.WriteCloser // for ExporterOCI and ExporterDocker
OutputDir string // for ExporterLocal
}
type CacheOptionsEntry struct {
Type string
Attrs map[string]string
}
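// Illustrative usage sketch (an assumption, not part of the vendored source):
// a SolveOpt built against the new Exports and cache-entry API; the paths are
// examples only.
//
// opt := SolveOpt{
//     Exports: []ExportEntry{{
//         Type:      ExporterLocal,
//         OutputDir: "/tmp/out",
//     }},
//     CacheExports: []CacheOptionsEntry{{
//         Type:  "local",
//         Attrs: map[string]string{"dest": "/tmp/buildcache"},
//     }},
//     CacheImports: []CacheOptionsEntry{{
//         Type:  "local",
//         Attrs: map[string]string{"src": "/tmp/buildcache"},
//     }},
// }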
// Solve calls Solve on the controller.
// def must be nil if (and only if) opt.Frontend is set.
func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) {
@ -93,32 +107,51 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
s.Allow(a)
}
switch opt.Exporter {
var ex ExportEntry
if len(opt.Exports) > 1 {
return nil, errors.New("currently only single Exports can be specified")
}
if len(opt.Exports) == 1 {
ex = opt.Exports[0]
}
switch ex.Type {
case ExporterLocal:
if opt.ExporterOutput != nil {
if ex.Output != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if opt.ExporterOutputDir == "" {
if ex.OutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTargetDir(opt.ExporterOutputDir))
case ExporterOCI, ExporterDocker:
if opt.ExporterOutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
case ExporterOCI, ExporterDocker, ExporterTar:
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
if opt.ExporterOutput == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", opt.Exporter)
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
}
s.Allow(filesync.NewFSSyncTarget(opt.ExporterOutput))
s.Allow(filesync.NewFSSyncTarget(ex.Output))
default:
if opt.ExporterOutput != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", opt.Exporter)
if ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if opt.ExporterOutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", opt.ExporterOutputDir, opt.Exporter)
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
}
cacheOpt, err := parseCacheOptions(opt)
if err != nil {
return nil, err
}
if len(cacheOpt.contentStores) > 0 {
s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores))
}
for k, v := range cacheOpt.frontendAttrs {
opt.FrontendAttrs[k] = v
}
eg.Go(func() error {
return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
})
@ -144,17 +177,13 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
Ref: ref,
Definition: pbd,
Exporter: opt.Exporter,
ExporterAttrs: opt.ExporterAttrs,
Exporter: ex.Type,
ExporterAttrs: ex.Attrs,
Session: s.ID(),
Frontend: opt.Frontend,
FrontendAttrs: opt.FrontendAttrs,
Cache: controlapi.CacheOptions{
ExportRef: opt.ExportCache,
ImportRefs: opt.ImportCache,
ExportAttrs: opt.ExportCacheAttrs,
},
Entitlements: opt.AllowedEntitlements,
Cache: cacheOpt.options,
Entitlements: opt.AllowedEntitlements,
})
if err != nil {
return errors.Wrap(err, "failed to solve")
@ -243,6 +272,19 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
if err := eg.Wait(); err != nil {
return nil, err
}
// Update index.json of exported cache content store
// FIXME(AkihiroSuda): dedupe const definition of cache/remotecache.ExporterResponseManifestDesc = "cache.manifest"
if manifestDescJSON := res.ExporterResponse["cache.manifest"]; manifestDescJSON != "" {
var manifestDesc ocispec.Descriptor
if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil {
return nil, err
}
for indexJSONPath, tag := range cacheOpt.indicesToUpdate {
if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil {
return nil, err
}
}
}
return res, nil
}
@ -256,7 +298,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
return nil, errors.Errorf("%s not a directory", d)
}
}
resetUIDAndGID := func(st *fstypes.Stat) bool {
resetUIDAndGID := func(p string, st *fstypes.Stat) bool {
st.Uid = 0
st.Gid = 0
return true
@ -295,3 +337,128 @@ func defaultSessionName() string {
}
return filepath.Base(wd)
}
type cacheOptions struct {
options controlapi.CacheOptions
contentStores map[string]content.Store // key: ID of content store ("local:" + csDir)
indicesToUpdate map[string]string // key: index.json file path, value: tag
frontendAttrs map[string]string
}
func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) {
var (
cacheExports []*controlapi.CacheOptionsEntry
cacheImports []*controlapi.CacheOptionsEntry
// legacy API is used for registry caches, because the daemon might not support the new API
legacyExportRef string
legacyImportRefs []string
)
contentStores := make(map[string]content.Store)
indicesToUpdate := make(map[string]string) // key: index.json file path, value: tag
frontendAttrs := make(map[string]string)
legacyExportAttrs := make(map[string]string)
for _, ex := range opt.CacheExports {
if ex.Type == "local" {
csDir := ex.Attrs["dest"]
if csDir == "" {
return nil, errors.New("local cache exporter requires dest")
}
if err := os.MkdirAll(csDir, 0755); err != nil {
return nil, err
}
cs, err := contentlocal.NewStore(csDir)
if err != nil {
return nil, err
}
contentStores["local:"+csDir] = cs
// TODO(AkihiroSuda): support custom index JSON path and tag
indexJSONPath := filepath.Join(csDir, "index.json")
indicesToUpdate[indexJSONPath] = "latest"
}
if ex.Type == "registry" && legacyExportRef == "" {
legacyExportRef = ex.Attrs["ref"]
for k, v := range ex.Attrs {
if k != "ref" {
legacyExportAttrs[k] = v
}
}
} else {
cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
Type: ex.Type,
Attrs: ex.Attrs,
})
}
}
for _, im := range opt.CacheImports {
attrs := im.Attrs
if im.Type == "local" {
csDir := im.Attrs["src"]
if csDir == "" {
return nil, errors.New("local cache importer requires src")
}
if err := os.MkdirAll(csDir, 0755); err != nil {
return nil, err
}
cs, err := contentlocal.NewStore(csDir)
if err != nil {
return nil, err
}
contentStores["local:"+csDir] = cs
// if digest is not specified, load from "latest" tag
if attrs["digest"] == "" {
idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json"))
if err != nil {
return nil, err
}
for _, m := range idx.Manifests {
if m.Annotations[ocispec.AnnotationRefName] == "latest" {
attrs["digest"] = string(m.Digest)
break
}
}
if attrs["digest"] == "" {
return nil, errors.New("local cache importer requires either explicit digest or \"latest\" tag on index.json")
}
}
}
if im.Type == "registry" {
legacyImportRef := attrs["ref"]
legacyImportRefs = append(legacyImportRefs, legacyImportRef)
} else {
cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
Type: im.Type,
Attrs: attrs,
})
}
}
if opt.Frontend != "" {
// use legacy API for registry importers, because the frontend might not support the new API
if len(legacyImportRefs) > 0 {
frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")
}
// use new API for other importers
if len(cacheImports) > 0 {
s, err := json.Marshal(cacheImports)
if err != nil {
return nil, err
}
frontendAttrs["cache-imports"] = string(s)
}
}
res := cacheOptions{
options: controlapi.CacheOptions{
// old API (for registry caches, planned to be removed in early 2019)
ExportRefDeprecated: legacyExportRef,
ExportAttrsDeprecated: legacyExportAttrs,
ImportRefsDeprecated: legacyImportRefs,
// new API
Exports: cacheExports,
Imports: cacheImports,
},
contentStores: contentStores,
indicesToUpdate: indicesToUpdate,
frontendAttrs: frontendAttrs,
}
return &res, nil
}
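Putting `parseCacheOptions` together end to end: a sketch of a `SolveOpt` that exports the build cache to a local OCI layout directory and imports it back on the next build, assuming the CLI-style attribute names (`dest`, `src`) handled above:

```go
package example

import "github.com/moby/buildkit/client"

// localCacheRoundTrip sketches the new cache API: the "local" exporter writes
// an OCI layout (index.json is updated after solve, per the code above), and
// the importer reads it back, defaulting to the "latest" tag when no digest
// attr is given.
func localCacheRoundTrip(dir string) client.SolveOpt {
	return client.SolveOpt{
		CacheExports: []client.CacheOptionsEntry{
			{Type: "local", Attrs: map[string]string{"dest": dir}},
		},
		CacheImports: []client.CacheOptionsEntry{
			{Type: "local", Attrs: map[string]string{"src": dir}},
		},
	}
}
```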

View File

@ -43,10 +43,15 @@ type StatRequest struct {
// SolveRequest is same as frontend.SolveRequest but avoiding dependency
type SolveRequest struct {
Definition *pb.Definition
Frontend string
FrontendOpt map[string]string
ImportCacheRefs []string
Definition *pb.Definition
Frontend string
FrontendOpt map[string]string
CacheImports []CacheOptionsEntry
}
type CacheOptionsEntry struct {
Type string
Attrs map[string]string
}
type WorkerInfo struct {

View File

@ -259,13 +259,33 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*clie
}
}
}
var (
// old API
legacyRegistryCacheImports []string
// new API (CapImportCaches)
cacheImports []*pb.CacheOptionsEntry
)
supportCapImportCaches := c.caps.Supports(pb.CapImportCaches) == nil
for _, im := range creq.CacheImports {
if !supportCapImportCaches && im.Type == "registry" {
legacyRegistryCacheImports = append(legacyRegistryCacheImports, im.Attrs["ref"])
} else {
cacheImports = append(cacheImports, &pb.CacheOptionsEntry{
Type: im.Type,
Attrs: im.Attrs,
})
}
}
req := &pb.SolveRequest{
Definition: creq.Definition,
Frontend: creq.Frontend,
FrontendOpt: creq.FrontendOpt,
ImportCacheRefs: creq.ImportCacheRefs,
AllowResultReturn: true,
// old API
ImportCacheRefsDeprecated: legacyRegistryCacheImports,
// new API
CacheImports: cacheImports,
}
// backwards compatibility with inline return
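The fallback above in isolation: registry imports degrade to the legacy plain-ref list whenever the daemon did not advertise `CapImportCaches`. A self-contained sketch of the same split (with a local copy of the type/attrs pair for illustration):

```go
package main

import "fmt"

// CacheOptionsEntry mirrors the type/attrs pair introduced by this change
// (illustrative copy; the vendored code defines its own).
type CacheOptionsEntry struct {
	Type  string
	Attrs map[string]string
}

// splitCacheImports reproduces the fallback above: when CapImportCaches is
// unsupported, "registry" imports travel as legacy plain refs and everything
// else uses the new entries.
func splitCacheImports(capSupported bool, imports []CacheOptionsEntry) (legacyRefs []string, entries []CacheOptionsEntry) {
	for _, im := range imports {
		if !capSupported && im.Type == "registry" {
			legacyRefs = append(legacyRefs, im.Attrs["ref"])
			continue
		}
		entries = append(entries, im)
	}
	return legacyRefs, entries
}

func main() {
	refs, entries := splitCacheImports(false, []CacheOptionsEntry{
		{Type: "registry", Attrs: map[string]string{"ref": "example.com/app:cache"}},
		{Type: "local", Attrs: map[string]string{"src": "/tmp/cache"}},
	})
	fmt.Println(refs)    // [example.com/app:cache]
	fmt.Println(entries) // [{local map[src:/tmp/cache]}]
}
```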
@ -356,6 +376,9 @@ func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byt
}
func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) {
if err := r.c.caps.Supports(pb.CapReadDir); err != nil {
return nil, err
}
rdr := &pb.ReadDirRequest{
DirPath: req.Path,
IncludePattern: req.IncludePattern,
@ -369,6 +392,9 @@ func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*
}
func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) {
if err := r.c.caps.Supports(pb.CapStatFile); err != nil {
return nil, err
}
rdr := &pb.StatFileRequest{
Path: req.Path,
Ref: r.id,

View File

@ -18,6 +18,7 @@ const (
CapReturnMap apicaps.CapID = "returnmap"
CapReadDir apicaps.CapID = "readdir"
CapStatFile apicaps.CapID = "statfile"
CapImportCaches apicaps.CapID = "importcaches"
)
func init() {
@ -84,4 +85,11 @@ func init() {
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapImportCaches,
Name: "import caches",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}
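Capability negotiation is a plain nil-error check against the set received from the Ping handshake. A sketch, assuming the `apicaps.CapSet` type used elsewhere in the vendored code:

```go
package example

import (
	pb "github.com/moby/buildkit/frontend/gateway/pb"
	"github.com/moby/buildkit/util/apicaps"
)

// canUseCacheImports mirrors the grpcclient check above: a nil error from
// Supports means the daemon advertised the importcaches capability.
func canUseCacheImports(caps apicaps.CapSet) bool {
	return caps.Supports(pb.CapImportCaches) == nil
}
```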

File diff suppressed because it is too large

View File

@ -64,12 +64,25 @@ message SolveRequest {
pb.Definition Definition = 1;
string Frontend = 2;
map<string, string> FrontendOpt = 3;
repeated string ImportCacheRefs = 4;
// ImportCacheRefsDeprecated is deprecated in favor of the new CacheImports since BuildKit v0.4.0.
// When ImportCacheRefsDeprecated is set, the solver appends
// {.Type = "registry", .Attrs = {"ref": importCacheRef}}
// for each ImportCacheRefsDeprecated entry to CacheImports for compatibility. (planned to be removed)
repeated string ImportCacheRefsDeprecated = 4;
bool allowResultReturn = 5;
// apicaps.CapSolveInlineReturn deprecated
bool Final = 10;
bytes ExporterAttr = 11;
// CacheImports was added in BuildKit v0.4.0.
// apicaps:CapImportCaches
repeated CacheOptionsEntry CacheImports = 12;
}
// CacheOptionsEntry corresponds to the control.CacheOptionsEntry
message CacheOptionsEntry {
string Type = 1;
map<string, string> Attrs = 2;
}
message SolveResponse {
@ -122,4 +135,4 @@ message PongResponse{
repeated moby.buildkit.v1.apicaps.APICap FrontendAPICaps = 1 [(gogoproto.nullable) = false];
repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false];
repeated moby.buildkit.v1.types.WorkerRecord Workers = 3;
}
}

View File

@ -1,16 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: auth.proto
/*
Package auth is a generated protocol buffer package.
It is generated from these files:
auth.proto
It has these top-level messages:
CredentialsRequest
CredentialsResponse
*/
package auth
import proto "github.com/gogo/protobuf/proto"
@ -20,8 +10,10 @@ import math "math"
import strings "strings"
import reflect "reflect"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
import io "io"
@ -40,9 +32,37 @@ type CredentialsRequest struct {
Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"`
}
func (m *CredentialsRequest) Reset() { *m = CredentialsRequest{} }
func (*CredentialsRequest) ProtoMessage() {}
func (*CredentialsRequest) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
func (m *CredentialsRequest) Reset() { *m = CredentialsRequest{} }
func (*CredentialsRequest) ProtoMessage() {}
func (*CredentialsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_0215b2f0213c0d57, []int{0}
}
func (m *CredentialsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CredentialsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CredentialsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *CredentialsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CredentialsRequest.Merge(dst, src)
}
func (m *CredentialsRequest) XXX_Size() int {
return m.Size()
}
func (m *CredentialsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CredentialsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CredentialsRequest proto.InternalMessageInfo
func (m *CredentialsRequest) GetHost() string {
if m != nil {
@ -56,9 +76,37 @@ type CredentialsResponse struct {
Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"`
}
func (m *CredentialsResponse) Reset() { *m = CredentialsResponse{} }
func (*CredentialsResponse) ProtoMessage() {}
func (*CredentialsResponse) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
func (m *CredentialsResponse) Reset() { *m = CredentialsResponse{} }
func (*CredentialsResponse) ProtoMessage() {}
func (*CredentialsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_0215b2f0213c0d57, []int{1}
}
func (m *CredentialsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CredentialsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *CredentialsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CredentialsResponse.Merge(dst, src)
}
func (m *CredentialsResponse) XXX_Size() int {
return m.Size()
}
func (m *CredentialsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CredentialsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CredentialsResponse proto.InternalMessageInfo
func (m *CredentialsResponse) GetUsername() string {
if m != nil {
@ -167,8 +215,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Auth service
// AuthClient is the client API for Auth service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AuthClient interface {
Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error)
}
@ -183,15 +232,14 @@ func NewAuthClient(cc *grpc.ClientConn) AuthClient {
func (c *authClient) Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) {
out := new(CredentialsResponse)
err := grpc.Invoke(ctx, "/moby.filesync.v1.Auth/Credentials", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/Credentials", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Auth service
// AuthServer is the server API for Auth service.
type AuthServer interface {
Credentials(context.Context, *CredentialsRequest) (*CredentialsResponse, error)
}
@ -295,6 +343,9 @@ func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *CredentialsRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Host)
@ -305,6 +356,9 @@ func (m *CredentialsRequest) Size() (n int) {
}
func (m *CredentialsResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Username)
@ -652,10 +706,10 @@ var (
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
func init() { proto.RegisterFile("auth.proto", fileDescriptor_auth_0215b2f0213c0d57) }
var fileDescriptorAuth = []byte{
// 224 bytes of a gzipped FileDescriptorProto
var fileDescriptor_auth_0215b2f0213c0d57 = []byte{
// 233 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9,
0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0xcb, 0xcc,
0x49, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x33, 0x54, 0xd2, 0xe0, 0x12, 0x72, 0x2e, 0x4a, 0x4d,
@ -665,9 +719,10 @@ var fileDescriptorAuth = []byte{
0x16, 0xa7, 0x16, 0xe5, 0x25, 0xe6, 0xa6, 0x42, 0x95, 0xc3, 0xf9, 0x42, 0x62, 0x5c, 0x6c, 0xc1,
0xa9, 0xc9, 0x45, 0xa9, 0x25, 0x12, 0x4c, 0x60, 0x19, 0x28, 0xcf, 0x28, 0x89, 0x8b, 0xc5, 0xb1,
0xb4, 0x24, 0x43, 0x28, 0x8a, 0x8b, 0x1b, 0xc9, 0x48, 0x21, 0x15, 0x3d, 0x74, 0xe7, 0xe9, 0x61,
0xba, 0x4d, 0x4a, 0x95, 0x80, 0x2a, 0x88, 0xbb, 0x9c, 0x8c, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1,
0xba, 0x4d, 0x4a, 0x95, 0x80, 0x2a, 0x88, 0xbb, 0x9c, 0xac, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1,
0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e,
0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31,
0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x14, 0x0b, 0x28, 0x90, 0x92, 0xd8, 0xc0,
0xa1, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x73, 0xf3, 0xd5, 0x33, 0x01, 0x00, 0x00,
0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb,
0x31, 0x44, 0xb1, 0x80, 0x02, 0x2b, 0x89, 0x0d, 0x1c, 0x5a, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff,
0xff, 0x64, 0x61, 0x71, 0x59, 0x3b, 0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,132 @@
package content
import (
"context"
api "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/services/content/contentserver"
"github.com/moby/buildkit/session"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// GRPCHeaderID is a gRPC header for store ID
const GRPCHeaderID = "buildkit-attachable-store-id"
type attachableContentStore struct {
stores map[string]content.Store
}
func (cs *attachableContentStore) choose(ctx context.Context) (content.Store, error) {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return nil, errors.Wrap(errdefs.ErrInvalidArgument, "request lacks metadata")
}
values := md[GRPCHeaderID]
if len(values) == 0 {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "request lacks metadata %q", GRPCHeaderID)
}
id := values[0]
store, ok := cs.stores[id]
if !ok {
return nil, errors.Wrapf(errdefs.ErrNotFound, "unknown store %s", id)
}
return store, nil
}
func (cs *attachableContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
store, err := cs.choose(ctx)
if err != nil {
return content.Info{}, err
}
return store.Info(ctx, dgst)
}
func (cs *attachableContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
store, err := cs.choose(ctx)
if err != nil {
return content.Info{}, err
}
return store.Update(ctx, info, fieldpaths...)
}
func (cs *attachableContentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error {
store, err := cs.choose(ctx)
if err != nil {
return err
}
return store.Walk(ctx, fn, fs...)
}
func (cs *attachableContentStore) Delete(ctx context.Context, dgst digest.Digest) error {
store, err := cs.choose(ctx)
if err != nil {
return err
}
return store.Delete(ctx, dgst)
}
func (cs *attachableContentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
store, err := cs.choose(ctx)
if err != nil {
return nil, err
}
return store.ListStatuses(ctx, fs...)
}
func (cs *attachableContentStore) Status(ctx context.Context, ref string) (content.Status, error) {
store, err := cs.choose(ctx)
if err != nil {
return content.Status{}, err
}
return store.Status(ctx, ref)
}
func (cs *attachableContentStore) Abort(ctx context.Context, ref string) error {
store, err := cs.choose(ctx)
if err != nil {
return err
}
return store.Abort(ctx, ref)
}
func (cs *attachableContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
store, err := cs.choose(ctx)
if err != nil {
return nil, err
}
return store.Writer(ctx, opts...)
}
func (cs *attachableContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
store, err := cs.choose(ctx)
if err != nil {
return nil, err
}
return store.ReaderAt(ctx, desc)
}
type attachable struct {
service api.ContentServer
}
// NewAttachable creates a session.Attachable from aggregated stores.
// A key of the store map is an ID string used for choosing the underlying store.
func NewAttachable(stores map[string]content.Store) session.Attachable {
store := &attachableContentStore{stores: stores}
service := contentserver.New(store)
a := attachable{
service: service,
}
return &a
}
func (a *attachable) Register(server *grpc.Server) {
api.RegisterContentServer(server, a.service)
}
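Usage sketch for the attachable side: aggregate one local store per cache directory under the `"local:"+dir` keys that `parseCacheOptions` uses, and expose them on the session. The calls below (`contentlocal.NewStore`, `NewAttachable`, `Session.Allow`) all appear elsewhere in this commit:

```go
package example

import (
	"github.com/containerd/containerd/content"
	contentlocal "github.com/containerd/containerd/content/local"
	"github.com/moby/buildkit/session"
	sessioncontent "github.com/moby/buildkit/session/content"
)

// attachLocalStores registers one content store per directory; the map key is
// the store ID the daemon later sends in the buildkit-attachable-store-id
// gRPC header to pick a store.
func attachLocalStores(s *session.Session, dirs ...string) error {
	stores := make(map[string]content.Store, len(dirs))
	for _, d := range dirs {
		cs, err := contentlocal.NewStore(d)
		if err != nil {
			return err
		}
		stores["local:"+d] = cs
	}
	s.Allow(sessioncontent.NewAttachable(stores))
	return nil
}
```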

View File

@ -0,0 +1,84 @@
package content
import (
"context"
api "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/proxy"
"github.com/moby/buildkit/session"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"google.golang.org/grpc/metadata"
)
type callerContentStore struct {
store content.Store
storeID string
}
func (cs *callerContentStore) choose(ctx context.Context) context.Context {
nsheader := metadata.Pairs(GRPCHeaderID, cs.storeID)
md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context.
if !ok {
md = nsheader
} else {
// order ensures the latest is first in this list.
md = metadata.Join(nsheader, md)
}
return metadata.NewOutgoingContext(ctx, md)
}
func (cs *callerContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
ctx = cs.choose(ctx)
return cs.store.Info(ctx, dgst)
}
func (cs *callerContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
ctx = cs.choose(ctx)
return cs.store.Update(ctx, info, fieldpaths...)
}
func (cs *callerContentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error {
ctx = cs.choose(ctx)
return cs.store.Walk(ctx, fn, fs...)
}
func (cs *callerContentStore) Delete(ctx context.Context, dgst digest.Digest) error {
ctx = cs.choose(ctx)
return cs.store.Delete(ctx, dgst)
}
func (cs *callerContentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
ctx = cs.choose(ctx)
return cs.store.ListStatuses(ctx, fs...)
}
func (cs *callerContentStore) Status(ctx context.Context, ref string) (content.Status, error) {
ctx = cs.choose(ctx)
return cs.store.Status(ctx, ref)
}
func (cs *callerContentStore) Abort(ctx context.Context, ref string) error {
ctx = cs.choose(ctx)
return cs.store.Abort(ctx, ref)
}
func (cs *callerContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
ctx = cs.choose(ctx)
return cs.store.Writer(ctx, opts...)
}
func (cs *callerContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
ctx = cs.choose(ctx)
return cs.store.ReaderAt(ctx, desc)
}
// NewCallerStore creates a content.Store from a session.Caller with the specified storeID
func NewCallerStore(c session.Caller, storeID string) content.Store {
client := api.NewContentClient(c.Conn())
return &callerContentStore{
store: proxy.NewContentStore(client),
storeID: storeID,
}
}
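And the daemon-side counterpart: wrap a session caller as a `content.Store` bound to one store ID, then read blob metadata through it. A sketch under the same import-path assumptions (the `session.Caller` parameter type is assumed from the signature above):

```go
package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/session"
	sessioncontent "github.com/moby/buildkit/session/content"
	digest "github.com/opencontainers/go-digest"
)

// blobInfo resolves blob metadata from the client store registered under
// storeID; choose() injects the store ID header on every call.
func blobInfo(ctx context.Context, c session.Caller, storeID string, dgst digest.Digest) (content.Info, error) {
	store := sessioncontent.NewCallerStore(c, storeID)
	return store.Info(ctx, dgst)
}
```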

View File

@ -57,7 +57,7 @@ func (wc *streamWriterCloser) Close() error {
return nil
}
func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error {
func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error {
st := time.Now()
defer func() {
logrus.Debugf("diffcopy took: %v", time.Since(st))
@ -73,6 +73,7 @@ func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progres
NotifyHashed: cf,
ContentHasher: ch,
ProgressCb: progress,
Filter: fsutil.FilterFunc(filter),
})
}
@ -82,10 +83,10 @@ func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
}
return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
Merge: true,
Filter: func() func(*fstypes.Stat) bool {
Filter: func() func(string, *fstypes.Stat) bool {
uid := os.Getuid()
gid := os.Getgid()
return func(st *fstypes.Stat) bool {
return func(p string, st *fstypes.Stat) bool {
st.Uid = uint32(uid)
st.Gid = uint32(gid)
return true

View File

@ -35,7 +35,7 @@ type SyncedDir struct {
Name string
Dir string
Excludes []string
Map func(*fstypes.Stat) bool
Map func(string, *fstypes.Stat) bool
}
// NewFSSyncProvider creates a new provider for sending files from client
@ -129,7 +129,7 @@ type progressCb func(int, bool)
type protocol struct {
name string
sendFn func(stream grpc.Stream, fs fsutil.FS, progress progressCb) error
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error
}
func isProtoSupported(p string) bool {
@ -158,6 +158,7 @@ type FSSendRequestOpt struct {
DestDir string
CacheUpdater CacheUpdater
ProgressCb func(int, bool)
Filter func(string, *fstypes.Stat) bool
}
// CacheUpdater is an object capable of sending notifications for the cache hash changes
@ -225,7 +226,7 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
panic(fmt.Sprintf("invalid protocol: %q", pr.name))
}
return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb)
return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Filter)
}
// NewFSSyncTargetDir allows writing into a directory
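The Filter/Map callbacks in these two files change shape from `func(*fstypes.Stat) bool` to `func(string, *fstypes.Stat) bool`, threading the entry path through so receivers can make per-path decisions. A sketch of a path-aware filter, assuming (as fsutil filters conventionally behave) that returning false drops the entry:

```go
package example

import (
	"strings"

	fstypes "github.com/tonistiigi/fsutil/types"
)

// pathAwareFilter uses the new signature: skip anything under tmp/ and
// normalize ownership on everything else, as syncTargetDiffCopy does.
func pathAwareFilter(p string, st *fstypes.Stat) bool {
	if strings.HasPrefix(p, "tmp/") {
		return false // assumption: false excludes the entry from the transfer
	}
	st.Uid, st.Gid = 0, 0
	return true
}
```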

View File

@ -1,15 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: filesync.proto
/*
Package filesync is a generated protocol buffer package.
It is generated from these files:
filesync.proto
It has these top-level messages:
BytesMessage
*/
package filesync
import proto "github.com/gogo/protobuf/proto"
@ -21,8 +12,10 @@ import bytes "bytes"
import strings "strings"
import reflect "reflect"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
import io "io"
@ -42,9 +35,37 @@ type BytesMessage struct {
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *BytesMessage) Reset() { *m = BytesMessage{} }
func (*BytesMessage) ProtoMessage() {}
func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorFilesync, []int{0} }
func (m *BytesMessage) Reset() { *m = BytesMessage{} }
func (*BytesMessage) ProtoMessage() {}
func (*BytesMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_filesync_26f8b7bce2e5ac0e, []int{0}
}
func (m *BytesMessage) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *BytesMessage) XXX_Merge(src proto.Message) {
xxx_messageInfo_BytesMessage.Merge(dst, src)
}
func (m *BytesMessage) XXX_Size() int {
return m.Size()
}
func (m *BytesMessage) XXX_DiscardUnknown() {
xxx_messageInfo_BytesMessage.DiscardUnknown(m)
}
var xxx_messageInfo_BytesMessage proto.InternalMessageInfo
func (m *BytesMessage) GetData() []byte {
if m != nil {
@ -107,8 +128,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for FileSync service
// FileSyncClient is the client API for FileSync service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type FileSyncClient interface {
DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error)
TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error)
@ -123,7 +145,7 @@ func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient {
}
func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) {
stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSync/DiffCopy", opts...)
stream, err := c.cc.NewStream(ctx, &_FileSync_serviceDesc.Streams[0], "/moby.filesync.v1.FileSync/DiffCopy", opts...)
if err != nil {
return nil, err
}
@ -154,7 +176,7 @@ func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) {
}
func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) {
stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[1], c.cc, "/moby.filesync.v1.FileSync/TarStream", opts...)
stream, err := c.cc.NewStream(ctx, &_FileSync_serviceDesc.Streams[1], "/moby.filesync.v1.FileSync/TarStream", opts...)
if err != nil {
return nil, err
}
@ -184,8 +206,7 @@ func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) {
return m, nil
}
// Server API for FileSync service
// FileSyncServer is the server API for FileSync service.
type FileSyncServer interface {
DiffCopy(FileSync_DiffCopyServer) error
TarStream(FileSync_TarStreamServer) error
@ -268,8 +289,9 @@ var _FileSync_serviceDesc = grpc.ServiceDesc{
Metadata: "filesync.proto",
}
// Client API for FileSend service
// FileSendClient is the client API for FileSend service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type FileSendClient interface {
DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error)
}
@ -283,7 +305,7 @@ func NewFileSendClient(cc *grpc.ClientConn) FileSendClient {
}
func (c *fileSendClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) {
stream, err := grpc.NewClientStream(ctx, &_FileSend_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSend/DiffCopy", opts...)
stream, err := c.cc.NewStream(ctx, &_FileSend_serviceDesc.Streams[0], "/moby.filesync.v1.FileSend/DiffCopy", opts...)
if err != nil {
return nil, err
}
@ -313,8 +335,7 @@ func (x *fileSendDiffCopyClient) Recv() (*BytesMessage, error) {
return m, nil
}
// Server API for FileSend service
// FileSendServer is the server API for FileSend service.
type FileSendServer interface {
DiffCopy(FileSend_DiffCopyServer) error
}
@ -398,6 +419,9 @@ func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *BytesMessage) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Data)
@ -624,10 +648,10 @@ var (
ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) }
func init() { proto.RegisterFile("filesync.proto", fileDescriptor_filesync_26f8b7bce2e5ac0e) }
var fileDescriptorFilesync = []byte{
// 208 bytes of a gzipped FileDescriptorProto
var fileDescriptor_filesync_26f8b7bce2e5ac0e = []byte{
// 217 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49,
0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa,
0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6,
@ -636,9 +660,10 @@ var fileDescriptorFilesync = []byte{
0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85,
0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90,
0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x34, 0x8a,
0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0x32, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6,
0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0xb2, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6,
0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78,
0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7,
0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0,
0xc1, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x72, 0x81, 0x1a, 0x91, 0x90, 0x01, 0x00, 0x00,
0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c,
0xc7, 0x10, 0xc5, 0x01, 0x33, 0x33, 0x89, 0x0d, 0x1c, 0x0d, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff,
0xff, 0x5e, 0xce, 0x52, 0xb3, 0x98, 0x01, 0x00, 0x00,
}

View File

@ -1,16 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: secrets.proto
/*
Package secrets is a generated protocol buffer package.
It is generated from these files:
secrets.proto
It has these top-level messages:
GetSecretRequest
GetSecretResponse
*/
package secrets
import proto "github.com/gogo/protobuf/proto"
@ -21,10 +11,12 @@ import bytes "bytes"
import strings "strings"
import reflect "reflect"
import sortkeys "github.com/gogo/protobuf/sortkeys"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
import io "io"
@ -41,12 +33,40 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type GetSecretRequest struct {
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} }
func (*GetSecretRequest) ProtoMessage() {}
func (*GetSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorSecrets, []int{0} }
func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} }
func (*GetSecretRequest) ProtoMessage() {}
func (*GetSecretRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_secrets_21bd4adec74a381e, []int{0}
}
func (m *GetSecretRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetSecretRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetSecretRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetSecretRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetSecretRequest.Merge(dst, src)
}
func (m *GetSecretRequest) XXX_Size() int {
return m.Size()
}
func (m *GetSecretRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetSecretRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetSecretRequest proto.InternalMessageInfo
func (m *GetSecretRequest) GetID() string {
if m != nil {
@ -66,9 +86,37 @@ type GetSecretResponse struct {
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} }
func (*GetSecretResponse) ProtoMessage() {}
func (*GetSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorSecrets, []int{1} }
func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} }
func (*GetSecretResponse) ProtoMessage() {}
func (*GetSecretResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_secrets_21bd4adec74a381e, []int{1}
}
func (m *GetSecretResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GetSecretResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GetSecretResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *GetSecretResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetSecretResponse.Merge(dst, src)
}
func (m *GetSecretResponse) XXX_Size() int {
return m.Size()
}
func (m *GetSecretResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetSecretResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetSecretResponse proto.InternalMessageInfo
func (m *GetSecretResponse) GetData() []byte {
if m != nil {
@ -79,6 +127,7 @@ func (m *GetSecretResponse) GetData() []byte {
func init() {
proto.RegisterType((*GetSecretRequest)(nil), "moby.buildkit.secrets.v1.GetSecretRequest")
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.secrets.v1.GetSecretRequest.AnnotationsEntry")
proto.RegisterType((*GetSecretResponse)(nil), "moby.buildkit.secrets.v1.GetSecretResponse")
}
func (this *GetSecretRequest) Equal(that interface{}) bool {
@ -148,7 +197,7 @@ func (this *GetSecretRequest) GoString() string {
for k, _ := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
sortkeys.Strings(keysForAnnotations)
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%#v: %#v,", k, this.Annotations[k])
@ -187,8 +236,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Secrets service
// SecretsClient is the client API for Secrets service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SecretsClient interface {
GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error)
}
@ -203,15 +253,14 @@ func NewSecretsClient(cc *grpc.ClientConn) SecretsClient {
func (c *secretsClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) {
out := new(GetSecretResponse)
err := grpc.Invoke(ctx, "/moby.buildkit.secrets.v1.Secrets/GetSecret", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/moby.buildkit.secrets.v1.Secrets/GetSecret", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Secrets service
// SecretsServer is the server API for Secrets service.
type SecretsServer interface {
GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error)
}
@ -326,6 +375,9 @@ func encodeVarintSecrets(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *GetSecretRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ID)
@ -344,6 +396,9 @@ func (m *GetSecretRequest) Size() (n int) {
}
func (m *GetSecretResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Data)
@ -374,7 +429,7 @@ func (this *GetSecretRequest) String() string {
for k, _ := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
sortkeys.Strings(keysForAnnotations)
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
@ -788,10 +843,10 @@ var (
ErrIntOverflowSecrets = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("secrets.proto", fileDescriptorSecrets) }
func init() { proto.RegisterFile("secrets.proto", fileDescriptor_secrets_21bd4adec74a381e) }
var fileDescriptorSecrets = []byte{
// 279 bytes of a gzipped FileDescriptorProto
var fileDescriptor_secrets_21bd4adec74a381e = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x4e, 0x4d, 0x2e,
0x4a, 0x2d, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4,
0x4b, 0x2a, 0xcd, 0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x83, 0x49, 0x96, 0x19, 0x2a, 0x1d, 0x64,
@ -805,9 +860,9 @@ var fileDescriptorSecrets = []byte{
0x41, 0x24, 0x1b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x84, 0xb8, 0x58, 0x52, 0x12, 0x4b,
0x12, 0xc1, 0x26, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xf9, 0x5c, 0xec, 0x10, 0x55, 0xc5, 0x42, 0x29,
0x5c, 0x9c, 0x70, 0x3d, 0x42, 0x5a, 0xc4, 0x7b, 0x45, 0x4a, 0x9b, 0x28, 0xb5, 0x10, 0x47, 0x38,
0x99, 0x5e, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f,
0xd9, 0x5e, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f,
0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07,
0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86,
0x28, 0x76, 0xa8, 0x59, 0x49, 0x6c, 0xe0, 0x58, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x05,
0x4e, 0x56, 0xde, 0xc6, 0x01, 0x00, 0x00,
0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x62, 0x87, 0x9a, 0x99, 0xc4, 0x06, 0x8e,
0x3d, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x38, 0xec, 0x1f, 0xce, 0x01, 0x00, 0x00,
}

View File

@ -24,7 +24,7 @@ const (
// Dialer returns a connection that can be used by the session
type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
// Attachable defines a feature that can be expsed on a session
// Attachable defines a feature that can be exposed on a session
type Attachable interface {
Register(*grpc.Server)
}
@ -66,7 +66,7 @@ func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) {
return s, nil
}
// Allow enable a given service to be reachable through the grpc session
// Allow enables a given service to be reachable through the grpc session
func (s *Session) Allow(a Attachable) {
a.Register(s.grpcServer)
}

View File

@ -1,17 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: ssh.proto
/*
Package sshforward is a generated protocol buffer package.
It is generated from these files:
ssh.proto
It has these top-level messages:
BytesMessage
CheckAgentRequest
CheckAgentResponse
*/
package sshforward
import proto "github.com/gogo/protobuf/proto"
@ -23,8 +12,10 @@ import bytes "bytes"
import strings "strings"
import reflect "reflect"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
import io "io"
@ -44,9 +35,37 @@ type BytesMessage struct {
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *BytesMessage) Reset() { *m = BytesMessage{} }
func (*BytesMessage) ProtoMessage() {}
func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{0} }
func (m *BytesMessage) Reset() { *m = BytesMessage{} }
func (*BytesMessage) ProtoMessage() {}
func (*BytesMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_ssh_13bd2c34c031d472, []int{0}
}
func (m *BytesMessage) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *BytesMessage) XXX_Merge(src proto.Message) {
xxx_messageInfo_BytesMessage.Merge(dst, src)
}
func (m *BytesMessage) XXX_Size() int {
return m.Size()
}
func (m *BytesMessage) XXX_DiscardUnknown() {
xxx_messageInfo_BytesMessage.DiscardUnknown(m)
}
var xxx_messageInfo_BytesMessage proto.InternalMessageInfo
func (m *BytesMessage) GetData() []byte {
if m != nil {
@ -59,9 +78,37 @@ type CheckAgentRequest struct {
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
}
func (m *CheckAgentRequest) Reset() { *m = CheckAgentRequest{} }
func (*CheckAgentRequest) ProtoMessage() {}
func (*CheckAgentRequest) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{1} }
func (m *CheckAgentRequest) Reset() { *m = CheckAgentRequest{} }
func (*CheckAgentRequest) ProtoMessage() {}
func (*CheckAgentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_ssh_13bd2c34c031d472, []int{1}
}
func (m *CheckAgentRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CheckAgentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CheckAgentRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *CheckAgentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CheckAgentRequest.Merge(dst, src)
}
func (m *CheckAgentRequest) XXX_Size() int {
return m.Size()
}
func (m *CheckAgentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CheckAgentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CheckAgentRequest proto.InternalMessageInfo
func (m *CheckAgentRequest) GetID() string {
if m != nil {
@ -73,9 +120,37 @@ func (m *CheckAgentRequest) GetID() string {
type CheckAgentResponse struct {
}
func (m *CheckAgentResponse) Reset() { *m = CheckAgentResponse{} }
func (*CheckAgentResponse) ProtoMessage() {}
func (*CheckAgentResponse) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{2} }
func (m *CheckAgentResponse) Reset() { *m = CheckAgentResponse{} }
func (*CheckAgentResponse) ProtoMessage() {}
func (*CheckAgentResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_ssh_13bd2c34c031d472, []int{2}
}
func (m *CheckAgentResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CheckAgentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CheckAgentResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *CheckAgentResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CheckAgentResponse.Merge(dst, src)
}
func (m *CheckAgentResponse) XXX_Size() int {
return m.Size()
}
func (m *CheckAgentResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CheckAgentResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CheckAgentResponse proto.InternalMessageInfo
func init() {
proto.RegisterType((*BytesMessage)(nil), "moby.sshforward.v1.BytesMessage")
@ -197,8 +272,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for SSH service
// SSHClient is the client API for SSH service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SSHClient interface {
CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error)
ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error)
@ -214,7 +290,7 @@ func NewSSHClient(cc *grpc.ClientConn) SSHClient {
func (c *sSHClient) CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error) {
out := new(CheckAgentResponse)
err := grpc.Invoke(ctx, "/moby.sshforward.v1.SSH/CheckAgent", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/moby.sshforward.v1.SSH/CheckAgent", in, out, opts...)
if err != nil {
return nil, err
}
@ -222,7 +298,7 @@ func (c *sSHClient) CheckAgent(ctx context.Context, in *CheckAgentRequest, opts
}
func (c *sSHClient) ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error) {
stream, err := grpc.NewClientStream(ctx, &_SSH_serviceDesc.Streams[0], c.cc, "/moby.sshforward.v1.SSH/ForwardAgent", opts...)
stream, err := c.cc.NewStream(ctx, &_SSH_serviceDesc.Streams[0], "/moby.sshforward.v1.SSH/ForwardAgent", opts...)
if err != nil {
return nil, err
}
@ -252,8 +328,7 @@ func (x *sSHForwardAgentClient) Recv() (*BytesMessage, error) {
return m, nil
}
// Server API for SSH service
// SSHServer is the server API for SSH service.
type SSHServer interface {
CheckAgent(context.Context, *CheckAgentRequest) (*CheckAgentResponse, error)
ForwardAgent(SSH_ForwardAgentServer) error
@ -403,6 +478,9 @@ func encodeVarintSsh(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *BytesMessage) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Data)
@ -413,6 +491,9 @@ func (m *BytesMessage) Size() (n int) {
}
func (m *CheckAgentRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ID)
@ -423,6 +504,9 @@ func (m *CheckAgentRequest) Size() (n int) {
}
func (m *CheckAgentResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
return n
@ -793,10 +877,10 @@ var (
ErrIntOverflowSsh = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("ssh.proto", fileDescriptorSsh) }
func init() { proto.RegisterFile("ssh.proto", fileDescriptor_ssh_13bd2c34c031d472) }
var fileDescriptorSsh = []byte{
// 243 bytes of a gzipped FileDescriptorProto
var fileDescriptor_ssh_13bd2c34c031d472 = []byte{
// 252 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2c, 0x2e, 0xce, 0xd0,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xca, 0xcd, 0x4f, 0xaa, 0xd4, 0x2b, 0x2e, 0xce, 0x48,
0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xd1, 0x2b, 0x33, 0x54, 0x52, 0xe2, 0xe2, 0x71, 0xaa, 0x2c,
@ -807,10 +891,10 @@ var fileDescriptorSsh = []byte{
0x90, 0x15, 0x15, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x1a, 0xed, 0x62, 0xe4, 0x62, 0x0e, 0x0e, 0xf6,
0x10, 0x8a, 0xe6, 0xe2, 0x42, 0xc8, 0x0a, 0xa9, 0xea, 0x61, 0xba, 0x44, 0x0f, 0xc3, 0x0a, 0x29,
0x35, 0x42, 0xca, 0x20, 0x96, 0x08, 0x85, 0x71, 0xf1, 0xb8, 0x41, 0x14, 0x40, 0x8c, 0x57, 0xc0,
0xa6, 0x0f, 0xd9, 0x97, 0x52, 0x04, 0x55, 0x68, 0x30, 0x1a, 0x30, 0x3a, 0x59, 0x5c, 0x78, 0x28,
0xa6, 0x0f, 0xd9, 0x97, 0x52, 0x04, 0x55, 0x68, 0x30, 0x1a, 0x30, 0x3a, 0x39, 0x5c, 0x78, 0x28,
0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c,
0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f,
0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x28, 0x2e, 0x84, 0x69,
0x49, 0x6c, 0xe0, 0x00, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x31, 0x3e, 0x40, 0xab, 0x7d,
0x01, 0x00, 0x00,
0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18,
0x6e, 0x3c, 0x96, 0x63, 0x88, 0xe2, 0x42, 0x98, 0x9a, 0xc4, 0x06, 0x0e, 0x78, 0x63, 0x40, 0x00,
0x00, 0x00, 0xff, 0xff, 0x6c, 0xe6, 0x6d, 0xb7, 0x85, 0x01, 0x00, 0x00,
}

View File

@ -21,3 +21,5 @@ const AttrImageResolveModeDefault = "default"
const AttrImageResolveModeForcePull = "pull"
const AttrImageResolveModePreferLocal = "local"
const AttrImageRecordType = "image.recordtype"
type IsFileAction = isFileAction_Action

View File

@ -33,6 +33,7 @@ const (
CapExecMetaBase apicaps.CapID = "exec.meta.base"
CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv"
CapExecMetaNetwork apicaps.CapID = "exec.meta.network"
CapExecMetaSecurity apicaps.CapID = "exec.meta.security"
CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath"
CapExecMountBind apicaps.CapID = "exec.mount.bind"
CapExecMountCache apicaps.CapID = "exec.mount.cache"
@ -43,6 +44,8 @@ const (
CapExecMountSSH apicaps.CapID = "exec.mount.ssh"
CapExecCgroupsMounted apicaps.CapID = "exec.cgroup"
CapFileBase apicaps.CapID = "file.base"
CapConstraints apicaps.CapID = "constraints"
CapPlatform apicaps.CapID = "platform"
@ -178,6 +181,12 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMetaSecurity,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountBind,
Enabled: true,
@ -226,6 +235,16 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapFileBase,
Enabled: true,
Status: apicaps.CapStatusPrerelease,
SupportedHint: map[string]string{
"docker": "Docker v19.03",
"buildkit": "BuildKit v0.5.0",
},
})
Caps.Init(apicaps.Cap{
ID: CapConstraints,
Enabled: true,

File diff suppressed because it is too large

View File

@ -15,7 +15,7 @@ message Op {
oneof op {
ExecOp exec = 2;
SourceOp source = 3;
CopyOp copy = 4;
FileOp file = 4;
BuildOp build = 5;
}
Platform platform = 10;
@ -44,6 +44,7 @@ message ExecOp {
Meta meta = 1;
repeated Mount mounts = 2;
NetMode network = 3;
SecurityMode security = 4;
}
// Meta is a set of arguments for ExecOp.
@ -64,6 +65,11 @@ enum NetMode {
NONE = 2;
}
enum SecurityMode {
SANDBOX = 0;
INSECURE = 1; // privileged mode
}
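On the Go side the new field is a plain enum on ExecOp. A sketch, assuming the standard gogo-generated names (`pb.SecurityMode_INSECURE`, field `Security`):

```go
package example

import pb "github.com/moby/buildkit/solver/pb"

// insecureExec marks one exec as privileged; the solver gates this on the
// security.insecure entitlement and the exec.meta.security capability added
// in this commit.
func insecureExec(meta *pb.Meta) *pb.ExecOp {
	return &pb.ExecOp{
		Meta:     meta,
		Security: pb.SecurityMode_INSECURE,
	}
}
```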
// Mount specifies how to mount an input Op as a filesystem.
message Mount {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
@ -134,18 +140,6 @@ message SSHOpt {
bool optional = 5;
}
// CopyOp copies files across Ops.
message CopyOp {
repeated CopySource src = 1;
string dest = 2;
}
// CopySource specifies a source for CopyOp.
message CopySource {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
string selector = 2;
}
// SourceOp specifies a source such as build contexts and images.
message SourceOp {
// TODO: use source type or any type instead of URL protocol.
@ -211,4 +205,101 @@ message Definition {
message HostIP {
string Host = 1;
string IP = 2;
}
message FileOp {
repeated FileAction actions = 2;
}
message FileAction {
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // could be real input or target (target index + max input index)
int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//--
int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
oneof action {
// FileActionCopy copies files from secondaryInput on top of input
FileActionCopy copy = 4;
// FileActionMkFile creates a new file
FileActionMkFile mkfile = 5;
// FileActionMkDir creates a new directory
FileActionMkDir mkdir = 6;
// FileActionRm removes a file
FileActionRm rm = 7;
}
}
message FileActionCopy {
// src is the source path
string src = 1;
// dest path
string dest = 2;
// optional owner override
ChownOpt owner = 3;
// optional permission bits override
int32 mode = 4;
// followSymlink resolves symlinks in src
bool followSymlink = 5;
// dirCopyContents only copies contents if src is a directory
bool dirCopyContents = 6;
// attemptUnpackDockerCompatibility detects if src is an archive and, if so, unpacks it instead
bool attemptUnpackDockerCompatibility = 7;
// createDestPath creates dest path directories if needed
bool createDestPath = 8;
// allowWildcard allows filepath.Match wildcards in src path
bool allowWildcard = 9;
// allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files
bool allowEmptyWildcard = 10;
// optional created time override
int64 timestamp = 11;
}
message FileActionMkFile {
// path for the new file
string path = 1;
// permission bits
int32 mode = 2;
// data is the new file contents
bytes data = 3;
// optional owner for the new file
ChownOpt owner = 4;
// optional created time override
int64 timestamp = 5;
}
message FileActionMkDir {
// path for the new directory
string path = 1;
// permission bits
int32 mode = 2;
// makeParents creates parent directories as well if needed
bool makeParents = 3;
// optional owner for the new directory
ChownOpt owner = 4;
// optional created time override
int64 timestamp = 5;
}
message FileActionRm {
// path to remove
string path = 1;
// allowNotFound doesn't fail the rm if file is not found
bool allowNotFound = 2;
// allowWildcard allows filepath.Match wildcards in path
bool allowWildcard = 3;
}
message ChownOpt {
UserOpt user = 1;
UserOpt group = 2;
}
message UserOpt {
oneof user {
NamedUserOpt byName = 1;
uint32 byID = 2;
}
}
message NamedUserOpt {
string name = 1;
int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
}
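Taken together, FileOp replaces the removed CopyOp with a small action language, gated by the file.base capability registered earlier. A sketch of one mkfile action in the generated Go types, assuming the usual gogo naming for the oneof wrappers (`pb.FileAction_Mkfile`) and that -1 marks an unused input:

```go
package example

import pb "github.com/moby/buildkit/solver/pb"

// mkHostname sketches a one-action FileOp: write /etc/hostname on top of
// input 0 and expose the result as output 0. SecondaryInput is unused for
// mkfile, so it is left at -1 (assumption).
func mkHostname() *pb.FileOp {
	return &pb.FileOp{
		Actions: []*pb.FileAction{{
			Input:          0,
			SecondaryInput: -1,
			Output:         0,
			Action: &pb.FileAction_Mkfile{
				Mkfile: &pb.FileActionMkFile{
					Path: "/etc/hostname",
					Mode: 0644,
					Data: []byte("buildkit\n"),
				},
			},
		}},
	}
}
```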

View File

@ -1,15 +1,6 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: caps.proto
/*
Package moby_buildkit_v1_apicaps is a generated protocol buffer package.
It is generated from these files:
caps.proto
It has these top-level messages:
APICap
*/
package moby_buildkit_v1_apicaps
import proto "github.com/gogo/protobuf/proto"
@ -32,18 +23,49 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// APICap defines a capability supported by the service
type APICap struct {
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
Enabled bool `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"`
Deprecated bool `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"`
DisabledReason string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"`
DisabledReasonMsg string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"`
DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"`
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
Enabled bool `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"`
Deprecated bool `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"`
DisabledReason string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"`
DisabledReasonMsg string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"`
DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *APICap) Reset() { *m = APICap{} }
func (m *APICap) String() string { return proto.CompactTextString(m) }
func (*APICap) ProtoMessage() {}
func (*APICap) Descriptor() ([]byte, []int) { return fileDescriptorCaps, []int{0} }
func (m *APICap) Reset() { *m = APICap{} }
func (m *APICap) String() string { return proto.CompactTextString(m) }
func (*APICap) ProtoMessage() {}
func (*APICap) Descriptor() ([]byte, []int) {
return fileDescriptor_caps_04e1bcd232e9a565, []int{0}
}
func (m *APICap) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *APICap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_APICap.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *APICap) XXX_Merge(src proto.Message) {
xxx_messageInfo_APICap.Merge(dst, src)
}
func (m *APICap) XXX_Size() int {
return m.Size()
}
func (m *APICap) XXX_DiscardUnknown() {
xxx_messageInfo_APICap.DiscardUnknown(m)
}
var xxx_messageInfo_APICap proto.InternalMessageInfo
func (m *APICap) GetID() string {
if m != nil {
@ -149,6 +171,9 @@ func (m *APICap) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledAlternative)))
i += copy(dAtA[i:], m.DisabledAlternative)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
@ -162,6 +187,9 @@ func encodeVarintCaps(dAtA []byte, offset int, v uint64) int {
return offset + 1
}
func (m *APICap) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ID)
@ -186,6 +214,9 @@ func (m *APICap) Size() (n int) {
if l > 0 {
n += 1 + l + sovCaps(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
@ -399,6 +430,7 @@ func (m *APICap) Unmarshal(dAtA []byte) error {
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
@ -513,9 +545,9 @@ var (
ErrIntOverflowCaps = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("caps.proto", fileDescriptorCaps) }
func init() { proto.RegisterFile("caps.proto", fileDescriptor_caps_04e1bcd232e9a565) }
var fileDescriptorCaps = []byte{
var fileDescriptor_caps_04e1bcd232e9a565 = []byte{
// 236 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x4e, 0x2c, 0x28,
0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0x2a, 0xcd,

View File

@ -1,26 +1,19 @@
package entitlements
import "github.com/pkg/errors"
import (
"github.com/pkg/errors"
)
type Entitlement string
const (
EntitlementSecurityConfined Entitlement = "security.confined"
EntitlementSecurityUnconfined Entitlement = "security.unconfined" // unimplemented
EntitlementNetworkHost Entitlement = "network.host"
EntitlementNetworkNone Entitlement = "network.none"
EntitlementSecurityInsecure Entitlement = "security.insecure"
EntitlementNetworkHost Entitlement = "network.host"
)
var all = map[Entitlement]struct{}{
EntitlementSecurityConfined: {},
EntitlementSecurityUnconfined: {},
EntitlementNetworkHost: {},
EntitlementNetworkNone: {},
}
var defaults = map[Entitlement]struct{}{
EntitlementSecurityConfined: {},
EntitlementNetworkNone: {},
EntitlementSecurityInsecure: {},
EntitlementNetworkHost: {},
}
func Parse(s string) (Entitlement, error) {
@ -56,9 +49,6 @@ func WhiteList(allowed, supported []Entitlement) (Set, error) {
m[e] = struct{}{}
}
for e := range defaults {
m[e] = struct{}{}
}
return Set(m), nil
}
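The parse-then-whitelist flow above is easiest to see end to end. A minimal sketch, assuming only the Parse and WhiteList signatures shown in this diff (the daemon-side wiring that normally supplies these slices is omitted):

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/util/entitlements"
)

func main() {
	// Parse a user-supplied --allow value into an Entitlement.
	e, err := entitlements.Parse("security.insecure")
	if err != nil {
		panic(err)
	}
	// Build the effective set from user-allowed and daemon-supported
	// entitlements; with defaults removed, only explicit grants remain.
	set, err := entitlements.WhiteList(
		[]entitlements.Entitlement{e},
		[]entitlements.Entitlement{
			entitlements.EntitlementSecurityInsecure,
			entitlements.EntitlementNetworkHost,
		},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(set)
}
```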

View File

@ -0,0 +1,67 @@
package entitlements
import (
"context"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/oci"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// WithInsecureSpec sets the spec with all capabilities.
func WithInsecureSpec() oci.SpecOpts {
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
addCaps := []string{
"CAP_FSETID",
"CAP_KILL",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETPCAP",
"CAP_SETFCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_AUDIT_WRITE",
"CAP_MAC_ADMIN",
"CAP_MAC_OVERRIDE",
"CAP_DAC_READ_SEARCH",
"CAP_SYS_PTRACE",
"CAP_SYS_MODULE",
"CAP_SYSLOG",
"CAP_SYS_RAWIO",
"CAP_SYS_ADMIN",
"CAP_LINUX_IMMUTABLE",
"CAP_SYS_BOOT",
"CAP_SYS_NICE",
"CAP_SYS_PACCT",
"CAP_SYS_TTY_CONFIG",
"CAP_SYS_TIME",
"CAP_WAKE_ALARM",
"CAP_AUDIT_READ",
"CAP_AUDIT_CONTROL",
"CAP_SYS_RESOURCE",
"CAP_BLOCK_SUSPEND",
"CAP_IPC_LOCK",
"CAP_IPC_OWNER",
"CAP_LEASE",
"CAP_NET_ADMIN",
"CAP_NET_BROADCAST",
}
for _, cap := range addCaps {
s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, cap)
s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, cap)
s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, cap)
s.Process.Capabilities.Inheritable = append(s.Process.Capabilities.Inheritable, cap)
s.Process.Capabilities.Permitted = append(s.Process.Capabilities.Permitted, cap)
}
s.Linux.ReadonlyPaths = []string{}
s.Linux.MaskedPaths = []string{}
s.Process.ApparmorProfile = ""
return nil
}
}
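A hedged sketch of applying this option in isolation; the pre-populated spec stands in for the full default spec a containerd client would normally provide, and the import path follows the vendored package declaration above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/util/entitlements"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Minimal base spec; WithInsecureSpec only appends to these fields.
	s := &specs.Spec{
		Process: &specs.Process{Capabilities: &specs.LinuxCapabilities{}},
		Linux:   &specs.Linux{},
	}
	// The client and container arguments are ignored by this SpecOpts.
	if err := entitlements.WithInsecureSpec()(context.TODO(), nil, nil, s); err != nil {
		panic(err)
	}
	fmt.Println(len(s.Process.Capabilities.Bounding)) // 38, one per entry in addCaps
}
```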

View File

@ -5,10 +5,14 @@ import (
"context"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"time"
"github.com/containerd/console"
"github.com/jaguilar/vt100"
"github.com/moby/buildkit/client"
"github.com/morikuni/aec"
digest "github.com/opencontainers/go-digest"
@ -27,14 +31,26 @@ func DisplaySolveStatus(ctx context.Context, phase string, c console.Console, w
disp.phase = "Building"
}
t := newTrace(w)
t := newTrace(w, modeConsole)
tickerTimeout := 150 * time.Millisecond
displayTimeout := 100 * time.Millisecond
if v := os.Getenv("TTY_DISPLAY_RATE"); v != "" {
if r, err := strconv.ParseInt(v, 10, 64); err == nil {
tickerTimeout = time.Duration(r) * time.Millisecond
displayTimeout = time.Duration(r) * time.Millisecond
}
}
var done bool
ticker := time.NewTicker(100 * time.Millisecond)
ticker := time.NewTicker(tickerTimeout)
defer ticker.Stop()
displayLimiter := rate.NewLimiter(rate.Every(70*time.Millisecond), 1)
displayLimiter := rate.NewLimiter(rate.Every(displayTimeout), 1)
var height int
width, _ := disp.getSize()
for {
select {
case <-ctx.Done():
@ -42,34 +58,43 @@ func DisplaySolveStatus(ctx context.Context, phase string, c console.Console, w
case <-ticker.C:
case ss, ok := <-ch:
if ok {
t.update(ss)
t.update(ss, width)
} else {
done = true
}
}
if modeConsole {
width, height = disp.getSize()
if done {
disp.print(t.displayInfo(), true)
disp.print(t.displayInfo(), width, height, true)
t.printErrorLogs(c)
return nil
} else if displayLimiter.Allow() {
disp.print(t.displayInfo(), false)
ticker.Stop()
ticker = time.NewTicker(tickerTimeout)
disp.print(t.displayInfo(), width, height, false)
}
} else {
if done || displayLimiter.Allow() {
printer.print(t)
if done {
t.printErrorLogs(w)
return nil
}
ticker.Stop()
ticker = time.NewTicker(tickerTimeout)
}
}
}
}
const termHeight = 6
const termPad = 10
type displayInfo struct {
startTime time.Time
jobs []job
jobs []*job
countTotal int
countCompleted int
}
@ -81,6 +106,8 @@ type job struct {
status string
hasError bool
isCanceled bool
vertex *vertex
showTerm bool
}
type trace struct {
@ -90,6 +117,7 @@ type trace struct {
byDigest map[digest.Digest]*vertex
nextIndex int
updates map[digest.Digest]struct{}
modeConsole bool
}
type vertex struct {
@ -107,6 +135,13 @@ type vertex struct {
lastBlockTime *time.Time
count int
statusUpdates map[string]struct{}
jobs []*job
jobCached bool
term *vt100.VT100
termBytes int
termCount int
}
func (v *vertex) update(c int) {
@ -121,11 +156,12 @@ type status struct {
*client.VertexStatus
}
func newTrace(w io.Writer) *trace {
func newTrace(w io.Writer, modeConsole bool) *trace {
return &trace{
byDigest: make(map[digest.Digest]*vertex),
updates: make(map[digest.Digest]struct{}),
w: w,
byDigest: make(map[digest.Digest]*vertex),
updates: make(map[digest.Digest]struct{}),
w: w,
modeConsole: modeConsole,
}
}
@ -140,41 +176,37 @@ func (t *trace) triggerVertexEvent(v *client.Vertex) {
old = *v
}
var ev []string
changed := false
if v.Digest != old.Digest {
ev = append(ev, fmt.Sprintf("%13s %s", "digest:", v.Digest))
changed = true
}
if v.Name != old.Name {
ev = append(ev, fmt.Sprintf("%13s %q", "name:", v.Name))
changed = true
}
if v.Started != old.Started {
if v.Started != nil && old.Started == nil || !v.Started.Equal(*old.Started) {
ev = append(ev, fmt.Sprintf("%13s %v", "started:", v.Started))
changed = true
}
}
if v.Completed != old.Completed && v.Completed != nil {
ev = append(ev, fmt.Sprintf("%13s %v", "completed:", v.Completed))
if v.Started != nil {
ev = append(ev, fmt.Sprintf("%13s %v", "duration:", v.Completed.Sub(*v.Started)))
}
changed = true
}
if v.Cached != old.Cached {
ev = append(ev, fmt.Sprintf("%13s %v", "cached:", v.Cached))
changed = true
}
if v.Error != old.Error {
ev = append(ev, fmt.Sprintf("%13s %q", "error:", v.Error))
changed = true
}
if len(ev) > 0 {
vtx.events = append(vtx.events, ev...)
vtx.update(len(ev))
if changed {
vtx.update(1)
t.updates[v.Digest] = struct{}{}
}
t.byDigest[v.Digest].prev = v
}
func (t *trace) update(s *client.SolveStatus) {
func (t *trace) update(s *client.SolveStatus, termWidth int) {
for _, v := range s.Vertexes {
prev, ok := t.byDigest[v.Digest]
if !ok {
@ -184,6 +216,9 @@ func (t *trace) update(s *client.SolveStatus) {
statusUpdates: make(map[string]struct{}),
index: t.nextIndex,
}
if t.modeConsole {
t.byDigest[v.Digest].term = vt100.NewVT100(termHeight, termWidth-termPad)
}
}
t.triggerVertexEvent(v)
if v.Started != nil && (prev == nil || prev.Started == nil) {
@ -193,12 +228,14 @@ func (t *trace) update(s *client.SolveStatus) {
t.vertexes = append(t.vertexes, t.byDigest[v.Digest])
}
t.byDigest[v.Digest].Vertex = v
t.byDigest[v.Digest].jobCached = false
}
for _, s := range s.Statuses {
v, ok := t.byDigest[s.Vertex]
if !ok {
continue // shouldn't happen
}
v.jobCached = false
prev, ok := v.byID[s.ID]
if !ok {
v.byID[s.ID] = &status{VertexStatus: s}
@ -216,6 +253,14 @@ func (t *trace) update(s *client.SolveStatus) {
if !ok {
continue // shouldn't happen
}
v.jobCached = false
if v.term != nil {
if v.term.Width != termWidth {
v.term.Resize(termHeight, termWidth-termPad)
}
v.termBytes += len(l.Data)
v.term.Write(l.Data) // error unhandled on purpose. don't trust vt100
}
i := 0
complete := split(l.Data, byte('\n'), func(dt []byte) {
if v.logsPartial && len(v.logs) != 0 && i == 0 {
@ -262,10 +307,16 @@ func (t *trace) displayInfo() (d displayInfo) {
}
for _, v := range t.vertexes {
j := job{
if v.jobCached {
d.jobs = append(d.jobs, v.jobs...)
continue
}
var jobs []*job
j := &job{
startTime: addTime(v.Started, t.localTimeDiff),
completedTime: addTime(v.Completed, t.localTimeDiff),
name: strings.Replace(v.Name, "\t", " ", -1),
vertex: v,
}
if v.Error != "" {
if strings.HasSuffix(v.Error, context.Canceled.Error()) {
@ -280,9 +331,9 @@ func (t *trace) displayInfo() (d displayInfo) {
j.name = "CACHED " + j.name
}
j.name = v.indent + j.name
d.jobs = append(d.jobs, j)
jobs = append(jobs, j)
for _, s := range v.statuses {
j := job{
j := &job{
startTime: addTime(s.Started, t.localTimeDiff),
completedTime: addTime(s.Completed, t.localTimeDiff),
name: v.indent + "=> " + s.ID,
@ -292,8 +343,11 @@ func (t *trace) displayInfo() (d displayInfo) {
} else if s.Current != 0 {
j.status = fmt.Sprintf("%.2f", units.Bytes(s.Current))
}
d.jobs = append(d.jobs, j)
jobs = append(jobs, j)
}
d.jobs = append(d.jobs, jobs...)
v.jobs = jobs
v.jobCached = true
}
return d
@ -332,20 +386,56 @@ type display struct {
repeated bool
}
func (disp *display) print(d displayInfo, all bool) {
// this output is inspired by Buck
func (disp *display) getSize() (int, int) {
width := 80
height := 10
size, err := disp.c.Size()
if err == nil && size.Width > 0 && size.Height > 0 {
width = int(size.Width)
height = int(size.Height)
if disp.c != nil {
size, err := disp.c.Size()
if err == nil && size.Width > 0 && size.Height > 0 {
width = int(size.Width)
height = int(size.Height)
}
}
return width, height
}
func setupTerminals(jobs []*job, height int, all bool) []*job {
var candidates []*job
numInUse := 0
for _, j := range jobs {
if j.vertex != nil && j.vertex.termBytes > 0 && j.completedTime == nil {
candidates = append(candidates, j)
}
if j.completedTime == nil {
numInUse++
}
}
sort.Slice(candidates, func(i, j int) bool {
idxI := candidates[i].vertex.termBytes + candidates[i].vertex.termCount*50
idxJ := candidates[j].vertex.termBytes + candidates[j].vertex.termCount*50
return idxI > idxJ
})
numFree := height - 2 - numInUse
numToHide := 0
termLimit := termHeight + 3
for i := 0; numFree > termLimit && i < len(candidates); i++ {
candidates[i].showTerm = true
numToHide += candidates[i].vertex.term.UsedHeight()
numFree -= termLimit
}
if !all {
d.jobs = wrapHeight(d.jobs, height-2)
jobs = wrapHeight(jobs, height-2-numToHide)
}
return jobs
}
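To make the terminal budget concrete: with termHeight = 6, each granted terminal costs termLimit = 9 rows. On, say, a 40-row console with 5 uncompleted jobs, numFree = 40 - 2 - 5 = 33, so the loop grants terminals to the top three candidates (33 → 24 → 15 → 6, and 6 ≤ 9 stops it). Candidates are ranked by termBytes + 50·termCount, which favors busy logs and keeps already-visible terminals stable across refreshes.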
func (disp *display) print(d displayInfo, width, height int, all bool) {
// this output is inspired by Buck
d.jobs = setupTerminals(d.jobs, height, all)
b := aec.EmptyBuilder
for i := 0; i <= disp.lineCount; i++ {
b = b.Up(1)
@ -395,11 +485,12 @@ func (disp *display) print(d displayInfo, all bool) {
if left < 12 { // too small screen to show progress
continue
}
if len(j.name) > left {
j.name = j.name[:left]
name := j.name
if len(name) > left {
name = name[:left]
}
out := pfx + j.name
out := pfx + name
if showStatus {
out += " " + status
}
@ -416,17 +507,68 @@ func (disp *display) print(d displayInfo, all bool) {
}
fmt.Fprint(disp.c, out)
lineCount++
if j.showTerm {
term := j.vertex.term
term.Resize(termHeight, width-termPad)
for _, l := range term.Content {
if !isEmpty(l) {
out := aec.Apply(fmt.Sprintf(" => => # %s\n", string(l)), aec.Faint)
fmt.Fprint(disp.c, out)
lineCount++
}
}
j.vertex.termCount++
j.showTerm = false
}
}
// override previous content
if diff := disp.lineCount - lineCount; diff > 0 {
for i := 0; i < diff; i++ {
fmt.Fprintln(disp.c, strings.Repeat(" ", width))
}
fmt.Fprint(disp.c, aec.EmptyBuilder.Up(uint(diff)).Column(0).ANSI)
}
disp.lineCount = lineCount
}
func isEmpty(l []rune) bool {
for _, r := range l {
if r != ' ' {
return false
}
}
return true
}
func align(l, r string, w int) string {
return fmt.Sprintf("%-[2]*[1]s %[3]s", l, w-len(r)-1, r)
}
func wrapHeight(j []job, limit int) []job {
func wrapHeight(j []*job, limit int) []*job {
var wrapped []*job
wrapped = append(wrapped, j...)
if len(j) > limit {
j = j[len(j)-limit:]
wrapped = wrapped[len(j)-limit:]
// wrap things around if incomplete jobs were cut
var invisible []*job
for _, j := range j[:len(j)-limit] {
if j.completedTime == nil {
invisible = append(invisible, j)
}
}
if l := len(invisible); l > 0 {
rewrapped := make([]*job, 0, len(wrapped))
for _, j := range wrapped {
if j.completedTime == nil || l <= 0 {
rewrapped = append(rewrapped, j)
}
l--
}
freespace := len(wrapped) - len(rewrapped)
wrapped = append(invisible[len(invisible)-freespace:], rewrapped...)
}
}
return j
return wrapped
}
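A small worked example of the new wrapping behavior: with jobs [A done, B running, C done, D running, E done] and limit 3, the tail window is [C, D, E]; B was cut while still incomplete, so one completed job (C) is evicted from the window and the rewrapped display becomes [B, D, E], keeping every in-flight job visible.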

View File

@ -1,8 +1,12 @@
package progressui
import (
"context"
"fmt"
"io"
"os"
"sort"
"strings"
"time"
digest "github.com/opencontainers/go-digest"
@ -20,9 +24,10 @@ type lastStatus struct {
}
type textMux struct {
w io.Writer
current digest.Digest
last map[string]lastStatus
w io.Writer
current digest.Digest
last map[string]lastStatus
notFirst bool
}
func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
@ -43,10 +48,22 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
}
old.logsOffset = 0
old.count = 0
fmt.Fprintf(p.w, "#%d ...\n", v.index)
fmt.Fprintf(p.w, "#%d ...\n", old.index)
}
if p.notFirst {
fmt.Fprintln(p.w, "")
} else {
p.notFirst = true
}
if os.Getenv("PROGRESS_NO_TRUNC") == "1" {
fmt.Fprintf(p.w, "#%d %s\n", v.index, v.Name)
fmt.Fprintf(p.w, "#%d %s\n", v.index, v.Digest)
} else {
fmt.Fprintf(p.w, "#%d %s\n", v.index, limitString(v.Name, 72))
}
fmt.Fprintf(p.w, "\n#%d %s\n", v.index, limitString(v.Name, 72))
}
if len(v.events) != 0 {
@ -127,18 +144,46 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
}
p.current = dgst
if v.Completed != nil {
p.current = ""
v.count = 0
fmt.Fprintf(p.w, "\n")
if v.Error != "" {
if v.logsPartial {
fmt.Fprintln(p.w, "")
}
if strings.HasSuffix(v.Error, context.Canceled.Error()) {
fmt.Fprintf(p.w, "#%d CANCELED\n", v.index)
} else {
fmt.Fprintf(p.w, "#%d ERROR: %s\n", v.index, v.Error)
}
} else if v.Cached {
fmt.Fprintf(p.w, "#%d CACHED\n", v.index)
} else {
tm := ""
if v.Started != nil {
tm = fmt.Sprintf(" %.1fs", v.Completed.Sub(*v.Started).Seconds())
}
fmt.Fprintf(p.w, "#%d DONE%s\n", v.index, tm)
}
}
delete(t.updates, dgst)
}
func (p *textMux) print(t *trace) {
func sortCompleted(t *trace, m map[digest.Digest]struct{}) []digest.Digest {
out := make([]digest.Digest, 0, len(m))
for k := range m {
out = append(out, k)
}
sort.Slice(out, func(i, j int) bool {
return t.byDigest[out[i]].Completed.Before(*t.byDigest[out[j]].Completed)
})
return out
}
func (p *textMux) print(t *trace) {
completed := map[digest.Digest]struct{}{}
rest := map[digest.Digest]struct{}{}
@ -161,7 +206,7 @@ func (p *textMux) print(t *trace) {
p.printVtx(t, current)
}
for dgst := range completed {
for _, dgst := range sortCompleted(t, completed) {
if dgst != current {
p.printVtx(t, dgst)
}

View File

@ -1,71 +0,0 @@
github.com/pkg/errors v0.8.0
go.etcd.io/bbolt v1.3.1-etcd.8
github.com/stretchr/testify v1.1.4
github.com/davecgh/go-spew v1.1.0
github.com/pmezard/go-difflib v1.0.0
golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2
github.com/containerd/containerd d97a907f7f781c0ab8340877d8e6b53cc7f1c2f6
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/sirupsen/logrus v1.0.0
google.golang.org/grpc v1.12.0
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd
github.com/gogo/protobuf v1.0.0
github.com/gogo/googleapis b23578765ee54ff6bceff57f397d833bf4ca6869
github.com/golang/protobuf v1.1.0
github.com/containerd/continuity f44b615e492bdfb371aae2f76ec694d9da1db537
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc 20aff4f0488c6d4b8df4d85b4f63f1f704c11abd
github.com/Microsoft/go-winio v0.4.11
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 # v1.0.1-45-geba862d
github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/Microsoft/hcsshim v0.7.3
golang.org/x/crypto 0709b304e793a5edb4a2c0145f281ecdc20838a4
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
github.com/docker/go-units v0.3.1
github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
github.com/docker/docker 71cd53e4a197b303c6ba086bd584ffd67a884281
github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
github.com/tonistiigi/fsutil f567071bed2416e4d87d260d3162722651182317
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f
github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
github.com/docker/cli 99576756eb3303b7af8102c502f21a912e3c1af6 https://github.com/tonistiigi/docker-cli.git
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/libnetwork 36d3bed0e9f4b3c8c66df9bd45278bb90b33e911
github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
github.com/uber/jaeger-client-go e02c85f9069ea625a96fc4e1afb5e9ac6c569a6d
github.com/apache/thrift b2a4d4ae21c789b689dd162deb819665567f481c
github.com/uber/jaeger-lib c48167d9cae5887393dd5e61efd06a4a48b7fbb3
github.com/codahale/hdrhistogram f8ad88b59a584afeee9d334eff879b104439117b
github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
# used by dockerfile tests
gotest.tools v2.1.0
github.com/google/go-cmp v0.2.0
# used by rootless spec conv test
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0

View File

@ -26,7 +26,7 @@ type DiskWriterOpt struct {
Filter FilterFunc
}
type FilterFunc func(*types.Stat) bool
type FilterFunc func(string, *types.Stat) bool
type DiskWriter struct {
opt DiskWriterOpt
@ -84,6 +84,12 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
destPath := filepath.Join(dw.dest, filepath.FromSlash(p))
if kind == ChangeKindDelete {
if dw.filter != nil {
var empty types.Stat
if ok := dw.filter(p, &empty); !ok {
return nil
}
}
// todo: no need to validate if diff is trusted but is it always?
if err := os.RemoveAll(destPath); err != nil {
return errors.Wrapf(err, "failed to remove: %s", destPath)
@ -104,7 +110,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
statCopy := *stat
if dw.filter != nil {
if ok := dw.filter(&statCopy); !ok {
if ok := dw.filter(p, &statCopy); !ok {
return nil
}
}
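The extra path argument makes purely path-based filters possible. A minimal sketch under the new signature (the vendor/ exclusion rule is invented for illustration):

```go
package example

import (
	"strings"

	fsutil "github.com/tonistiigi/fsutil"
	"github.com/tonistiigi/fsutil/types"
)

// skipVendor rejects anything under a top-level vendor/ directory.
// Note that the filter now also runs for deletions, receiving an
// empty placeholder *types.Stat (see the HandleChange hunk above).
var skipVendor fsutil.FilterFunc = func(p string, st *types.Stat) bool {
	return !strings.HasPrefix(p, "vendor/")
}
```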

View File

@ -3,9 +3,11 @@ package fsutil
import (
"context"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"github.com/pkg/errors"
@ -37,36 +39,80 @@ func (fs *fs) Open(p string) (io.ReadCloser, error) {
return os.Open(filepath.Join(fs.root, p))
}
func SubDirFS(fs FS, stat types.Stat) FS {
return &subDirFS{fs: fs, stat: stat}
type Dir struct {
Stat types.Stat
FS FS
}
func SubDirFS(dirs []Dir) (FS, error) {
sort.Slice(dirs, func(i, j int) bool {
return dirs[i].Stat.Path < dirs[j].Stat.Path
})
m := map[string]Dir{}
for _, d := range dirs {
if path.Base(d.Stat.Path) != d.Stat.Path {
return nil, errors.Errorf("subdir %s must be single file", d.Stat.Path)
}
if _, ok := m[d.Stat.Path]; ok {
return nil, errors.Errorf("invalid path %s", d.Stat.Path)
}
m[d.Stat.Path] = d
}
return &subDirFS{m: m, dirs: dirs}, nil
}
type subDirFS struct {
fs FS
stat types.Stat
m map[string]Dir
dirs []Dir
}
func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error {
main := &StatInfo{Stat: &fs.stat}
if !main.IsDir() {
return errors.Errorf("fs subdir not mode directory")
}
if main.Name() != fs.stat.Path {
return errors.Errorf("subdir path must be single file")
}
if err := fn(fs.stat.Path, main, nil); err != nil {
return err
}
return fs.fs.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
stat, ok := fi.Sys().(*types.Stat)
if !ok {
return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p)
for _, d := range fs.dirs {
fi := &StatInfo{Stat: &d.Stat}
if !fi.IsDir() {
return errors.Errorf("fs subdir %s not mode directory", d.Stat.Path)
}
stat.Path = path.Join(fs.stat.Path, stat.Path)
return fn(filepath.Join(fs.stat.Path, p), &StatInfo{stat}, nil)
})
if err := fn(d.Stat.Path, fi, nil); err != nil {
return err
}
if err := d.FS.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
stat, ok := fi.Sys().(*types.Stat)
if !ok {
return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p)
}
stat.Path = path.Join(d.Stat.Path, stat.Path)
if stat.Linkname != "" {
if fi.Mode()&os.ModeSymlink != 0 {
if strings.HasPrefix(stat.Linkname, "/") {
stat.Linkname = path.Join("/"+d.Stat.Path, stat.Linkname)
}
} else {
stat.Linkname = path.Join(d.Stat.Path, stat.Linkname)
}
}
return fn(filepath.Join(d.Stat.Path, p), &StatInfo{stat}, nil)
}); err != nil {
return err
}
}
return nil
}
func (fs *subDirFS) Open(p string) (io.ReadCloser, error) {
return fs.fs.Open(strings.TrimPrefix(p, fs.stat.Path+"/"))
parts := strings.SplitN(filepath.Clean(p), string(filepath.Separator), 2)
if len(parts) == 0 {
return ioutil.NopCloser(&emptyReader{}), nil
}
d, ok := fs.m[parts[0]]
if !ok {
return nil, os.ErrNotExist
}
return d.FS.Open(parts[1])
}
type emptyReader struct {
}
func (*emptyReader) Read([]byte) (int, error) {
return 0, io.EOF
}
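A sketch of the new multi-directory constructor; appFS and cfgFS are hypothetical fsutil.FS values obtained elsewhere, and the Stat fields are shaped to pass the single-path-component and IsDir checks in the Walk above:

```go
package example

import (
	"os"

	fsutil "github.com/tonistiigi/fsutil"
	"github.com/tonistiigi/fsutil/types"
)

// combine mounts two sources as named top-level subdirectories,
// so root.Open("app/...") resolves through appFS.
func combine(appFS, cfgFS fsutil.FS) (fsutil.FS, error) {
	return fsutil.SubDirFS([]fsutil.Dir{
		{Stat: types.Stat{Path: "app", Mode: uint32(os.ModeDir | 0755)}, FS: appFS},
		{Stat: types.Stat{Path: "cfg", Mode: uint32(os.ModeDir | 0755)}, FS: cfgFS},
	})
}
```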

View File

@ -46,14 +46,18 @@ func setUnixOpt(fi os.FileInfo, stat *types.Stat, path string, seenFiles map[uin
}
ino := s.Ino
linked := false
if seenFiles != nil {
if s.Nlink > 1 {
if oldpath, ok := seenFiles[ino]; ok {
stat.Linkname = oldpath
stat.Size_ = 0
linked = true
}
}
seenFiles[ino] = path
if !linked {
seenFiles[ino] = path
}
}
}
}

72 vendor/github.com/tonistiigi/fsutil/tarwriter.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
package fsutil
import (
"archive/tar"
"context"
"io"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/types"
)
func WriteTar(ctx context.Context, fs FS, w io.Writer) error {
tw := tar.NewWriter(w)
err := fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error {
stat, ok := fi.Sys().(*types.Stat)
if !ok {
return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path)
}
hdr, err := tar.FileInfoHeader(fi, stat.Linkname)
if err != nil {
return err
}
name := filepath.ToSlash(path)
if fi.IsDir() && !strings.HasSuffix(name, "/") {
name += "/"
}
hdr.Name = name
hdr.Uid = int(stat.Uid)
hdr.Gid = int(stat.Gid)
hdr.Devmajor = stat.Devmajor
hdr.Devminor = stat.Devminor
hdr.Linkname = stat.Linkname
if hdr.Linkname != "" {
hdr.Size = 0
hdr.Typeflag = tar.TypeLink
}
if len(stat.Xattrs) > 0 {
hdr.PAXRecords = map[string]string{}
}
for k, v := range stat.Xattrs {
hdr.PAXRecords["SCHILY.xattr."+k] = string(v)
}
if err := tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write file header")
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 && hdr.Linkname == "" {
rc, err := fs.Open(path)
if err != nil {
return err
}
if _, err := io.Copy(tw, rc); err != nil {
return err
}
if err := rc.Close(); err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
return tw.Close()
}
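A short usage sketch for this new helper; the source FS is assumed to come from one of the constructors above:

```go
package example

import (
	"context"
	"os"

	fsutil "github.com/tonistiigi/fsutil"
)

// writeTarFile serializes src into a tar archive on disk.
func writeTarFile(ctx context.Context, src fsutil.FS, dest string) error {
	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()
	// WriteTar walks src and emits one tar entry per file.
	return fsutil.WriteTar(ctx, src, f)
}
```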

View File

@ -19,7 +19,7 @@ type WalkOpt struct {
// FollowPaths contains symlinks that are resolved into include patterns
// before performing the fs walk
FollowPaths []string
Map func(*types.Stat) bool
Map FilterFunc
}
func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error {
@ -157,7 +157,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
return ctx.Err()
default:
if opt != nil && opt.Map != nil {
if allowed := opt.Map(stat); !allowed {
if allowed := opt.Map(stat.Path, stat); !allowed {
return nil
}
}
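The walker-side hook now shares the same FilterFunc shape; a sketch (the ownership rewrite is illustrative only):

```go
package example

import (
	"context"
	"os"

	fsutil "github.com/tonistiigi/fsutil"
	"github.com/tonistiigi/fsutil/types"
)

// walkNormalized walks root, mutating each entry's ownership before
// it is passed on; returning false from Map would drop the entry.
func walkNormalized(ctx context.Context, root string) error {
	opt := &fsutil.WalkOpt{
		Map: func(p string, st *types.Stat) bool {
			st.Uid, st.Gid = 0, 0
			return true
		},
	}
	return fsutil.Walk(ctx, root, opt, func(p string, fi os.FileInfo, err error) error {
		return err // no-op visitor for the sketch
	})
}
```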