Merge pull request #2641 from thaJeztah/bump_docker

vendor: update docker and buildkit
Commit 280e7230d5 by Sebastiaan van Stijn, 2020-07-21 16:49:04 +02:00, committed by GitHub
54 changed files with 3207 additions and 703 deletions


@ -12,7 +12,7 @@ github.com/creack/pty 3a6a957789163cacdfe0e291617a
github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
github.com/docker/compose-on-kubernetes 78e6a00beda64ac8ccb9fec787e601fe2ce0d5bb # v0.5.0-alpha1
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
github.com/docker/docker aaf470eca7b588aa19e6681bff8bf08d17be1bf2
github.com/docker/docker 78e6ffd279b627ebba046b9675ff4849091d9cc3
github.com/docker/docker-credential-helpers 54f0238b6bf101fc3ad3b34114cb5520beb562f5 # v0.6.3
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 # Contains a customized version of canonical/json and is used by Notary. The package is periodically rebased on current Go versions.
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
@ -31,6 +31,7 @@ github.com/google/gofuzz 24818f796faf91cd76ec7bddd724
github.com/google/shlex e7afc7fbc51079733e9468cdfd1efcd7d196cd1d
github.com/googleapis/gnostic 7c663266750e7d82587642f65e60bc4083f1f84e # v0.2.0
github.com/gorilla/mux 75dcda0896e109a2a22c9315bca3bb21b87b2ba5 # v1.7.4
github.com/grpc-ecosystem/go-grpc-middleware 3c51f7f332123e8be5a157c0802a228ac85bf9db # v1.2.0
github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3
@ -44,7 +45,7 @@ github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030
github.com/Microsoft/hcsshim 5bc557dd210ff2caf615e6e22d398123de77fc11 # v0.8.9
github.com/miekg/pkcs11 210dc1e16747c5ba98a03bcbcf728c38086ea357 # v1.0.3
github.com/mitchellh/mapstructure d16e9488127408e67948eb43b6d3fbb9f222da10 # v1.3.2
github.com/moby/buildkit ae7ff7174f73bcb4df89b97e1623b3fb0bfb0a0c
github.com/moby/buildkit df35e9818d1f9066e616e03f4b8d727c97562e5b
github.com/moby/sys 6154f11e6840c0d6b0dbb23f4125a6134b3013c9 # mountinfo/v0.1.3
github.com/moby/term 73f35e472e8f0a3f91347164138ce6bd73b756a9
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3


@ -203,15 +203,16 @@ type Info struct {
// LiveRestoreEnabled determines whether containers should be kept
// running when the daemon is shutdown or upon daemon start if
// running containers are detected
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
ProductLicense string `json:",omitempty"`
Warnings []string
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
Warnings []string
}
// KeyValue holds a key/value pair
@ -219,6 +220,12 @@ type KeyValue struct {
Key, Value string
}
// NetworkAddressPool is a temp struct used by Info struct
type NetworkAddressPool struct {
Base string
Size int
}
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
@ -511,6 +518,16 @@ type Checkpoint struct {
type Runtime struct {
Path string `json:"path"`
Args []string `json:"runtimeArgs,omitempty"`
// This is exposed here only for internal use
// It is not currently supported to specify custom shim configs
Shim *ShimConfig `json:"-"`
}
// ShimConfig is used by runtime to configure containerd shims
type ShimConfig struct {
Binary string
Opts interface{}
}
// DiskUsage contains response of Engine API:
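The new `DefaultAddressPools` field and the `NetworkAddressPool` helper type expose the daemon's default address pool configuration via the `/info` response. A minimal sketch of how the field appears once marshalled; the pool values here are made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Hypothetical pool values; DefaultAddressPools is tagged `json:",omitempty"`,
	// so it only appears in /info responses when the daemon has pools configured.
	info := types.Info{
		DefaultAddressPools: []types.NetworkAddressPool{
			{Base: "10.10.0.0/16", Size: 24},
		},
	}
	out, err := json.MarshalIndent(info, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```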


@ -1,4 +1,4 @@
// +build linux freebsd openbsd darwin solaris illumos
// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly
package client // import "github.com/docker/docker/client"


@ -27,17 +27,6 @@ import (
"github.com/sirupsen/logrus"
)
var unpigzPath string
func init() {
if path, err := exec.LookPath("unpigz"); err != nil {
logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library")
} else {
logrus.Debugf("Using unpigz binary found at path %s", path)
unpigzPath = path
}
}
type (
// Compression is the state representing whether the content is compressed or not.
Compression int
@ -158,19 +147,30 @@ func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error)
}
func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
if unpigzPath == "" {
noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
var noPigz bool
if noPigzEnv != "" {
var err error
noPigz, err = strconv.ParseBool(noPigzEnv)
if err != nil {
logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var")
}
}
if noPigz {
logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv)
return gzip.NewReader(buf)
}
disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ")
if disablePigzEnv != "" {
if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil {
return nil, err
} else if disablePigz {
return gzip.NewReader(buf)
}
unpigzPath, err := exec.LookPath("unpigz")
if err != nil {
logrus.Debugf("unpigz binary not found, falling back to go gzip library")
return gzip.NewReader(buf)
}
logrus.Debugf("Using %s to decompress", unpigzPath)
return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
}
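With this change the `unpigz` lookup moves from `init()` into `gzDecompress` itself, and an unparsable `MOBY_DISABLE_PIGZ` value is now returned as an error instead of merely being logged as a warning. A small standalone sketch of the parsing behaviour; the environment value is made up:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// strconv.ParseBool accepts values such as "1", "0", "t", "f", "true", "false",
	// "TRUE", "FALSE". Anything else, like "yes", now surfaces as an error from
	// gzDecompress instead of being ignored.
	os.Setenv("MOBY_DISABLE_PIGZ", "yes")
	if v := os.Getenv("MOBY_DISABLE_PIGZ"); v != "" {
		if _, err := strconv.ParseBool(v); err != nil {
			fmt.Println("error:", err) // strconv.ParseBool: parsing "yes": invalid syntax
		}
	}
}
```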


@ -10,9 +10,9 @@ import (
"strings"
"syscall"
"github.com/containerd/containerd/sys"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
@ -81,7 +81,7 @@ func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
if rsystem.RunningInUserNS() {
if sys.RunningInUserNS() {
// cannot create a device if running in user namespace
return nil
}


@ -1,5 +1,5 @@
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/Microsoft/hcsshim 5bc557dd210ff2caf615e6e22d398123de77fc11 # v0.8.9
github.com/Microsoft/hcsshim 9dcb42f100215f8d375b4a9265e5bba009217a85 # moby branch
github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/golang/gddo 72a348e765d293ed6d1ded7b699591f14d6cd921
@ -10,7 +10,6 @@ github.com/moby/term 73f35e472e8f0a3f91347164138c
github.com/creack/pty 3a6a957789163cacdfe0e291617a1c8e80612c11 # v1.1.9
github.com/konsorten/go-windows-terminal-sequences edb144dfd453055e1e49a3d8b410a660b5a87613 # v1.0.3
github.com/mattn/go-shellwords 36a9b3c57cb5caa559ff63fb7e9b585f1c00df75 # v1.0.6
github.com/sirupsen/logrus 60c74ad9be0d874af0ab0daef6ab07c5c5911f0d # v1.6.0
github.com/tchap/go-patricia a7f0089c6f496e8e70402f61733606daa326cac5 # v2.3.0
golang.org/x/net 0de0cce0169b09b364e001f108dc0399ea8630b3
@ -18,7 +17,7 @@ golang.org/x/sys 85ca7c5b95cdf1e557abb38a283d
github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0
github.com/moby/sys 6154f11e6840c0d6b0dbb23f4125a6134b3013c9 # mountinfo/v0.1.3
golang.org/x/text 342b2e1fbaa52c93f31447ad2c6abc048c63e475 # v0.3.2
golang.org/x/text 23ae387dee1f90d29a23c0e87ee0b46038fbed0e # v0.3.3
gotest.tools/v3 bb0d8a963040ea5048dcef1a14d8f8b58a33d4b3 # v3.0.2
github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
@ -28,7 +27,7 @@ github.com/imdario/mergo 1afb36080aec31e0d1528973ebe6
golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb
# buildkit
github.com/moby/buildkit ae7ff7174f73bcb4df89b97e1623b3fb0bfb0a0c
github.com/moby/buildkit df35e9818d1f9066e616e03f4b8d727c97562e5b
github.com/tonistiigi/fsutil c2c7d7b0e1441705cd802e5699c0a10b1dfe39fd
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
@ -36,11 +35,12 @@ github.com/google/shlex e7afc7fbc51079733e9468cdfd1e
github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
github.com/gofrs/flock 392e7fae8f1b0bdbd67dad7237d23f618feb6dbb # v0.7.1
github.com/grpc-ecosystem/go-grpc-middleware 3c51f7f332123e8be5a157c0802a228ac85bf9db # v1.2.0
# libnetwork
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy.installer accordingly
github.com/docker/libnetwork 2e24aed516bd5c836e11378bb457dd612aa868ed
github.com/docker/libnetwork 9e99af28df21367340c95a3863e31808d689c92a
github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@ -168,7 +168,7 @@ github.com/morikuni/aec 39771216ff4c63d11f5e604076f9
# metrics
github.com/docker/go-metrics b619b3592b65de4f087d9f16863a7e6ff905973c # v0.0.1
github.com/opencontainers/selinux 0d49ba2a6aae052c614dfe5de62a158711a6c461 # v1.5.1
github.com/opencontainers/selinux c688bba66d7ecb448819836b96f9c416da8b0746 # v1.5.2
# archive/tar


@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,85 @@
# Go gRPC Middleware
[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware)
[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware)
[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware)
[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge)
[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)
[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
## Middleware
[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
that is executed either on the gRPC server, before the request is passed on to the user's application logic, or on the gRPC client, around the user call. Interceptors are a perfect way to implement
common patterns: auth, logging, message validation, retries, or monitoring.
These are generic building blocks that make it easy to build multiple microservices.
The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
some of that functionality itself, and also links to useful external repos.
`grpc_middleware` itself provides support for chaining interceptors. Here's an example:
```go
import "github.com/grpc-ecosystem/go-grpc-middleware"
myServer := grpc.NewServer(
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_ctxtags.StreamServerInterceptor(),
grpc_opentracing.StreamServerInterceptor(),
grpc_prometheus.StreamServerInterceptor,
grpc_zap.StreamServerInterceptor(zapLogger),
grpc_auth.StreamServerInterceptor(myAuthFunction),
grpc_recovery.StreamServerInterceptor(),
)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
grpc_ctxtags.UnaryServerInterceptor(),
grpc_opentracing.UnaryServerInterceptor(),
grpc_prometheus.UnaryServerInterceptor,
grpc_zap.UnaryServerInterceptor(zapLogger),
grpc_auth.UnaryServerInterceptor(myAuthFunction),
grpc_recovery.UnaryServerInterceptor(),
)),
)
```
## Interceptors
*Please send a PR to add new interceptors or middleware to this list*
#### Auth
* [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
#### Logging
* [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
* [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
* [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
* [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
#### Monitoring
* [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
* [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
* [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
#### Client
* [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
#### Server
* [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
* [`grpc_recovery`](recovery/) - turn panics into gRPC errors
* [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
## Status
This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io).
Additional tooling will be added, and contributions are welcome.
## License
`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.


@ -0,0 +1,120 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
// gRPC Server Interceptor chaining middleware.
package grpc_middleware
import (
"context"
"google.golang.org/grpc"
)
// ChainUnaryServer creates a single interceptor out of a chain of many interceptors.
//
// Execution is done in left-to-right order, including passing of context.
// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
// will see context changes of one and two.
func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
n := len(interceptors)
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
return currentInter(currentCtx, currentReq, info, currentHandler)
}
}
chainedHandler := handler
for i := n - 1; i >= 0; i-- {
chainedHandler = chainer(interceptors[i], chainedHandler)
}
return chainedHandler(ctx, req)
}
}
// ChainStreamServer creates a single interceptor out of a chain of many interceptors.
//
// Execution is done in left-to-right order, including passing of context.
// For example ChainUnaryServer(one, two, three) will execute one before two before three.
// If you want to pass context between interceptors, use WrapServerStream.
func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
n := len(interceptors)
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
return currentInter(currentSrv, currentStream, info, currentHandler)
}
}
chainedHandler := handler
for i := n - 1; i >= 0; i-- {
chainedHandler = chainer(interceptors[i], chainedHandler)
}
return chainedHandler(srv, ss)
}
}
// ChainUnaryClient creates a single interceptor out of a chain of many interceptors.
//
// Execution is done in left-to-right order, including passing of context.
// For example ChainUnaryClient(one, two, three) will execute one before two before three.
func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
n := len(interceptors)
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
}
}
chainedInvoker := invoker
for i := n - 1; i >= 0; i-- {
chainedInvoker = chainer(interceptors[i], chainedInvoker)
}
return chainedInvoker(ctx, method, req, reply, cc, opts...)
}
}
// ChainStreamClient creates a single interceptor out of a chain of many interceptors.
//
// Execution is done in left-to-right order, including passing of context.
// For example ChainStreamClient(one, two, three) will execute one before two before three.
func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
n := len(interceptors)
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
}
}
chainedStreamer := streamer
for i := n - 1; i >= 0; i-- {
chainedStreamer = chainer(interceptors[i], chainedStreamer)
}
return chainedStreamer(ctx, desc, cc, method, opts...)
}
}
// Chain creates a single interceptor out of a chain of many interceptors.
//
// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
// Basically syntactic sugar.
func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
}
// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
// Basically syntactic sugar.
func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
}
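These client-side helpers are what the buildkit client change further down relies on: `grpc.WithUnaryInterceptor` installs a single interceptor, so several of them have to be folded into one with `ChainUnaryClient` (and likewise for streams). A minimal sketch with two stand-in unary interceptors; the interceptor names and dial address are hypothetical:

```go
package main

import (
	"context"
	"log"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

// Two stand-in unary client interceptors; real callers would plug in tracing,
// error mapping, retries, and so on.
func logUnary(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("calling %s", method)
	return invoker(ctx, method, req, reply, cc, opts...)
}

func noopUnary(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	return invoker(ctx, method, req, reply, cc, opts...)
}

func main() {
	// grpc.WithUnaryInterceptor accepts only one interceptor, so multiple
	// interceptors are chained into a single one.
	conn, err := grpc.Dial("localhost:1234", // hypothetical address
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(logUnary, noopUnary)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```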


@ -0,0 +1,69 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
/*
`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools.
Middleware
gRPC is a fantastic RPC middleware, which sees a lot of adoption in the Golang world. However, the
upstream gRPC codebase is relatively bare bones.
This package, and most of its child packages, provides commonly needed middleware for gRPC:
client-side interceptors for retries, server-side interceptors for input validation and auth,
functions for chaining said interceptors, metadata convenience methods and more.
Chaining
By default, gRPC doesn't allow more than one interceptor on either the client or the server side.
`grpc_middleware` provides convenient chaining methods: a simple way of turning multiple
interceptors into a single interceptor. Here's an example for server chaining:
myServer := grpc.NewServer(
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)),
)
These interceptors will be executed from left to right: logging, monitoring and auth.
Here's an example for client side chaining:
clientConn, err = grpc.Dial(
address,
grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)),
grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)),
)
client = pb_testproto.NewTestServiceClient(clientConn)
resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
These interceptors will be executed from left to right: monitoring and then retry logic.
The retry interceptor will call every interceptor that follows it whenever a retry happens.
Writing Your Own
Implementing your own interceptor is pretty trivial: there are interfaces for that. But the interesting
bit is exposing common data to handlers (and other middleware), similarly to HTTP middleware design.
For example, you may want to pass the identity of the caller from the auth interceptor all the way
to the handling function.
For example, a server-side unary interceptor for auth looks like:
func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
newCtx := context.WithValue(ctx, "user_id", "john@example.com")
return handler(newCtx, req)
}
Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within
the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is
needed. For example:
func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
newStream := grpc_middleware.WrapServerStream(stream)
newStream.WrappedContext = context.WithValue(stream.Context(), "user_id", "john@example.com")
return handler(srv, newStream)
}
*/
package grpc_middleware


@ -0,0 +1,22 @@
module github.com/grpc-ecosystem/go-grpc-middleware
require (
github.com/go-kit/kit v0.9.0
github.com/go-logfmt/logfmt v0.4.0 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/gogo/protobuf v1.2.1
github.com/golang/protobuf v1.3.2
github.com/opentracing/opentracing-go v1.1.0
github.com/pkg/errors v0.8.1 // indirect
github.com/sirupsen/logrus v1.4.2
github.com/stretchr/testify v1.4.0
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect
google.golang.org/grpc v1.19.0
)
go 1.13


@ -0,0 +1,30 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
package grpc_middleware
import (
"context"
"google.golang.org/grpc"
)
// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context.
type WrappedServerStream struct {
grpc.ServerStream
// WrappedContext is the wrapper's own Context. You can assign it.
WrappedContext context.Context
}
// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context()
func (w *WrappedServerStream) Context() context.Context {
return w.WrappedContext
}
// WrapServerStream returns a ServerStream that has the ability to overwrite context.
func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream {
if existing, ok := stream.(*WrappedServerStream); ok {
return existing
}
return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()}
}
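A sketch of how `WrapServerStream` is typically used: a stream server interceptor that attaches a value to the stream's context so downstream handlers can read it. The interceptor name, context key, and value are made up:

```go
package main

import (
	"context"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

type ctxKey string

// tagStream wraps the incoming stream so a request-scoped value can be attached
// to its context before the handler runs.
func tagStream(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	wrapped := grpc_middleware.WrapServerStream(ss)
	wrapped.WrappedContext = context.WithValue(ss.Context(), ctxKey("request-id"), "abc123")
	return handler(srv, wrapped)
}

func main() {
	// Registered like any other stream interceptor.
	_ = grpc.NewServer(grpc.StreamInterceptor(tagStream))
}
```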


@ -86,6 +86,7 @@ BuildKit is used by the following projects:
- [PouchContainer](https://github.com/alibaba/pouch)
- [Docker buildx](https://github.com/docker/buildx)
- [Okteto Cloud](https://okteto.com/)
- [Earthly earthfiles](https://github.com/vladaionescu/earthly)
## Quick start
@ -148,6 +149,7 @@ Currently, the following high-level languages has been implemented for LLB:
- [Buildpacks](https://github.com/tonistiigi/buildkit-pack)
- [Mockerfile](https://matt-rickard.com/building-a-new-dockerfile-frontend/)
- [Gockerfile](https://github.com/po3rin/gockerfile)
- [bldr (Pkgfile)](https://github.com/talos-systems/bldr/)
- (open a PR to add your own language)
### Exploring Dockerfiles


@ -8,12 +8,14 @@ import (
"net"
"time"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/connhelper"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/util/appdefaults"
"github.com/moby/buildkit/util/grpcerrors"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
@ -31,6 +33,10 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
gopts := []grpc.DialOption{}
needDialer := true
needWithInsecure := true
var unary []grpc.UnaryClientInterceptor
var stream []grpc.StreamClientInterceptor
for _, o := range opts {
if _, ok := o.(*withFailFast); ok {
gopts = append(gopts, grpc.FailOnNonTempDialError(true))
@ -44,9 +50,8 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
needWithInsecure = false
}
if wt, ok := o.(*withTracer); ok {
gopts = append(gopts,
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())),
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)))
unary = append(unary, otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads()))
stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))
}
if wd, ok := o.(*withDialer); ok {
gopts = append(gopts, grpc.WithDialer(wd.dialer))
@ -68,6 +73,22 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
if address == "" {
address = appdefaults.Address
}
unary = append(unary, grpcerrors.UnaryClientInterceptor)
stream = append(stream, grpcerrors.StreamClientInterceptor)
if len(unary) == 1 {
gopts = append(gopts, grpc.WithUnaryInterceptor(unary[0]))
} else if len(unary) > 1 {
gopts = append(gopts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...)))
}
if len(stream) == 1 {
gopts = append(gopts, grpc.WithStreamInterceptor(stream[0]))
} else if len(stream) > 1 {
gopts = append(gopts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...)))
}
conn, err := grpc.DialContext(ctx, address, gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)


@ -1,4 +1,5 @@
// Package connhelper provides helpers for connecting to a remote daemon host with custom logic.
// Package connhelper provides helpers for connecting to a remote daemon host
// with custom logic.
package connhelper
import (


@ -61,7 +61,7 @@ func (as *asyncState) Do(ctx context.Context) error {
if err != nil {
select {
case <-ctx.Done():
if errors.Cause(err) == ctx.Err() {
if errors.Is(err, ctx.Err()) {
return res, err
}
default:
@ -85,8 +85,8 @@ type errVertex struct {
func (v *errVertex) Validate(context.Context) error {
return v.err
}
func (v *errVertex) Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
return "", nil, nil, v.err
func (v *errVertex) Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
return "", nil, nil, nil, v.err
}
func (v *errVertex) Output() Output {
return nil
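The switch from `errors.Cause(err) == ctx.Err()` to `errors.Is(err, ctx.Err())` matters because `errors.Is` walks the entire unwrap chain, including errors wrapped with `fmt.Errorf("...: %w", err)`, whereas the old comparison could miss wrapped context errors. A quick standalone illustration using only the standard library:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// An error produced somewhere deep in the solve, wrapped on the way up.
	err := fmt.Errorf("async state failed: %w", ctx.Err())

	fmt.Println(errors.Is(err, context.Canceled)) // true: match found anywhere in the chain
	fmt.Println(err == context.Canceled)          // false: a plain comparison misses the wrapper
}
```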


@ -20,6 +20,7 @@ type DefinitionOp struct {
ops map[digest.Digest]*pb.Op
defs map[digest.Digest][]byte
metas map[digest.Digest]pb.OpMetadata
sources map[digest.Digest][]*SourceLocation
platforms map[digest.Digest]*specs.Platform
dgst digest.Digest
index pb.OutputIndex
@ -49,6 +50,38 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
platforms[dgst] = platform
}
srcs := map[digest.Digest][]*SourceLocation{}
if def.Source != nil {
sourceMaps := make([]*SourceMap, len(def.Source.Infos))
for i, info := range def.Source.Infos {
var st *State
sdef := info.Definition
if sdef != nil {
op, err := NewDefinitionOp(sdef)
if err != nil {
return nil, err
}
state := NewState(op)
st = &state
}
sourceMaps[i] = NewSourceMap(st, info.Filename, info.Data)
}
for dgst, locs := range def.Source.Locations {
for _, loc := range locs.Locations {
if loc.SourceIndex < 0 || int(loc.SourceIndex) >= len(sourceMaps) {
return nil, errors.Errorf("failed to find source map with index %d", loc.SourceIndex)
}
srcs[digest.Digest(dgst)] = append(srcs[digest.Digest(dgst)], &SourceLocation{
SourceMap: sourceMaps[int(loc.SourceIndex)],
Ranges: loc.Ranges,
})
}
}
}
var index pb.OutputIndex
if dgst != "" {
index = ops[dgst].Inputs[0].Index
@ -59,6 +92,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
ops: ops,
defs: defs,
metas: def.Metadata,
sources: srcs,
platforms: platforms,
dgst: dgst,
index: index,
@ -110,20 +144,20 @@ func (d *DefinitionOp) Validate(context.Context) error {
return nil
}
func (d *DefinitionOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
func (d *DefinitionOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
if d.dgst == "" {
return "", nil, nil, errors.Errorf("cannot marshal empty definition op")
return "", nil, nil, nil, errors.Errorf("cannot marshal empty definition op")
}
if err := d.Validate(ctx); err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
d.mu.Lock()
defer d.mu.Unlock()
meta := d.metas[d.dgst]
return d.dgst, d.defs[d.dgst], &meta, nil
return d.dgst, d.defs[d.dgst], &meta, d.sources[d.dgst], nil
}


@ -81,7 +81,7 @@ func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Outp
}
m.output = o
}
e.Store(nil, nil, nil)
e.Store(nil, nil, nil, nil)
e.isValidated = false
return m.output
}
@ -124,12 +124,12 @@ func (e *ExecOp) Validate(ctx context.Context) error {
return nil
}
func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
if e.Cached(c) {
return e.Load()
}
if err := e.Validate(ctx); err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
// make sure mounts are sorted
sort.Slice(e.mounts, func(i, j int) bool {
@ -138,7 +138,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
env, err := getEnv(e.base)(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
if len(e.ssh) > 0 {
@ -161,17 +161,17 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
args, err := getArgs(e.base)(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
cwd, err := getDir(e.base)(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
user, err := getUser(e.base)(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
meta := &pb.Meta{
@ -182,7 +182,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
}
extraHosts, err := getExtraHosts(e.base)(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
if len(extraHosts) > 0 {
hosts := make([]*pb.HostIP, len(extraHosts))
@ -194,12 +194,12 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
network, err := getNetwork(e.base)(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
security, err := getSecurity(e.base)(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
peo := &pb.ExecOp{
@ -252,7 +252,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
if e.constraints.Platform == nil {
p, err := getPlatform(e.base)(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
e.constraints.Platform = p
}
@ -267,11 +267,11 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
inputIndex := pb.InputIndex(len(pop.Inputs))
if m.source != nil {
if m.tmpfs {
return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch")
return "", nil, nil, nil, errors.Errorf("tmpfs mounts must use scratch")
}
inp, err := m.source.ToInput(ctx, c)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
newInput := true
@ -356,9 +356,9 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
dt, err := pop.Marshal()
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
e.Store(dt, md, c)
e.Store(dt, md, e.constraints.SourceLocations, c)
return e.Load()
}
@ -388,7 +388,7 @@ func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) {
i := 0
for _, m2 := range e.mounts {
if m2.noOutput || m2.readonly || m2.cacheID != "" {
if m2.noOutput || m2.readonly || m2.tmpfs || m2.cacheID != "" {
continue
}
if m == m2 {


@ -649,12 +649,12 @@ func (ms *marshalState) add(fa *FileAction, c *Constraints) (*fileActionState, e
return st, nil
}
func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
if f.Cached(c) {
return f.Load()
}
if err := f.Validate(ctx); err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
addCap(&f.constraints, pb.CapFileBase)
@ -669,7 +669,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
state := newMarshalState(ctx)
_, err := state.add(f.action, c)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
pop.Inputs = state.inputs
@ -683,13 +683,13 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
if st.fa.state != nil {
parent, err = st.fa.state.GetDir(ctx)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
}
action, err := st.action.toProtoAction(ctx, parent, st.base)
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
pfo.Actions = append(pfo.Actions, &pb.FileAction{
@ -702,9 +702,9 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
dt, err := pop.Marshal()
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
f.Store(dt, md, c)
f.Store(dt, md, f.constraints.SourceLocations, c)
return f.Load()
}


@ -14,21 +14,24 @@ import (
type Definition struct {
Def [][]byte
Metadata map[digest.Digest]pb.OpMetadata
Source *pb.Source
}
func (def *Definition) ToPB() *pb.Definition {
md := make(map[digest.Digest]pb.OpMetadata)
md := make(map[digest.Digest]pb.OpMetadata, len(def.Metadata))
for k, v := range def.Metadata {
md[k] = v
}
return &pb.Definition{
Def: def.Def,
Source: def.Source,
Metadata: md,
}
}
func (def *Definition) FromPB(x *pb.Definition) {
def.Def = x.Def
def.Source = x.Source
def.Metadata = make(map[digest.Digest]pb.OpMetadata)
for k, v := range x.Metadata {
def.Metadata[k] = v
@ -95,18 +98,20 @@ type MarshalCache struct {
digest digest.Digest
dt []byte
md *pb.OpMetadata
srcs []*SourceLocation
constraints *Constraints
}
func (mc *MarshalCache) Cached(c *Constraints) bool {
return mc.dt != nil && mc.constraints == c
}
func (mc *MarshalCache) Load() (digest.Digest, []byte, *pb.OpMetadata, error) {
return mc.digest, mc.dt, mc.md, nil
func (mc *MarshalCache) Load() (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
return mc.digest, mc.dt, mc.md, mc.srcs, nil
}
func (mc *MarshalCache) Store(dt []byte, md *pb.OpMetadata, c *Constraints) {
func (mc *MarshalCache) Store(dt []byte, md *pb.OpMetadata, srcs []*SourceLocation, c *Constraints) {
mc.digest = digest.FromBytes(dt)
mc.dt = dt
mc.md = md
mc.constraints = c
mc.srcs = srcs
}


@ -44,12 +44,12 @@ func (s *SourceOp) Validate(ctx context.Context) error {
return nil
}
func (s *SourceOp) Marshal(ctx context.Context, constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
func (s *SourceOp) Marshal(ctx context.Context, constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) {
if s.Cached(constraints) {
return s.Load()
}
if err := s.Validate(ctx); err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
if strings.HasPrefix(s.id, "local://") {
@ -74,10 +74,10 @@ func (s *SourceOp) Marshal(ctx context.Context, constraints *Constraints) (diges
dt, err := proto.Marshal()
if err != nil {
return "", nil, nil, err
return "", nil, nil, nil, err
}
s.Store(dt, md, constraints)
s.Store(dt, md, s.constraints.SourceLocations, constraints)
return s.Load()
}

vendor/github.com/moby/buildkit/client/llb/sourcemap.go (new vendored file, 111 lines)

@ -0,0 +1,111 @@
package llb
import (
"context"
"github.com/moby/buildkit/solver/pb"
"github.com/opencontainers/go-digest"
)
type SourceMap struct {
State *State
Definition *Definition
Filename string
Data []byte
}
func NewSourceMap(st *State, filename string, dt []byte) *SourceMap {
return &SourceMap{
State: st,
Filename: filename,
Data: dt,
}
}
func (s *SourceMap) Location(r []*pb.Range) ConstraintsOpt {
return constraintsOptFunc(func(c *Constraints) {
if s == nil {
return
}
c.SourceLocations = append(c.SourceLocations, &SourceLocation{
SourceMap: s,
Ranges: r,
})
})
}
type SourceLocation struct {
SourceMap *SourceMap
Ranges []*pb.Range
}
type sourceMapCollector struct {
maps []*SourceMap
index map[*SourceMap]int
locations map[digest.Digest][]*SourceLocation
}
func newSourceMapCollector() *sourceMapCollector {
return &sourceMapCollector{
index: map[*SourceMap]int{},
locations: map[digest.Digest][]*SourceLocation{},
}
}
func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) {
for _, l := range ls {
idx, ok := smc.index[l.SourceMap]
if !ok {
idx = len(smc.maps)
smc.maps = append(smc.maps, l.SourceMap)
}
smc.index[l.SourceMap] = idx
}
smc.locations[dgst] = ls
}
func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) {
s := &pb.Source{
Locations: make(map[string]*pb.Locations),
}
for _, m := range smc.maps {
def := m.Definition
if def == nil && m.State != nil {
var err error
def, err = m.State.Marshal(ctx, co...)
if err != nil {
return nil, err
}
m.Definition = def
}
info := &pb.SourceInfo{
Data: m.Data,
Filename: m.Filename,
}
if def != nil {
info.Definition = def.ToPB()
}
s.Infos = append(s.Infos, info)
}
for dgst, locs := range smc.locations {
pbLocs, ok := s.Locations[dgst.String()]
if !ok {
pbLocs = &pb.Locations{}
}
for _, loc := range locs {
pbLocs.Locations = append(pbLocs.Locations, &pb.Location{
SourceIndex: int32(smc.index[loc.SourceMap]),
Ranges: loc.Ranges,
})
}
s.Locations[dgst.String()] = pbLocs
}
return s, nil
}
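`SourceMap.Location` returns a `ConstraintsOpt`, so a frontend can attach source information (for example a Dockerfile line) to the LLB vertices it emits; `State.Marshal` then serializes the collected locations into `Definition.Source`. A rough sketch of attaching a location to an exec op; the file name, contents, and command are made up:

```go
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	dockerfile := []byte("FROM alpine\nRUN apk add --no-cache git\n")

	// A source map describing where this part of the build came from.
	sm := llb.NewSourceMap(nil, "Dockerfile", dockerfile)

	st := llb.Image("docker.io/library/alpine:latest").
		Run(
			llb.Shlex("apk add --no-cache git"),
			sm.Location(nil), // ranges omitted in this sketch; real frontends pass []*pb.Range
		).
		Root()

	def, err := st.Marshal(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println("source infos:", len(def.Source.Infos))
}
```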


@ -24,7 +24,7 @@ type Output interface {
type Vertex interface {
Validate(context.Context) error
Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error)
Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error)
Output() Output
Inputs() []Output
}
@ -124,7 +124,9 @@ func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition,
o.SetConstraintsOption(c)
}
def, err := marshal(ctx, s.Output().Vertex(ctx), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c)
smc := newSourceMapCollector()
def, err := marshal(ctx, s.Output().Vertex(ctx), def, smc, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c)
if err != nil {
return def, err
}
@ -159,23 +161,28 @@ func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition,
}
def.Metadata[dgst] = md
sm, err := smc.Marshal(ctx, co...)
if err != nil {
return nil, err
}
def.Source = sm
return def, nil
}
func marshal(ctx context.Context, v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, c *Constraints) (*Definition, error) {
func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollector, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, c *Constraints) (*Definition, error) {
if _, ok := vertexCache[v]; ok {
return def, nil
}
for _, inp := range v.Inputs() {
var err error
def, err = marshal(ctx, inp.Vertex(ctx), def, cache, vertexCache, c)
def, err = marshal(ctx, inp.Vertex(ctx), def, s, cache, vertexCache, c)
if err != nil {
return def, err
}
}
dgst, dt, opMeta, err := v.Marshal(ctx, c)
dgst, dt, opMeta, sls, err := v.Marshal(ctx, c)
if err != nil {
return def, err
}
@ -186,6 +193,7 @@ func marshal(ctx context.Context, v Vertex, def *Definition, cache map[digest.Di
if _, ok := cache[dgst]; ok {
return def, nil
}
s.Add(dgst, sls)
def.Def = append(def.Def, dt)
cache[dgst] = struct{}{}
return def, nil
@ -367,7 +375,7 @@ func (o *output) ToInput(ctx context.Context, c *Constraints) (*pb.Input, error)
return nil, err
}
}
dgst, _, _, err := o.vertex.Marshal(ctx, c)
dgst, _, _, _, err := o.vertex.Marshal(ctx, c)
if err != nil {
return nil, err
}
@ -514,6 +522,7 @@ type Constraints struct {
Metadata pb.OpMetadata
LocalUniqueID string
Caps *apicaps.CapSet
SourceLocations []*SourceLocation
}
func Platform(p specs.Platform) ConstraintsOpt {


@ -10,11 +10,14 @@ import (
"time"
"github.com/gogo/googleapis/google/rpc"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/ptypes/any"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/gateway/client"
pb "github.com/moby/buildkit/frontend/gateway/pb"
opspb "github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/grpcerrors"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
fstypes "github.com/tonistiigi/fsutil/types"
@ -29,7 +32,7 @@ type GrpcClient interface {
}
func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
resp, err := c.Ping(ctx, &pb.PingRequest{})
if err != nil {
@ -150,12 +153,12 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
}
}
if retError != nil {
st, _ := status.FromError(errors.Cause(retError))
st, _ := status.FromError(grpcerrors.ToGRPC(retError))
stp := st.Proto()
req.Error = &rpc.Status{
Code: stp.Code,
Message: stp.Message,
// Details: stp.Details,
Details: convertToGogoAny(stp.Details),
}
}
if _, err := c.client.Return(ctx, req); err != nil && retError == nil {
@ -503,7 +506,7 @@ func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, err
return stdioConn(), nil
})
cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure())
cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor))
if err != nil {
return nil, nil, errors.Wrap(err, "failed to create grpc client")
}
@ -589,3 +592,11 @@ func workers() []client.WorkerInfo {
func product() string {
return os.Getenv("BUILDKIT_EXPORTEDPRODUCT")
}
func convertToGogoAny(in []*any.Any) []*gogotypes.Any {
out := make([]*gogotypes.Any, len(in))
for i := range in {
out[i] = &gogotypes.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
}
return out
}


@ -32,6 +32,9 @@ const (
// CapFrontendInputs is a capability to request frontend inputs from the
// LLBBridge GRPC server.
CapFrontendInputs apicaps.CapID = "frontend.inputs"
// CapGatewaySolveMetadata can be used to check if solve calls from gateway reliably return metadata
CapGatewaySolveMetadata apicaps.CapID = "gateway.solve.metadata"
)
func init() {
@ -126,4 +129,11 @@ func init() {
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapGatewaySolveMetadata,
Name: "gateway metadata",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}


@ -32,7 +32,7 @@ service LLBBridge {
message Result {
oneof result {
// Deprecated non-array refs.
// Deprecated non-array refs.
string refDeprecated = 1;
RefMapDeprecated refsDeprecated = 2;
@ -67,7 +67,7 @@ message InputsRequest {
}
message InputsResponse {
map<string, pb.Definition> Definitions = 1;
map<string, pb.Definition> Definitions = 1;
}
message ResolveImageConfigRequest {
@ -87,9 +87,9 @@ message SolveRequest {
string Frontend = 2;
map<string, string> FrontendOpt = 3;
// ImportCacheRefsDeprecated is deprecated in favor or the new Imports since BuildKit v0.4.0.
// When ImportCacheRefsDeprecated is set, the solver appends
// {.Type = "registry", .Attrs = {"ref": importCacheRef}}
// for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed)
// When ImportCacheRefsDeprecated is set, the solver appends
// {.Type = "registry", .Attrs = {"ref": importCacheRef}}
// for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed)
repeated string ImportCacheRefsDeprecated = 4;
bool allowResultReturn = 5;
bool allowResultArrayRef = 6;


@ -3,24 +3,22 @@ module github.com/moby/buildkit
go 1.13
require (
github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200220082720-bb896865146c
github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200512015515-32086ef23a5a
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/BurntSushi/toml v0.3.1
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
github.com/Microsoft/hcsshim v0.8.7 // indirect
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 // indirect
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d // indirect
github.com/containerd/console v0.0.0-20191219165238-8375c3424e4d
github.com/containerd/cgroups v0.0.0-20200327175542-b44481373989 // indirect
github.com/containerd/console v1.0.0
github.com/containerd/containerd v1.4.0-0
github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41
github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00 // indirect
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b // indirect
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328
github.com/containerd/ttrpc v0.0.0-20200121165050-0be804eadb15 // indirect
github.com/containerd/typeurl v0.0.0-20200205145503-b45ef1f1f737 // indirect
github.com/coreos/go-systemd/v22 v22.0.0
github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
github.com/docker/distribution v0.0.0-20200223014041-6b972e50feee
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v0.0.0
github.com/docker/docker-credential-helpers v0.6.0 // indirect
github.com/docker/go-connections v0.3.0
@ -29,55 +27,52 @@ require (
github.com/gogo/googleapis v1.3.2
github.com/gogo/protobuf v1.3.1
github.com/golang/protobuf v1.3.3
github.com/google/go-cmp v0.3.1
github.com/google/go-cmp v0.4.0
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
github.com/google/uuid v1.1.1 // indirect
github.com/gorilla/mux v1.7.4 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/go-immutable-radix v1.0.0
github.com/hashicorp/golang-lru v0.5.1
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
github.com/imdario/mergo v0.3.7 // indirect
github.com/imdario/mergo v0.3.9 // indirect
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452
github.com/mitchellh/hashstructure v1.0.0
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4
github.com/opencontainers/runtime-spec v1.0.1
github.com/opencontainers/selinux v1.3.2 // indirect
github.com/opencontainers/runc v1.0.0-rc10
github.com/opencontainers/runtime-spec v1.0.2
github.com/opencontainers/selinux v1.5.1 // indirect
github.com/opentracing-contrib/go-stdlib v0.0.0-20171029140428-b1a47cfbdd75
github.com/opentracing/opentracing-go v0.0.0-20171003133519-1361b9cd60be
github.com/opentracing/opentracing-go v1.1.0
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.2.1
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.4.2
github.com/stretchr/testify v1.4.0
github.com/stretchr/testify v1.5.1
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
github.com/tonistiigi/fsutil v0.0.0-20200326231323-c2c7d7b0e144
github.com/tonistiigi/fsutil v0.0.0-20200512175118-ae3a8d753069
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e
github.com/uber/jaeger-client-go v2.11.2+incompatible
github.com/uber/jaeger-lib v1.2.1 // indirect
github.com/urfave/cli v1.22.2
github.com/vishvananda/netlink v1.0.0 // indirect
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
github.com/vishvananda/netlink v1.1.0 // indirect
go.etcd.io/bbolt v1.3.3
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9
google.golang.org/grpc v1.27.1
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gotest.tools v2.2.0+incompatible
gotest.tools/v3 v3.0.2 // indirect
)
replace github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
replace github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
replace github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55
replace github.com/docker/docker => github.com/docker/docker v1.4.2-0.20200227233006-38f52c9fec82
replace (
github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200512144102-f13ba8f2f2fd
github.com/docker/docker => github.com/docker/docker v17.12.0-ce-rc1.0.20200310163718-4634ce647cf2+incompatible
github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
)

View File

@ -4,9 +4,8 @@ import (
"context"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/moby/buildkit/util/grpcerrors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string, string, error) {
@ -17,10 +16,10 @@ func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string
Host: host,
})
if err != nil {
if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented {
if grpcerrors.Code(err) == codes.Unimplemented {
return "", "", nil
}
return "", "", errors.WithStack(err)
return "", "", err
}
return resp.Username, resp.Secret, nil
}

View File

@ -41,7 +41,7 @@ type streamWriterCloser struct {
func (wc *streamWriterCloser) Write(dt []byte) (int, error) {
if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil {
// SendMsg returns EOF on remote errors
if errors.Cause(err) == io.EOF {
if errors.Is(err, io.EOF) {
if err := errors.WithStack(wc.ClientStream.RecvMsg(struct{}{})); err != nil {
return 0, err
}
@ -105,7 +105,7 @@ func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error {
for {
bm := BytesMessage{}
if err := ds.RecvMsg(&bm); err != nil {
if errors.Cause(err) == io.EOF {
if errors.Is(err, io.EOF) {
return nil
}
return errors.WithStack(err)

View File

@ -255,7 +255,7 @@ func (sp *fsSyncTarget) Register(server *grpc.Server) {
RegisterFileSendServer(server, sp)
}
func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) (err error) {
if sp.outdir != "" {
return syncTargetDiffCopy(stream, sp.outdir)
}
@ -277,7 +277,12 @@ func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
if wc == nil {
return status.Errorf(codes.AlreadyExists, "target already exists")
}
defer wc.Close()
defer func() {
err1 := wc.Close()
if err != nil {
err = err1
}
}()
return writeTargetFile(stream, wc)
}
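The signature change to a named return value lets the deferred Close feed into the returned error. For reference, a generic sketch of that pattern (not the vendored code; assumes io is imported) that keeps the first error rather than overwriting it:
func writeAndClose(wc io.WriteCloser, dt []byte) (err error) {
	defer func() {
		// Only surface the Close error if nothing failed earlier.
		if cerr := wc.Close(); err == nil {
			err = cerr
		}
	}()
	_, err = wc.Write(dt)
	return err
}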

View File

@ -6,7 +6,9 @@ import (
"sync/atomic"
"time"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/moby/buildkit/util/grpcerrors"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -25,6 +27,9 @@ func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
}
func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) {
var unary []grpc.UnaryClientInterceptor
var stream []grpc.StreamClientInterceptor
var dialCount int64
dialer := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
if c := atomic.AddInt64(&dialCount, 1); c > 1 {
@ -40,10 +45,23 @@ func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.
if span := opentracing.SpanFromContext(ctx); span != nil {
tracer := span.Tracer()
dialOpts = append(dialOpts,
grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer, traceFilter())),
grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter())),
)
unary = append(unary, otgrpc.OpenTracingClientInterceptor(tracer, traceFilter()))
stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter()))
}
unary = append(unary, grpcerrors.UnaryClientInterceptor)
stream = append(stream, grpcerrors.StreamClientInterceptor)
if len(unary) == 1 {
dialOpts = append(dialOpts, grpc.WithUnaryInterceptor(unary[0]))
} else if len(unary) > 1 {
dialOpts = append(dialOpts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...)))
}
if len(stream) == 1 {
dialOpts = append(dialOpts, grpc.WithStreamInterceptor(stream[0]))
} else if len(stream) > 1 {
dialOpts = append(dialOpts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...)))
}
cc, err := grpc.DialContext(ctx, "", dialOpts...)
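The interceptors are now collected into slices and only wrapped with go-grpc-middleware when more than one is registered, so the single-interceptor case avoids the chaining indirection. A condensed sketch of the unary half (hypothetical helper name, same packages as imported above):
func withUnary(opts []grpc.DialOption, unary ...grpc.UnaryClientInterceptor) []grpc.DialOption {
	switch len(unary) {
	case 0:
		return opts
	case 1:
		return append(opts, grpc.WithUnaryInterceptor(unary[0]))
	default:
		return append(opts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...)))
	}
}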

View File

@ -4,9 +4,9 @@ import (
"context"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type SecretStore interface {
@ -21,10 +21,10 @@ func GetSecret(ctx context.Context, c session.Caller, id string) ([]byte, error)
ID: id,
})
if err != nil {
if st, ok := status.FromError(errors.Cause(err)); ok && (st.Code() == codes.Unimplemented || st.Code() == codes.NotFound) {
if code := grpcerrors.Code(err); code == codes.Unimplemented || code == codes.NotFound {
return nil, errors.Wrapf(ErrNotFound, "secret %s not found", id)
}
return nil, errors.WithStack(err)
return nil, err
}
return resp.Data, nil
}

View File

@ -31,7 +31,7 @@ func (sp *secretProvider) Register(server *grpc.Server) {
func (sp *secretProvider) GetSecret(ctx context.Context, req *secrets.GetSecretRequest) (*secrets.GetSecretResponse, error) {
dt, err := sp.store.GetSecret(ctx, req.ID)
if err != nil {
if errors.Cause(err) == secrets.ErrNotFound {
if errors.Is(err, secrets.ErrNotFound) {
return nil, status.Errorf(codes.NotFound, err.Error())
}
return nil, err

View File

@ -5,8 +5,10 @@ import (
"net"
"strings"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/grpcerrors"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
@ -45,13 +47,29 @@ type Session struct {
func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) {
id := identity.NewID()
var unary []grpc.UnaryServerInterceptor
var stream []grpc.StreamServerInterceptor
serverOpts := []grpc.ServerOption{}
if span := opentracing.SpanFromContext(ctx); span != nil {
tracer := span.Tracer()
serverOpts = []grpc.ServerOption{
grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter())),
grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer, traceFilter())),
}
unary = append(unary, otgrpc.OpenTracingServerInterceptor(tracer, traceFilter()))
stream = append(stream, otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter()))
}
unary = append(unary, grpcerrors.UnaryServerInterceptor)
stream = append(stream, grpcerrors.StreamServerInterceptor)
if len(unary) == 1 {
serverOpts = append(serverOpts, grpc.UnaryInterceptor(unary[0]))
} else if len(unary) > 1 {
serverOpts = append(serverOpts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unary...)))
}
if len(stream) == 1 {
serverOpts = append(serverOpts, grpc.StreamInterceptor(stream[0]))
} else if len(stream) > 1 {
serverOpts = append(serverOpts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(stream...)))
}
s := &Session{

File diff suppressed because it is too large

View File

@ -177,6 +177,42 @@ message OpMetadata {
map<string, bool> caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false];
}
// Source is a source mapping description for a file
message Source {
map<string, Locations> locations = 1;
repeated SourceInfo infos = 2;
}
// Locations is a list of ranges with an index to its source map.
message Locations {
repeated Location locations = 1;
}
// SourceInfo contains the shared metadata of a source mapping
message SourceInfo {
string filename = 1;
bytes data = 2;
Definition definition = 3;
}
// Location defines a list of areas in the source file
message Location {
int32 sourceIndex = 1;
repeated Range ranges = 2;
}
// Range is an area in the source file
message Range {
Position start = 1 [(gogoproto.nullable) = false];
Position end = 2 [(gogoproto.nullable) = false];
}
// Position is a single location in a source file
message Position {
int32 Line = 1;
int32 Character = 2;
}
message ExportCache {
bool Value = 1;
}
@ -200,6 +236,8 @@ message Definition {
// metadata contains metadata for each of the Op messages.
// A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future.
map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Source contains the source mapping information for the vertexes in the definition
Source Source = 3;
}
message HostIP {
@ -302,4 +340,4 @@ message UserOpt {
message NamedUserOpt {
string name = 1;
int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
}
}
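For illustration only (assuming the Go types that gogo/protobuf generates from the Source, Locations, SourceInfo, Location, Range and Position messages added above, in package pb): a frontend could map an LLB vertex digest to a line of its Dockerfile roughly like this, where dockerfileBytes and vertexDigest are placeholders:
src := &pb.Source{
	Infos: []*pb.SourceInfo{{Filename: "Dockerfile", Data: dockerfileBytes}},
	Locations: map[string]*pb.Locations{
		vertexDigest.String(): {
			Locations: []*pb.Location{{
				SourceIndex: 0, // index into Infos
				Ranges: []*pb.Range{{
					Start: pb.Position{Line: 3, Character: 0},
					End:   pb.Position{Line: 3, Character: 0},
				}},
			}},
		},
	},
}
def.Source = src // attach the mapping to the pb.Definition being solved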

View File

@ -43,7 +43,7 @@ func WhiteList(allowed, supported []Entitlement) (Set, error) {
}
if supported != nil {
if !supm.Allowed(e) {
return nil, errors.Errorf("entitlement %s is not allowed", e)
return nil, errors.Errorf("granting entitlement %s is not allowed by build daemon configuration", e)
}
}
m[e] = struct{}{}

View File

@ -35,7 +35,7 @@ func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context)
var backoff time.Duration
for {
v, err = g.do(ctx, key, fn)
if err == nil || errors.Cause(err) != errRetry {
if err == nil || !errors.Is(err, errRetry) {
return v, err
}
// backoff logic
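Several hunks in this update replace errors.Cause(err) == sentinel with errors.Is(err, sentinel). The difference matters once errors are wrapped with the standard library: errors.Cause only unwraps github.com/pkg/errors wrappers, while errors.Is walks any chain that implements Unwrap. A minimal illustration (not part of the diff; assumes fmt is imported):
wrapped := errors.WithStack(errRetry)                   // pkg/errors wrapper
stdWrapped := fmt.Errorf("flightcontrol: %w", errRetry) // stdlib %w wrapper

_ = errors.Is(wrapped, errRetry)         // true
_ = errors.Is(stdWrapped, errRetry)      // true
_ = errors.Cause(stdWrapped) == errRetry // false: Cause cannot unwrap %w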

View File

@ -0,0 +1,188 @@
package grpcerrors
import (
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/moby/buildkit/util/stack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type TypedError interface {
ToProto() TypedErrorProto
}
type TypedErrorProto interface {
proto.Message
WrapError(error) error
}
func ToGRPC(err error) error {
if err == nil {
return nil
}
st, ok := AsGRPCStatus(err)
if !ok || st == nil {
st = status.New(Code(err), err.Error())
}
if st.Code() != Code(err) {
pb := st.Proto()
pb.Code = int32(Code(err))
st = status.FromProto(pb)
}
var details []proto.Message
for _, st := range stack.Traces(err) {
details = append(details, st)
}
each(err, func(err error) {
if te, ok := err.(TypedError); ok {
details = append(details, te.ToProto())
}
})
if len(details) > 0 {
if st2, err := st.WithDetails(details...); err == nil {
st = st2
}
}
return st.Err()
}
func Code(err error) codes.Code {
if se, ok := err.(interface {
Code() codes.Code
}); ok {
return se.Code()
}
if se, ok := err.(interface {
GRPCStatus() *status.Status
}); ok {
return se.GRPCStatus().Code()
}
wrapped, ok := err.(interface {
Unwrap() error
})
if ok {
return Code(wrapped.Unwrap())
}
return status.FromContextError(err).Code()
}
func WrapCode(err error, code codes.Code) error {
return &withCode{error: err, code: code}
}
func AsGRPCStatus(err error) (*status.Status, bool) {
if err == nil {
return nil, true
}
if se, ok := err.(interface {
GRPCStatus() *status.Status
}); ok {
return se.GRPCStatus(), true
}
wrapped, ok := err.(interface {
Unwrap() error
})
if ok {
return AsGRPCStatus(wrapped.Unwrap())
}
return nil, false
}
func FromGRPC(err error) error {
if err == nil {
return nil
}
st, ok := status.FromError(err)
if !ok {
return err
}
pb := st.Proto()
n := &spb.Status{
Code: pb.Code,
Message: pb.Message,
}
details := make([]TypedErrorProto, 0, len(pb.Details))
stacks := make([]*stack.Stack, 0, len(pb.Details))
// details that we don't understand are copied as proto
for _, d := range pb.Details {
var m interface{}
detail := &ptypes.DynamicAny{}
if err := ptypes.UnmarshalAny(d, detail); err != nil {
detail := &gogotypes.DynamicAny{}
if err := gogotypes.UnmarshalAny(gogoAny(d), detail); err != nil {
n.Details = append(n.Details, d)
continue
}
m = detail.Message
} else {
m = detail.Message
}
switch v := m.(type) {
case *stack.Stack:
stacks = append(stacks, v)
case TypedErrorProto:
details = append(details, v)
default:
n.Details = append(n.Details, d)
}
}
err = status.FromProto(n).Err()
for _, s := range stacks {
if s != nil {
err = stack.Wrap(err, *s)
}
}
for _, d := range details {
err = d.WrapError(err)
}
return stack.Enable(err)
}
type withCode struct {
code codes.Code
error
}
func (e *withCode) Unwrap() error {
return e.error
}
func each(err error, fn func(error)) {
fn(err)
if wrapped, ok := err.(interface {
Unwrap() error
}); ok {
each(wrapped.Unwrap(), fn)
}
}
func gogoAny(in *any.Any) *gogotypes.Any {
return &gogotypes.Any{
TypeUrl: in.TypeUrl,
Value: in.Value,
}
}
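Taken together, the two directions are meant to round-trip an error across the gRPC boundary; the interceptors in the next file apply them automatically. A rough usage sketch from a consumer's point of view (errSomething is a placeholder; assumes fmt, pkg/errors, grpcerrors and util/stack are imported):
// sender: collapse the wrapped error into a status; stack traces become details
wireErr := grpcerrors.ToGRPC(errors.Wrap(errSomething, "solve failed"))

// receiver: rebuild a Go error; the code survives and %+v prints the frames
err := grpcerrors.FromGRPC(wireErr)
fmt.Printf("%v %+v\n", grpcerrors.Code(err), stack.Formatter(err))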

View File

@ -0,0 +1,28 @@
package grpcerrors
import (
"context"
"google.golang.org/grpc"
)
func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
resp, err = handler(ctx, req)
if err != nil {
err = ToGRPC(err)
}
return resp, err
}
func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
return ToGRPC(handler(srv, ss))
}
func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
return FromGRPC(invoker(ctx, method, req, reply, cc, opts...))
}
func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
s, err := streamer(ctx, desc, cc, method, opts...)
return s, ToGRPC(err)
}
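These are plain grpc interceptors, so outside of the session package (which chains them with go-grpc-middleware, as shown above) they can be registered directly; a minimal sketch:
srv := grpc.NewServer(
	grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
	grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
)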

View File

@ -548,6 +548,9 @@ func align(l, r string, w int) string {
}
func wrapHeight(j []*job, limit int) []*job {
if limit < 0 {
return nil
}
var wrapped []*job
wrapped = append(wrapped, j...)
if len(j) > limit {

View File

@ -57,11 +57,11 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
p.notFirst = true
}
if os.Getenv("PROGRESS_NO_TRUNC") == "1" {
if os.Getenv("PROGRESS_NO_TRUNC") == "0" {
fmt.Fprintf(p.w, "#%d %s\n", v.index, limitString(v.Name, 72))
} else {
fmt.Fprintf(p.w, "#%d %s\n", v.index, v.Name)
fmt.Fprintf(p.w, "#%d %s\n", v.index, v.Digest)
} else {
fmt.Fprintf(p.w, "#%d %s\n", v.index, limitString(v.Name, 72))
}
}

View File

@ -0,0 +1,3 @@
package stack
//go:generate protoc -I=. -I=../../vendor/ --go_out=. stack.proto
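(The directive above is what produces the stack.pb.go file added below; running go generate ./util/stack/ from the buildkit module root should regenerate it, assuming protoc and protoc-gen-go are installed.)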

vendor/github.com/moby/buildkit/util/stack/stack.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
package stack
import (
"fmt"
io "io"
"os"
"strconv"
"strings"
"github.com/pkg/errors"
)
var version string
var revision string
func SetVersionInfo(v, r string) {
version = v
revision = r
}
func Traces(err error) []*Stack {
var st []*Stack
wrapped, ok := err.(interface {
Unwrap() error
})
if ok {
st = Traces(wrapped.Unwrap())
}
if ste, ok := err.(interface {
StackTrace() errors.StackTrace
}); ok {
st = append(st, convertStack(ste.StackTrace()))
}
if ste, ok := err.(interface {
StackTrace() *Stack
}); ok {
st = append(st, ste.StackTrace())
}
return st
}
func Enable(err error) error {
if err == nil {
return nil
}
if !hasLocalStackTrace(err) {
return errors.WithStack(err)
}
return err
}
func Wrap(err error, s Stack) error {
return &withStack{stack: s, error: err}
}
func hasLocalStackTrace(err error) bool {
wrapped, ok := err.(interface {
Unwrap() error
})
if ok && hasLocalStackTrace(wrapped.Unwrap()) {
return true
}
_, ok = err.(interface {
StackTrace() errors.StackTrace
})
return ok
}
func Formatter(err error) fmt.Formatter {
return &formatter{err}
}
type formatter struct {
error
}
func (w *formatter) Format(s fmt.State, verb rune) {
if w.error == nil {
fmt.Fprintf(s, "%v", w.error)
return
}
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%s\n", w.Error())
for _, stack := range Traces(w.error) {
fmt.Fprintf(s, "%d %s %s\n", stack.Pid, stack.Version, strings.Join(stack.Cmdline, " "))
for _, f := range stack.Frames {
fmt.Fprintf(s, "%s\n\t%s:%d\n", f.Name, f.File, f.Line)
}
fmt.Fprintln(s)
}
return
}
fallthrough
case 's':
io.WriteString(s, w.Error())
case 'q':
fmt.Fprintf(s, "%q", w.Error())
}
}
func convertStack(s errors.StackTrace) *Stack {
var out Stack
for _, f := range s {
dt, err := f.MarshalText()
if err != nil {
continue
}
p := strings.SplitN(string(dt), " ", 2)
if len(p) != 2 {
continue
}
idx := strings.LastIndexByte(p[1], ':')
if idx == -1 {
continue
}
line, err := strconv.Atoi(p[1][idx+1:])
if err != nil {
continue
}
out.Frames = append(out.Frames, &Frame{
Name: p[0],
File: p[1][:idx],
Line: int32(line),
})
}
out.Cmdline = os.Args
out.Pid = int32(os.Getpid())
out.Version = version
out.Revision = revision
return &out
}
type withStack struct {
stack Stack
error
}
func (e *withStack) Unwrap() error {
return e.error
}
func (e *withStack) StackTrace() *Stack {
return &e.stack
}
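A short usage sketch of the package as a whole (doSomething is a placeholder; version and revision are normally set at build time):
stack.SetVersionInfo("dev", "unknown")    // placeholder values
err := stack.Enable(doSomething())        // guarantee at least one stack trace
fmt.Printf("%+v\n", stack.Formatter(err)) // frames plus pid, cmdline, version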

vendor/github.com/moby/buildkit/util/stack/stack.pb.go generated vendored Normal file
View File

@ -0,0 +1,170 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: stack.proto
package stack
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Stack struct {
Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"`
Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Stack) Reset() { *m = Stack{} }
func (m *Stack) String() string { return proto.CompactTextString(m) }
func (*Stack) ProtoMessage() {}
func (*Stack) Descriptor() ([]byte, []int) {
return fileDescriptor_b44c07feb2ca0a5a, []int{0}
}
func (m *Stack) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Stack.Unmarshal(m, b)
}
func (m *Stack) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Stack.Marshal(b, m, deterministic)
}
func (m *Stack) XXX_Merge(src proto.Message) {
xxx_messageInfo_Stack.Merge(m, src)
}
func (m *Stack) XXX_Size() int {
return xxx_messageInfo_Stack.Size(m)
}
func (m *Stack) XXX_DiscardUnknown() {
xxx_messageInfo_Stack.DiscardUnknown(m)
}
var xxx_messageInfo_Stack proto.InternalMessageInfo
func (m *Stack) GetFrames() []*Frame {
if m != nil {
return m.Frames
}
return nil
}
func (m *Stack) GetCmdline() []string {
if m != nil {
return m.Cmdline
}
return nil
}
func (m *Stack) GetPid() int32 {
if m != nil {
return m.Pid
}
return 0
}
func (m *Stack) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *Stack) GetRevision() string {
if m != nil {
return m.Revision
}
return ""
}
type Frame struct {
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"`
Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Frame) Reset() { *m = Frame{} }
func (m *Frame) String() string { return proto.CompactTextString(m) }
func (*Frame) ProtoMessage() {}
func (*Frame) Descriptor() ([]byte, []int) {
return fileDescriptor_b44c07feb2ca0a5a, []int{1}
}
func (m *Frame) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Frame.Unmarshal(m, b)
}
func (m *Frame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Frame.Marshal(b, m, deterministic)
}
func (m *Frame) XXX_Merge(src proto.Message) {
xxx_messageInfo_Frame.Merge(m, src)
}
func (m *Frame) XXX_Size() int {
return xxx_messageInfo_Frame.Size(m)
}
func (m *Frame) XXX_DiscardUnknown() {
xxx_messageInfo_Frame.DiscardUnknown(m)
}
var xxx_messageInfo_Frame proto.InternalMessageInfo
func (m *Frame) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Frame) GetFile() string {
if m != nil {
return m.File
}
return ""
}
func (m *Frame) GetLine() int32 {
if m != nil {
return m.Line
}
return 0
}
func init() {
proto.RegisterType((*Stack)(nil), "stack.Stack")
proto.RegisterType((*Frame)(nil), "stack.Frame")
}
func init() { proto.RegisterFile("stack.proto", fileDescriptor_b44c07feb2ca0a5a) }
var fileDescriptor_b44c07feb2ca0a5a = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x3d, 0xce, 0x82, 0x40,
0x10, 0x86, 0xb3, 0xdf, 0xb2, 0x7c, 0x3a, 0x58, 0x98, 0xa9, 0x36, 0x56, 0x1b, 0x62, 0x41, 0x45,
0xa1, 0x47, 0x30, 0xa1, 0x32, 0x16, 0x78, 0x02, 0x84, 0x35, 0xd9, 0xc8, 0x5f, 0x76, 0x09, 0xd7,
0xf0, 0xca, 0x66, 0x06, 0xb4, 0x7b, 0xde, 0x9f, 0xe4, 0x9d, 0x81, 0x24, 0x4c, 0x55, 0xfd, 0xca,
0x47, 0x3f, 0x4c, 0x03, 0x2a, 0x16, 0xe9, 0x5b, 0x80, 0xba, 0x13, 0xe1, 0x11, 0xe2, 0xa7, 0xaf,
0x3a, 0x1b, 0xb4, 0x30, 0x32, 0x4b, 0x4e, 0xbb, 0x7c, 0xa9, 0x17, 0x64, 0x96, 0x6b, 0x86, 0x1a,
0xfe, 0xeb, 0xae, 0x69, 0x5d, 0x6f, 0xf5, 0x9f, 0x91, 0xd9, 0xb6, 0xfc, 0x4a, 0xdc, 0x83, 0x1c,
0x5d, 0xa3, 0xa5, 0x11, 0x99, 0x2a, 0x09, 0xa9, 0x3b, 0x5b, 0x1f, 0xdc, 0xd0, 0xeb, 0xc8, 0x08,
0xea, 0xae, 0x12, 0x0f, 0xb0, 0xf1, 0x76, 0x76, 0x1c, 0x29, 0x8e, 0x7e, 0x3a, 0xbd, 0x80, 0xe2,
0x49, 0x44, 0x88, 0x6e, 0x55, 0x67, 0xb5, 0xe0, 0x02, 0x33, 0x79, 0x85, 0x6b, 0x69, 0x9b, 0x3d,
0x62, 0xf2, 0xae, 0x74, 0xcf, 0xb2, 0xcc, 0xfc, 0x88, 0xf9, 0xc9, 0xf3, 0x27, 0x00, 0x00, 0xff,
0xff, 0xfd, 0x2c, 0xbb, 0xfb, 0xf3, 0x00, 0x00, 0x00,
}

vendor/github.com/moby/buildkit/util/stack/stack.proto generated vendored Normal file
View File

@ -0,0 +1,17 @@
syntax = "proto3";
package stack;
message Stack {
repeated Frame frames = 1;
repeated string cmdline = 2;
int32 pid = 3;
string version = 4;
string revision = 5;
}
message Frame {
string Name = 1;
string File = 2;
int32 Line = 3;
}

View File

@ -1,155 +0,0 @@
// +build linux
package system
import (
"os"
"os/exec"
"syscall" // only for exec
"unsafe"
"github.com/opencontainers/runc/libcontainer/user"
"golang.org/x/sys/unix"
)
// If arg2 is nonzero, set the "child subreaper" attribute of the
// calling process; if arg2 is zero, unset the attribute. When a
// process is marked as a child subreaper, all of the children
// that it creates, and their descendants, will be marked as
// having a subreaper. In effect, a subreaper fulfills the role
// of init(1) for its descendant processes. Upon termination of
// a process that is orphaned (i.e., its immediate parent has
// already terminated) and marked as having a subreaper, the
// nearest still living ancestor subreaper will receive a SIGCHLD
// signal and be able to wait(2) on the process to discover its
// termination status.
const PR_SET_CHILD_SUBREAPER = 36
type ParentDeathSignal int
func (p ParentDeathSignal) Restore() error {
if p == 0 {
return nil
}
current, err := GetParentDeathSignal()
if err != nil {
return err
}
if p == current {
return nil
}
return p.Set()
}
func (p ParentDeathSignal) Set() error {
return SetParentDeathSignal(uintptr(p))
}
func Execv(cmd string, args []string, env []string) error {
name, err := exec.LookPath(cmd)
if err != nil {
return err
}
return syscall.Exec(name, args, env)
}
func Prlimit(pid, resource int, limit unix.Rlimit) error {
_, _, err := unix.RawSyscall6(unix.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0)
if err != 0 {
return err
}
return nil
}
func SetParentDeathSignal(sig uintptr) error {
if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil {
return err
}
return nil
}
func GetParentDeathSignal() (ParentDeathSignal, error) {
var sig int
if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil {
return -1, err
}
return ParentDeathSignal(sig), nil
}
func SetKeepCaps() error {
if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil {
return err
}
return nil
}
func ClearKeepCaps() error {
if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil {
return err
}
return nil
}
func Setctty() error {
if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil {
return err
}
return nil
}
// RunningInUserNS detects whether we are currently running in a user namespace.
// Originally copied from github.com/lxc/lxd/shared/util.go
func RunningInUserNS() bool {
uidmap, err := user.CurrentProcessUIDMap()
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return false
}
return UIDMapInUserNS(uidmap)
}
func UIDMapInUserNS(uidmap []user.IDMap) bool {
/*
* We assume we are in the initial user namespace if we have a full
* range - 4294967295 uids starting at uid 0.
*/
if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 {
return false
}
return true
}
// GetParentNSeuid returns the euid within the parent user namespace
func GetParentNSeuid() int64 {
euid := int64(os.Geteuid())
uidmap, err := user.CurrentProcessUIDMap()
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return euid
}
for _, um := range uidmap {
if um.ID <= euid && euid <= um.ID+um.Count-1 {
return um.ParentID + euid - um.ID
}
}
return euid
}
// SetSubreaper sets the value i as the subreaper setting for the calling process
func SetSubreaper(i int) error {
return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
}
// GetSubreaper returns the subreaper setting for the calling process
func GetSubreaper() (int, error) {
var i uintptr
if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil {
return -1, err
}
return int(i), nil
}

View File

@ -1,113 +0,0 @@
package system
import (
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
// State is the status of a process.
type State rune
const ( // Only values for Linux 3.14 and later are listed here
Dead State = 'X'
DiskSleep State = 'D'
Running State = 'R'
Sleeping State = 'S'
Stopped State = 'T'
TracingStop State = 't'
Zombie State = 'Z'
)
// String forms of the state from proc(5)'s documentation for
// /proc/[pid]/status' "State" field.
func (s State) String() string {
switch s {
case Dead:
return "dead"
case DiskSleep:
return "disk sleep"
case Running:
return "running"
case Sleeping:
return "sleeping"
case Stopped:
return "stopped"
case TracingStop:
return "tracing stop"
case Zombie:
return "zombie"
default:
return fmt.Sprintf("unknown (%c)", s)
}
}
// Stat_t represents the information from /proc/[pid]/stat, as
// described in proc(5) with names based on the /proc/[pid]/status
// fields.
type Stat_t struct {
// PID is the process ID.
PID uint
// Name is the command run by the process.
Name string
// State is the state of the process.
State State
// StartTime is the number of clock ticks after system boot (since
// Linux 2.6).
StartTime uint64
}
// Stat returns a Stat_t instance for the specified process.
func Stat(pid int) (stat Stat_t, err error) {
bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
if err != nil {
return stat, err
}
return parseStat(string(bytes))
}
// GetProcessStartTime is deprecated. Use Stat(pid) and
// Stat_t.StartTime instead.
func GetProcessStartTime(pid int) (string, error) {
stat, err := Stat(pid)
if err != nil {
return "", err
}
return fmt.Sprintf("%d", stat.StartTime), nil
}
func parseStat(data string) (stat Stat_t, err error) {
// From proc(5), field 2 could contain space and is inside `(` and `)`.
// The following is an example:
// 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
i := strings.LastIndex(data, ")")
if i <= 2 || i >= len(data)-1 {
return stat, fmt.Errorf("invalid stat data: %q", data)
}
parts := strings.SplitN(data[:i], "(", 2)
if len(parts) != 2 {
return stat, fmt.Errorf("invalid stat data: %q", data)
}
stat.Name = parts[1]
_, err = fmt.Sscanf(parts[0], "%d", &stat.PID)
if err != nil {
return stat, err
}
// parts indexes should be offset by 3 from the field number given
// proc(5), because parts is zero-indexed and we've removed fields
// one (PID) and two (Name) in the paren-split.
parts = strings.Split(data[i+2:], " ")
var state int
fmt.Sscanf(parts[3-3], "%c", &state)
stat.State = State(state)
fmt.Sscanf(parts[22-3], "%d", &stat.StartTime)
return stat, nil
}

View File

@ -1,26 +0,0 @@
// +build linux
// +build 386 arm
package system
import (
"golang.org/x/sys/unix"
)
// Setuid sets the uid of the calling thread to the specified uid.
func Setuid(uid int) (err error) {
_, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0)
if e1 != 0 {
err = e1
}
return
}
// Setgid sets the gid of the calling thread to the specified gid.
func Setgid(gid int) (err error) {
_, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0)
if e1 != 0 {
err = e1
}
return
}

View File

@ -1,26 +0,0 @@
// +build linux
// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le riscv64 s390x
package system
import (
"golang.org/x/sys/unix"
)
// Setuid sets the uid of the calling thread to the specified uid.
func Setuid(uid int) (err error) {
_, _, e1 := unix.RawSyscall(unix.SYS_SETUID, uintptr(uid), 0, 0)
if e1 != 0 {
err = e1
}
return
}
// Setgid sets the gid of the calling thread to the specified gid.
func Setgid(gid int) (err error) {
_, _, e1 := unix.RawSyscall(unix.SYS_SETGID, uintptr(gid), 0, 0)
if e1 != 0 {
err = e1
}
return
}

View File

@ -1,12 +0,0 @@
// +build cgo,linux
package system
/*
#include <unistd.h>
*/
import "C"
func GetClockTicks() int {
return int(C.sysconf(C._SC_CLK_TCK))
}

View File

@ -1,15 +0,0 @@
// +build !cgo windows
package system
func GetClockTicks() int {
// TODO figure out a better alternative for platforms where we're missing cgo
//
// TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency().
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx
//
// An example of its usage can be found here.
// https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
return 100
}

View File

@ -1,27 +0,0 @@
// +build !linux
package system
import (
"os"
"github.com/opencontainers/runc/libcontainer/user"
)
// RunningInUserNS is a stub for non-Linux systems
// Always returns false
func RunningInUserNS() bool {
return false
}
// UIDMapInUserNS is a stub for non-Linux systems
// Always returns false
func UIDMapInUserNS(uidmap []user.IDMap) bool {
return false
}
// GetParentNSeuid returns the euid within the parent user namespace
// Always returns os.Geteuid on non-linux
func GetParentNSeuid() int {
return os.Geteuid()
}

View File

@ -1,35 +0,0 @@
package system
import "golang.org/x/sys/unix"
// Returns a []byte slice if the xattr is set and nil otherwise
// Requires path and its attribute as arguments
func Lgetxattr(path string, attr string) ([]byte, error) {
var sz int
// Start with a 128 length byte array
dest := make([]byte, 128)
sz, errno := unix.Lgetxattr(path, attr, dest)
switch {
case errno == unix.ENODATA:
return nil, errno
case errno == unix.ENOTSUP:
return nil, errno
case errno == unix.ERANGE:
// 128 byte array might just not be good enough,
// A dummy buffer is used to get the real size
// of the xattrs on disk
sz, errno = unix.Lgetxattr(path, attr, []byte{})
if errno != nil {
return nil, errno
}
dest = make([]byte, sz)
sz, errno = unix.Lgetxattr(path, attr, dest)
if errno != nil {
return nil, errno
}
case errno != nil:
return nil, errno
}
return dest[:sz], nil
}