mirror of https://github.com/docker/cli.git

vendor: buildkit v0.8.0-rc2, docker

full diff (docker/docker): af34b94a78...6c0a036dce
full diff (moby/buildkit): 4d1f260e84...v0.8.0-rc2

New dependencies:
- go.opencensus.io v0.22.3
- github.com/containerd/typeurl v1.0.1
- github.com/golang/groupcache 869f871628b6baa9cfbc11732cdf6546b17c1298

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

This commit is contained in:
parent 4cab568abb
commit 9a0a071d55
@@ -4,8 +4,8 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/docker/docker/builder/dockerignore"
 	"github.com/docker/docker/pkg/fileutils"
+	"github.com/moby/buildkit/frontend/dockerfile/dockerignore"
 )
 
 // ReadDockerignore reads the .dockerignore file in the context directory and
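For context, this hunk swaps the deprecated docker/docker copy of the dockerignore parser for the buildkit one. Below is a minimal sketch of reading a .dockerignore with the new import; the helper is illustrative and not part of this diff, and ReadAll's signature is taken from the shim removed later in this commit.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/moby/buildkit/frontend/dockerfile/dockerignore"
)

// readDockerignore is a hypothetical helper mirroring what the CLI does after
// this change: open <contextDir>/.dockerignore and parse it with the buildkit
// copy of the dockerignore package.
func readDockerignore(contextDir string) ([]string, error) {
	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	switch {
	case os.IsNotExist(err):
		// A missing .dockerignore simply means no extra exclusions.
		return nil, nil
	case err != nil:
		return nil, err
	}
	defer f.Close()
	return dockerignore.ReadAll(f)
}

func main() {
	patterns, err := readDockerignore(".")
	if err != nil {
		panic(err)
	}
	fmt.Println("ignore patterns:", patterns)
}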
@@ -7,13 +7,14 @@ github.com/containerd/console 5d7e1412f07b502a01029ea20e20
 github.com/containerd/containerd 0edc412565dcc6e3d6125ff9e4b009ad4b89c638 # master (v1.5.0-dev)
 github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
 github.com/containerd/cgroups 0b889c03f102012f1d93a97ddd3ef71cd6f4f510
+github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1
 github.com/coreos/etcd d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9 # v3.3.12
 github.com/cpuguy83/go-md2man/v2 f79a8a8ca69da163eee19ab442bedad7a35bba5a # v2.0.0
 github.com/creack/pty 2a38352e8b4d7ab6c336eef107e42a55e72e7fbc # v1.1.11
 github.com/davecgh/go-spew 8991bc29aa16c548c550c7ff78260e27b9ab7c73 # v1.1.1
 github.com/docker/compose-on-kubernetes 78e6a00beda64ac8ccb9fec787e601fe2ce0d5bb # v0.5.0-alpha1
 github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580
-github.com/docker/docker af34b94a78a194ae9bce48a200241d08c05b7187
+github.com/docker/docker 6c0a036dce2051cc0d73221965e76e3d2e6a8a3a # master (v20.10.0-dev)
 github.com/docker/docker-credential-helpers 54f0238b6bf101fc3ad3b34114cb5520beb562f5 # v0.6.3
 github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 # Contains a customized version of canonical/json and is used by Notary. The package is periodically rebased on current Go versions.
 github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0

@@ -27,6 +28,7 @@ github.com/gofrs/flock 6caa7350c26b838538005fae7dbe
 github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
 github.com/gogo/protobuf 5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
 github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
+github.com/golang/groupcache 869f871628b6baa9cfbc11732cdf6546b17c1298
 github.com/golang/protobuf 84668698ea25b64748563aa20726db66a6b8d299 # v1.3.5
 github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
 github.com/google/gofuzz 24818f796faf91cd76ec7bddd72458fbced7a6c1

@@ -46,7 +48,7 @@ github.com/Microsoft/go-winio 5b44b70ab3ab4d291a7c1d28afe7
 github.com/Microsoft/hcsshim 5bc557dd210ff2caf615e6e22d398123de77fc11 # v0.8.9
 github.com/miekg/pkcs11 210dc1e16747c5ba98a03bcbcf728c38086ea357 # v1.0.3
 github.com/mitchellh/mapstructure d16e9488127408e67948eb43b6d3fbb9f222da10 # v1.3.2
-github.com/moby/buildkit 4d1f260e8490ec438ab66e08bb105577aca0ce06
+github.com/moby/buildkit fcb87e6b8ccf3631a65799cc56caa76f9117816e # v0.8.0-rc2
 github.com/moby/sys 1bc8673b57550ddf85262eb0fed0aac651a37dab # symlink/v0.1.0 (latest tag, either mount/vXXX, mountinfo/vXXX or symlink/vXXX)
 github.com/moby/term bea5bbe245bf407372d477f1361d2ff042d2f556
 github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3

@@ -73,6 +75,7 @@ github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e
 github.com/xeipuuv/gojsonpointer 02993c407bfbf5f6dae44c4f4b1cf6a39b5fc5bb
 github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
 github.com/xeipuuv/gojsonschema 82fcdeb203eb6ab2a67d0a623d9c19e5e5a64927 # v1.2.0
+go.opencensus.io d835ff86be02193d324330acdb7d65546b05f814 # v0.22.3
 golang.org/x/crypto c1f2f97bffc9c53fc40a1a28a5b460094c0050d9
 golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7
 golang.org/x/oauth2 bf48bf16ab8d622ce64ec6ce98d2c98f916b6303
@@ -0,0 +1,32 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package defaults

const (
	// DefaultMaxRecvMsgSize defines the default maximum message size for
	// receiving protobufs passed over the GRPC API.
	DefaultMaxRecvMsgSize = 16 << 20
	// DefaultMaxSendMsgSize defines the default maximum message size for
	// sending protobufs passed over the GRPC API.
	DefaultMaxSendMsgSize = 16 << 20
	// DefaultRuntimeNSLabel defines the namespace label to check for the
	// default runtime
	DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
	// DefaultSnapshotterNSLabel defines the namespace label to check for the
	// default snapshotter
	DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
)
@@ -0,0 +1,37 @@
// +build !windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package defaults

const (
	// DefaultRootDir is the default location used by containerd to store
	// persistent data
	DefaultRootDir = "/var/lib/containerd"
	// DefaultStateDir is the default location used by containerd to store
	// transient data
	DefaultStateDir = "/run/containerd"
	// DefaultAddress is the default unix socket address
	DefaultAddress = "/run/containerd/containerd.sock"
	// DefaultDebugAddress is the default unix socket address for pprof data
	DefaultDebugAddress = "/run/containerd/debug.sock"
	// DefaultFIFODir is the default location used by client-side cio library
	// to store FIFOs.
	DefaultFIFODir = "/run/containerd/fifo"
	// DefaultRuntime is the default linux runtime
	DefaultRuntime = "io.containerd.runc.v2"
)
45 vendor/github.com/containerd/containerd/defaults/defaults_windows.go (generated, vendored, new file)
@@ -0,0 +1,45 @@
// +build windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package defaults

import (
	"os"
	"path/filepath"
)

var (
	// DefaultRootDir is the default location used by containerd to store
	// persistent data
	DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
	// DefaultStateDir is the default location used by containerd to store
	// transient data
	DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
)

const (
	// DefaultAddress is the default winpipe address
	DefaultAddress = `\\.\pipe\containerd-containerd`
	// DefaultDebugAddress is the default winpipe address for pprof data
	DefaultDebugAddress = `\\.\pipe\containerd-debug`
	// DefaultFIFODir is the default location used by client-side cio library
	// to store FIFOs. Unused on Windows.
	DefaultFIFODir = ""
	// DefaultRuntime is the default windows runtime
	DefaultRuntime = "io.containerd.runhcs.v1"
)
@@ -0,0 +1,19 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package defaults provides several common defaults for interacting with
// containerd. These can be used on the client-side or server-side.
package defaults
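These constants are consumed by containerd clients. As a rough, assumed usage sketch (the containerd client package itself is not part of this vendor bump), connecting on the platform default address looks like:

package main

import (
	"fmt"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
)

func main() {
	// Dial containerd on the platform default endpoint:
	// /run/containerd/containerd.sock on Linux, a named pipe on Windows.
	client, err := containerd.New(defaults.DefaultAddress)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	fmt.Println("connected to", defaults.DefaultAddress)
}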
202 vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go (generated, vendored, new file)
@@ -0,0 +1,202 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package auth

import (
	"context"
	"encoding/json"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/containerd/containerd/log"
	remoteserrors "github.com/containerd/containerd/remotes/errors"
	"github.com/pkg/errors"
	"golang.org/x/net/context/ctxhttp"
)

var (
	// ErrNoToken is returned if a request is successful but the body does not
	// contain an authorization token.
	ErrNoToken = errors.New("authorization server did not include a token in the response")
)

// GenerateTokenOptions generates options for fetching a token based on a challenge
func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) {
	realm, ok := c.Parameters["realm"]
	if !ok {
		return TokenOptions{}, errors.New("no realm specified for token auth challenge")
	}

	realmURL, err := url.Parse(realm)
	if err != nil {
		return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm")
	}

	to := TokenOptions{
		Realm:    realmURL.String(),
		Service:  c.Parameters["service"],
		Username: username,
		Secret:   secret,
	}

	scope, ok := c.Parameters["scope"]
	if ok {
		to.Scopes = append(to.Scopes, scope)
	} else {
		log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge")
	}

	return to, nil
}

// TokenOptions are options for requesting a token
type TokenOptions struct {
	Realm    string
	Service  string
	Scopes   []string
	Username string
	Secret   string
}

// OAuthTokenResponse is response from fetching token with a OAuth POST request
type OAuthTokenResponse struct {
	AccessToken  string    `json:"access_token"`
	RefreshToken string    `json:"refresh_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	Scope        string    `json:"scope"`
}

// FetchTokenWithOAuth fetches a token using a POST request
func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) {
	form := url.Values{}
	if len(to.Scopes) > 0 {
		form.Set("scope", strings.Join(to.Scopes, " "))
	}
	form.Set("service", to.Service)
	form.Set("client_id", clientID)

	if to.Username == "" {
		form.Set("grant_type", "refresh_token")
		form.Set("refresh_token", to.Secret)
	} else {
		form.Set("grant_type", "password")
		form.Set("username", to.Username)
		form.Set("password", to.Secret)
	}

	req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
	for k, v := range headers {
		req.Header[k] = append(req.Header[k], v...)
	}

	resp, err := ctxhttp.Do(ctx, client, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp))
	}

	decoder := json.NewDecoder(resp.Body)

	var tr OAuthTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return nil, errors.Wrap(err, "unable to decode token response")
	}

	if tr.AccessToken == "" {
		return nil, errors.WithStack(ErrNoToken)
	}

	return &tr, nil
}

// FetchTokenResponse is response from fetching token with GET request
type FetchTokenResponse struct {
	Token        string    `json:"token"`
	AccessToken  string    `json:"access_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	RefreshToken string    `json:"refresh_token"`
}

// FetchToken fetches a token using a GET request
func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) {
	req, err := http.NewRequest("GET", to.Realm, nil)
	if err != nil {
		return nil, err
	}

	for k, v := range headers {
		req.Header[k] = append(req.Header[k], v...)
	}

	reqParams := req.URL.Query()

	if to.Service != "" {
		reqParams.Add("service", to.Service)
	}

	for _, scope := range to.Scopes {
		reqParams.Add("scope", scope)
	}

	if to.Secret != "" {
		req.SetBasicAuth(to.Username, to.Secret)
	}

	req.URL.RawQuery = reqParams.Encode()

	resp, err := ctxhttp.Do(ctx, client, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp))
	}

	decoder := json.NewDecoder(resp.Body)

	var tr FetchTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return nil, errors.Wrap(err, "unable to decode token response")
	}

	// `access_token` is equivalent to `token` and if both are specified
	// the choice is undefined. Canonicalize `access_token` by sticking
	// things in `token`.
	if tr.AccessToken != "" {
		tr.Token = tr.AccessToken
	}

	if tr.Token == "" {
		return nil, errors.WithStack(ErrNoToken)
	}

	return &tr, nil
}
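A hedged usage sketch for the helpers above; the realm, service, and scope values are placeholders for what a registry's token challenge would normally provide (see GenerateTokenOptions and the parse.go file that follows).

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/containerd/containerd/remotes/docker/auth"
)

func main() {
	to := auth.TokenOptions{
		// Placeholder values; real ones come from the registry's
		// WWW-Authenticate bearer challenge.
		Realm:   "https://auth.docker.io/token",
		Service: "registry.docker.io",
		Scopes:  []string{"repository:library/hello-world:pull"},
	}

	// Anonymous GET request for a bearer token.
	resp, err := auth.FetchToken(context.Background(), http.DefaultClient, http.Header{}, to)
	if err != nil {
		panic(err)
	}
	fmt.Println("token expires in", resp.ExpiresIn, "seconds")
}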
203 vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go (generated, vendored, new file)
@@ -0,0 +1,203 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package auth

import (
	"net/http"
	"sort"
	"strings"
)

// AuthenticationScheme defines scheme of the authentication method
type AuthenticationScheme byte

const (
	// BasicAuth is scheme for Basic HTTP Authentication RFC 7617
	BasicAuth AuthenticationScheme = 1 << iota
	// DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616
	DigestAuth
	// BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750
	BearerAuth
)

// Challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type Challenge struct {
	// scheme is the auth-scheme according to RFC 2617
	Scheme AuthenticationScheme

	// parameters are the auth-params according to RFC 2617
	Parameters map[string]string
}

type byScheme []Challenge

func (bs byScheme) Len() int      { return len(bs) }
func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }

// Sort in priority order: token > digest > basic
func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme }

// Octet types from RFC 2616.
type octetType byte

var octetTypes [256]octetType

const (
	isToken octetType = 1 << iota
	isSpace
)

func init() {
	// OCTET      = <any 8-bit sequence of data>
	// CHAR       = <any US-ASCII character (octets 0 - 127)>
	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
	// CR         = <US-ASCII CR, carriage return (13)>
	// LF         = <US-ASCII LF, linefeed (10)>
	// SP         = <US-ASCII SP, space (32)>
	// HT         = <US-ASCII HT, horizontal-tab (9)>
	// <">        = <US-ASCII double-quote mark (34)>
	// CRLF       = CR LF
	// LWS        = [CRLF] 1*( SP | HT )
	// TEXT       = <any OCTET except CTLs, but including LWS>
	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
	// token      = 1*<any CHAR except CTLs or separators>
	// qdtext     = <any TEXT except <">>

	for c := 0; c < 256; c++ {
		var t octetType
		isCtl := c <= 31 || c == 127
		isChar := 0 <= c && c <= 127
		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
		if strings.ContainsRune(" \t\r\n", rune(c)) {
			t |= isSpace
		}
		if isChar && !isCtl && !isSeparator {
			t |= isToken
		}
		octetTypes[c] = t
	}
}

// ParseAuthHeader parses challenges from WWW-Authenticate header
func ParseAuthHeader(header http.Header) []Challenge {
	challenges := []Challenge{}
	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
		v, p := parseValueAndParams(h)
		var s AuthenticationScheme
		switch v {
		case "basic":
			s = BasicAuth
		case "digest":
			s = DigestAuth
		case "bearer":
			s = BearerAuth
		default:
			continue
		}
		challenges = append(challenges, Challenge{Scheme: s, Parameters: p})
	}
	sort.Stable(byScheme(challenges))
	return challenges
}

func parseValueAndParams(header string) (value string, params map[string]string) {
	params = make(map[string]string)
	value, s := expectToken(header)
	if value == "" {
		return
	}
	value = strings.ToLower(value)
	for {
		var pkey string
		pkey, s = expectToken(skipSpace(s))
		if pkey == "" {
			return
		}
		if !strings.HasPrefix(s, "=") {
			return
		}
		var pvalue string
		pvalue, s = expectTokenOrQuoted(s[1:])
		if pvalue == "" {
			return
		}
		pkey = strings.ToLower(pkey)
		params[pkey] = pvalue
		s = skipSpace(s)
		if !strings.HasPrefix(s, ",") {
			return
		}
		s = s[1:]
	}
}

func skipSpace(s string) (rest string) {
	i := 0
	for ; i < len(s); i++ {
		if octetTypes[s[i]]&isSpace == 0 {
			break
		}
	}
	return s[i:]
}

func expectToken(s string) (token, rest string) {
	i := 0
	for ; i < len(s); i++ {
		if octetTypes[s[i]]&isToken == 0 {
			break
		}
	}
	return s[:i], s[i:]
}

func expectTokenOrQuoted(s string) (value string, rest string) {
	if !strings.HasPrefix(s, "\"") {
		return expectToken(s)
	}
	s = s[1:]
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '"':
			return s[:i], s[i+1:]
		case '\\':
			p := make([]byte, len(s)-1)
			j := copy(p, s[:i])
			escape := true
			for i = i + 1; i < len(s); i++ {
				b := s[i]
				switch {
				case escape:
					escape = false
					p[j] = b
					j++
				case b == '\\':
					escape = true
				case b == '"':
					return string(p[:j]), s[i+1:]
				default:
					p[j] = b
					j++
				}
			}
			return "", ""
		}
	}
	return "", ""
}
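Putting parse.go and fetch.go together: a sketch, assuming an anonymous pull against a registry's /v2/ endpoint (the host URL is illustrative), of turning a 401 bearer challenge into TokenOptions.

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/containerd/containerd/remotes/docker/auth"
)

// tokenOptionsFromChallenge inspects a 401 response and, if the registry
// issued a bearer challenge, converts it into TokenOptions for FetchToken.
func tokenOptionsFromChallenge(ctx context.Context, host string, resp *http.Response) (auth.TokenOptions, error) {
	for _, c := range auth.ParseAuthHeader(resp.Header) {
		if c.Scheme == auth.BearerAuth {
			// Empty username/secret means an anonymous token request.
			return auth.GenerateTokenOptions(ctx, host, "", "", c)
		}
	}
	return auth.TokenOptions{}, fmt.Errorf("no bearer challenge from %s", host)
}

func main() {
	// Hitting /v2/ unauthenticated typically yields a 401 with a challenge.
	resp, err := http.Get("https://registry-1.docker.io/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	to, err := tokenOptionsFromChallenge(context.Background(), "registry-1.docker.io", resp)
	if err != nil {
		panic(err)
	}
	fmt.Printf("realm=%s service=%s\n", to.Realm, to.Service)
}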
@@ -0,0 +1,46 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package errors

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

var _ error = ErrUnexpectedStatus{}

// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status
type ErrUnexpectedStatus struct {
	Status     string
	StatusCode int
	Body       []byte
}

func (e ErrUnexpectedStatus) Error() string {
	return fmt.Sprintf("unexpected status: %s", e.Status)
}

// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response
func NewUnexpectedStatusErr(resp *http.Response) error {
	var b []byte
	if resp.Body != nil {
		b, _ = ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB
	}
	return ErrUnexpectedStatus{Status: resp.Status, StatusCode: resp.StatusCode, Body: b}
}
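A small sketch of how a caller might inspect this error type; it assumes the pkg/errors wrapping done by the auth helpers above (errors.WithStack) exposes Unwrap, so that errors.As can reach the concrete value.

package main

import (
	"errors"
	"fmt"

	remoteserrors "github.com/containerd/containerd/remotes/errors"
)

func describe(err error) {
	var unexpected remoteserrors.ErrUnexpectedStatus
	// errors.As walks the wrap chain; ErrUnexpectedStatus implements error
	// with a value receiver, so a pointer to it is a valid target.
	if errors.As(err, &unexpected) {
		fmt.Printf("registry returned %d: %s\n", unexpected.StatusCode, unexpected.Body)
		return
	}
	fmt.Println("other error:", err)
}

func main() {
	// Construct an instance directly for illustration.
	describe(remoteserrors.ErrUnexpectedStatus{
		Status:     "401 Unauthorized",
		StatusCode: 401,
		Body:       []byte("denied"),
	})
}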
@@ -0,0 +1,191 @@
(New file: the standard Apache License, Version 2.0 text — Terms and Conditions, Sections 1 through 9 — followed by the notice "Copyright The containerd Authors" and the standard Apache 2.0 header boilerplate; full text at https://www.apache.org/licenses/LICENSE-2.0.)
@@ -0,0 +1,19 @@
# typeurl

[![Build Status](https://travis-ci.org/containerd/typeurl.svg?branch=master)](https://travis-ci.org/containerd/typeurl)

[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl)

A Go package for managing the registration, marshaling, and unmarshaling of encoded types.

This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto).

## Project details

**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
 * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
 * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
 * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.
@@ -0,0 +1,83 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package typeurl

// Package typeurl assists with managing the registration, marshaling, and
// unmarshaling of types encoded as protobuf.Any.
//
// A protobuf.Any is a proto message that can contain any arbitrary data. It
// consists of two components, a TypeUrl and a Value, and its proto definition
// looks like this:
//
//   message Any {
//     string type_url = 1;
//     bytes value = 2;
//   }
//
// The TypeUrl is used to distinguish the contents from other proto.Any
// messages. This typeurl library manages these URLs to enable automagic
// marshaling and unmarshaling of the contents.
//
// For example, consider this go struct:
//
//   type Foo struct {
//     Field1 string
//     Field2 string
//   }
//
// To use typeurl, types must first be registered. This is typically done in
// the init function
//
//   func init() {
//     typeurl.Register(&Foo{}, "Foo")
//   }
//
// This will register the type Foo with the url path "Foo". The arguments to
// Register are variadic, and are used to construct a url path. Consider this
// example, from the github.com/containerd/containerd/client package:
//
//   func init() {
//     const prefix = "types.containerd.io"
//     // register TypeUrls for commonly marshaled external types
//     major := strconv.Itoa(specs.VersionMajor)
//     typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
//     // this function has more Register calls, which are elided.
//   }
//
// This registers several types under a more complex url, which ends up mapping
// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other
// value for major).
//
// Once a type is registered, it can be marshaled to a proto.Any message simply
// by calling `MarshalAny`, like this:
//
//   foo := &Foo{Field1: "value1", Field2: "value2"}
//   anyFoo, err := typeurl.MarshalAny(foo)
//
// MarshalAny will resolve the correct URL for the type. If the type in
// question implements the proto.Message interface, then it will be marshaled
// as a proto message. Otherwise, it will be marshaled as json. This means that
// typeurl will work on any arbitrary data, whether or not it has a proto
// definition, as long as it can be serialized to json.
//
// To unmarshal, the process is simply inverse:
//
//   iface, err := typeurl.UnmarshalAny(anyFoo)
//   foo := iface.(*Foo)
//
// The correct type is automatically chosen from the type registry, and the
// returned interface can be cast straight to that type.
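A compact, runnable version of the example from this doc comment; the Foo type and its URL components are illustrative only.

package main

import (
	"fmt"

	"github.com/containerd/typeurl"
)

// Foo is an example payload; it has no proto definition, so typeurl will
// fall back to JSON when marshaling it.
type Foo struct {
	Field1 string
	Field2 string
}

func init() {
	// Register Foo under an illustrative URL path.
	typeurl.Register(&Foo{}, "example.com", "Foo")
}

func main() {
	anyFoo, err := typeurl.MarshalAny(&Foo{Field1: "value1", Field2: "value2"})
	if err != nil {
		panic(err)
	}
	fmt.Println("type url:", anyFoo.TypeUrl)

	v, err := typeurl.UnmarshalAny(anyFoo)
	if err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped: %+v\n", v.(*Foo))
}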
@@ -0,0 +1,8 @@
module github.com/containerd/typeurl

go 1.13

require (
	github.com/gogo/protobuf v1.3.1
	github.com/pkg/errors v0.9.1
)
@@ -0,0 +1,195 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package typeurl

import (
	"encoding/json"
	"path"
	"reflect"
	"sync"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
)

var (
	mu       sync.Mutex
	registry = make(map[reflect.Type]string)
)

var ErrNotFound = errors.New("not found")

// Register a type with a base URL for JSON marshaling. When the MarshalAny and
// UnmarshalAny functions are called they will treat the Any type value as JSON.
// To use protocol buffers for handling the Any value the proto.Register
// function should be used instead of this function.
func Register(v interface{}, args ...string) {
	var (
		t = tryDereference(v)
		p = path.Join(args...)
	)
	mu.Lock()
	defer mu.Unlock()
	if et, ok := registry[t]; ok {
		if et != p {
			panic(errors.Errorf("type registered with alternate path %q != %q", et, p))
		}
		return
	}
	registry[t] = p
}

// TypeURL returns the type url for a registered type.
func TypeURL(v interface{}) (string, error) {
	mu.Lock()
	u, ok := registry[tryDereference(v)]
	mu.Unlock()
	if !ok {
		// fallback to the proto registry if it is a proto message
		pb, ok := v.(proto.Message)
		if !ok {
			return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v))
		}
		return proto.MessageName(pb), nil
	}
	return u, nil
}

// Is returns true if the type of the Any is the same as v.
func Is(any *types.Any, v interface{}) bool {
	// call to check that v is a pointer
	tryDereference(v)
	url, err := TypeURL(v)
	if err != nil {
		return false
	}
	return any.TypeUrl == url
}

// MarshalAny marshals the value v into an any with the correct TypeUrl.
// If the provided object is already a proto.Any message, then it will be
// returned verbatim. If it is of type proto.Message, it will be marshaled as a
// protocol buffer. Otherwise, the object will be marshaled to json.
func MarshalAny(v interface{}) (*types.Any, error) {
	var marshal func(v interface{}) ([]byte, error)
	switch t := v.(type) {
	case *types.Any:
		// avoid reserializing the type if we have an any.
		return t, nil
	case proto.Message:
		marshal = func(v interface{}) ([]byte, error) {
			return proto.Marshal(t)
		}
	default:
		marshal = json.Marshal
	}

	url, err := TypeURL(v)
	if err != nil {
		return nil, err
	}

	data, err := marshal(v)
	if err != nil {
		return nil, err
	}
	return &types.Any{
		TypeUrl: url,
		Value:   data,
	}, nil
}

// UnmarshalAny unmarshals the any type into a concrete type.
func UnmarshalAny(any *types.Any) (interface{}, error) {
	return UnmarshalByTypeURL(any.TypeUrl, any.Value)
}

func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) {
	return unmarshal(typeURL, value, nil)
}

func UnmarshalTo(any *types.Any, out interface{}) error {
	return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out)
}

func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
	_, err := unmarshal(typeURL, value, out)
	return err
}

func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
	t, err := getTypeByUrl(typeURL)
	if err != nil {
		return nil, err
	}

	if v == nil {
		v = reflect.New(t.t).Interface()
	} else {
		// Validate interface type provided by client
		vURL, err := TypeURL(v)
		if err != nil {
			return nil, err
		}
		if typeURL != vURL {
			return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL)
		}
	}

	if t.isProto {
		err = proto.Unmarshal(value, v.(proto.Message))
	} else {
		err = json.Unmarshal(value, v)
	}

	return v, err
}

type urlType struct {
	t       reflect.Type
	isProto bool
}

func getTypeByUrl(url string) (urlType, error) {
	for t, u := range registry {
		if u == url {
			return urlType{
				t: t,
			}, nil
		}
	}
	// fallback to proto registry
	t := proto.MessageType(url)
	if t != nil {
		return urlType{
			// get the underlying Elem because proto returns a pointer to the type
			t:       t.Elem(),
			isProto: true,
		}, nil
	}
	return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url)
}

func tryDereference(v interface{}) reflect.Type {
	t := reflect.TypeOf(v)
	if t.Kind() == reflect.Ptr {
		// require check of pointer but dereference to register
		return t.Elem()
	}
	panic("v is not a pointer to a type")
}
@@ -1,17 +0,0 @@ (removed file)
// Package dockerignore is deprecated. Use github.com/moby/buildkit/frontend/dockerfile/dockerignore instead.
package dockerignore

import (
	"io"

	"github.com/moby/buildkit/frontend/dockerfile/dockerignore"
)

// ReadAll reads a .dockerignore file and returns the list of file patterns
// to ignore. Note this will trim whitespace from each line as well
// as use GO's "clean" func to get the shortest/cleanest path for each.
//
// Deprecated: use github.com/moby/buildkit/frontend/dockerfile/dockerignore.ReadAll instead.
func ReadAll(reader io.Reader) ([]string, error) {
	return dockerignore.ReadAll(reader)
}
@@ -1,13 +1,13 @@
 github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
 github.com/Microsoft/hcsshim 9dcb42f100215f8d375b4a9265e5bba009217a85 # moby branch
-github.com/Microsoft/go-winio 5b44b70ab3ab4d291a7c1d28afe7b4afeced0ed4 # v0.4.15-0.20200908182639-5b44b70ab3ab
+github.com/Microsoft/go-winio 5b44b70ab3ab4d291a7c1d28afe7b4afeced0ed4 # v0.4.15
 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
 github.com/golang/gddo 72a348e765d293ed6d1ded7b699591f14d6cd921
 github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1
 github.com/gorilla/mux 98cb6bf42e086f6af920b965c38cacc07402d51b # v1.8.0
 github.com/Microsoft/opengcs a10967154e143a36014584a6f664344e3bb0aa64
 github.com/moby/locker 281af2d563954745bea9d1487c965f24d30742fe # v1.0.1
-github.com/moby/term 7f0af18e79f2784809e9cef63d0df5aa2c79d76e
+github.com/moby/term bea5bbe245bf407372d477f1361d2ff042d2f556

 # Note that this dependency uses submodules, providing the github.com/moby/sys/mount,
 # github.com/moby/sys/mountinfo, and github.com/moby/sys/symlink modules. Our vendoring
@@ -16,7 +16,7 @@ github.com/moby/term 7f0af18e79f2784809e9cef63d0d
 # which could be either `mountinfo/vX.Y.Z`, `mount/vX.Y.Z`, or `symlink/vX.Y.Z`.
 github.com/moby/sys 1bc8673b57550ddf85262eb0fed0aac651a37dab # symlink/v0.1.0

-github.com/creack/pty 3a6a957789163cacdfe0e291617a1c8e80612c11 # v1.1.9
+github.com/creack/pty 2a38352e8b4d7ab6c336eef107e42a55e72e7fbc # v1.1.11
 github.com/sirupsen/logrus 6699a89a232f3db797f2e280639854bbc4b89725 # v1.7.0
 github.com/tchap/go-patricia a7f0089c6f496e8e70402f61733606daa326cac5 # v2.3.0
 golang.org/x/net ab34263943818b32f575efc978a3d24e80b04bd7
@@ -33,20 +33,21 @@ github.com/imdario/mergo 1afb36080aec31e0d1528973ebe6
 golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb

 # buildkit
-github.com/moby/buildkit 4d1f260e8490ec438ab66e08bb105577aca0ce06
+github.com/moby/buildkit 6861f17f15364de0fe1fd1e6e8da07598a485123
-github.com/tonistiigi/fsutil ae3a8d753069d0f76fbee396457e8b6cfd7cb8c3
+github.com/tonistiigi/fsutil c3ed55f3b48161fd3dc42c17ba09e12ac52d57dc
+github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
-github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
+github.com/opentracing/opentracing-go d34af3eaa63c4d08ab54863a4bdd0daa45212e12 # v1.2.0
 github.com/google/shlex e7afc7fbc51079733e9468cdfd1efcd7d196cd1d
-github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
+github.com/opentracing-contrib/go-stdlib 8a6ff1ad1691a29e4f7b5d46604f97634997c8c4 # v1.0.0
-github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
+github.com/mitchellh/hashstructure a38c50148365edc8df43c1580c48fb2b3a1e9cd7 # v1.0.0
 github.com/gofrs/flock 6caa7350c26b838538005fae7dbee4e69d9398db # v0.7.3
 github.com/grpc-ecosystem/go-grpc-middleware 3c51f7f332123e8be5a157c0802a228ac85bf9db # v1.2.0

 # libnetwork

 # When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy.installer accordingly
-github.com/docker/libnetwork d511c60c5c23e6753631244f271a1ec6097254a5
+github.com/docker/libnetwork 6b51d028f4bbb9a4cc8d3eaba13baa9f848af546
 github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -129,12 +130,12 @@ github.com/googleapis/gax-go bd5b16380fd03dc758d11cef74ba
 google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6c7065a3160e8

 # containerd
-github.com/containerd/containerd c623d1b36f09f8ef6536a057bd658b3aa8632828 # v1.4.1
+github.com/containerd/containerd d4e78200d6da62480c85bf6f26b7221ea938f396
 github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf
 github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165
 github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff
 github.com/containerd/console 5d7e1412f07b502a01029ea20e20e0d2be31fa7c # v1.0.1
-github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c
+github.com/containerd/go-runc 16b287bc67d069a60fa48db15f330b790b74365b
 github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1
 github.com/containerd/ttrpc 72bb1b21c5b0a4a107f59dd85f6ab58e564b68d6 # v1.0.1
 github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
@ -0,0 +1,191 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, and
|
||||||
|
distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||||
|
owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||||
|
that control, are controlled by, or are under common control with that entity.
|
||||||
|
For the purposes of this definition, "control" means (i) the power, direct or
|
||||||
|
indirect, to cause the direction or management of such entity, whether by
|
||||||
|
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||||
|
permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, including
|
||||||
|
but not limited to software source code, documentation source, and configuration
|
||||||
|
files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical transformation or
|
||||||
|
translation of a Source form, including but not limited to compiled object code,
|
||||||
|
generated documentation, and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||||
|
available under the License, as indicated by a copyright notice that is included
|
||||||
|
in or attached to the work (an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||||
|
is based on (or derived from) the Work and for which the editorial revisions,
|
||||||
|
annotations, elaborations, or other modifications represent, as a whole, an
|
||||||
|
original work of authorship. For the purposes of this License, Derivative Works
|
||||||
|
shall not include works that remain separable from, or merely link (or bind by
|
||||||
|
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including the original version
|
||||||
|
of the Work and any modifications or additions to that Work or Derivative Works
|
||||||
|
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||||
|
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||||
|
on behalf of the copyright owner. For the purposes of this definition,
|
||||||
|
"submitted" means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems, and
|
||||||
|
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||||
|
the purpose of discussing and improving the Work, but excluding communication
|
||||||
|
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||||
|
owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||||
|
of whom a Contribution has been received by Licensor and subsequently
|
||||||
|
incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||||
|
Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable (except as stated in this section) patent license to make, have
|
||||||
|
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||||
|
such license applies only to those patent claims licensable by such Contributor
|
||||||
|
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||||
|
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||||
|
submitted. If You institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||||
|
Contribution incorporated within the Work constitutes direct or contributory
|
||||||
|
patent infringement, then any patent licenses granted to You under this License
|
||||||
|
for that Work shall terminate as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution.
|
||||||
|
|
||||||
|
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||||
|
in any medium, with or without modifications, and in Source or Object form,
|
||||||
|
provided that You meet the following conditions:
|
||||||
|
|
||||||
|
You must give any other recipients of the Work or Derivative Works a copy of
|
||||||
|
this License; and
|
||||||
|
You must cause any modified files to carry prominent notices stating that You
|
||||||
|
changed the files; and
|
||||||
|
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||||
|
all copyright, patent, trademark, and attribution notices from the Source form
|
||||||
|
of the Work, excluding those notices that do not pertain to any part of the
|
||||||
|
Derivative Works; and
|
||||||
|
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||||
|
Derivative Works that You distribute must include a readable copy of the
|
||||||
|
attribution notices contained within such NOTICE file, excluding those notices
|
||||||
|
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||||
|
following places: within a NOTICE text file distributed as part of the
|
||||||
|
Derivative Works; within the Source form or documentation, if provided along
|
||||||
|
with the Derivative Works; or, within a display generated by the Derivative
|
||||||
|
Works, if and wherever such third-party notices normally appear. The contents of
|
||||||
|
the NOTICE file are for informational purposes only and do not modify the
|
||||||
|
License. You may add Your own attribution notices within Derivative Works that
|
||||||
|
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||||
|
provided that such additional attribution notices cannot be construed as
|
||||||
|
modifying the License.
|
||||||
|
You may add Your own copyright statement to Your modifications and may provide
|
||||||
|
additional or different license terms and conditions for use, reproduction, or
|
||||||
|
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||||
|
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||||
|
with the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions.
|
||||||
|
|
||||||
|
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||||
|
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||||
|
conditions of this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||||
|
any separate license agreement you may have executed with Licensor regarding
|
||||||
|
such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks.
|
||||||
|
|
||||||
|
This License does not grant permission to use the trade names, trademarks,
|
||||||
|
service marks, or product names of the Licensor, except as required for
|
||||||
|
reasonable and customary use in describing the origin of the Work and
|
||||||
|
reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||||
|
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||||
|
including, without limitation, any warranties or conditions of TITLE,
|
||||||
|
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||||
|
solely responsible for determining the appropriateness of using or
|
||||||
|
redistributing the Work and assume any risks associated with Your exercise of
|
||||||
|
permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability.
|
||||||
|
|
||||||
|
In no event and under no legal theory, whether in tort (including negligence),
|
||||||
|
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||||
|
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special, incidental,
|
||||||
|
or consequential damages of any character arising as a result of this License or
|
||||||
|
out of the use or inability to use the Work (including but not limited to
|
||||||
|
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||||
|
any and all other commercial damages or losses), even if such Contributor has
|
||||||
|
been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability.
|
||||||
|
|
||||||
|
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||||
|
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||||
|
other liability obligations and/or rights consistent with this License. However,
|
||||||
|
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||||
|
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||||
|
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason of your
|
||||||
|
accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate
|
||||||
|
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||||
|
identifying information. (Don't include the brackets!) The text should be
|
||||||
|
enclosed in the appropriate comment syntax for the file format. We also
|
||||||
|
recommend that a file or class name and description of purpose be included on
|
||||||
|
the same "printed page" as the copyright notice for easier identification within
|
||||||
|
third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@@ -0,0 +1,73 @@
# groupcache

## Summary

groupcache is a caching and cache-filling library, intended as a
replacement for memcached in many cases.

For API docs and examples, see http://godoc.org/github.com/golang/groupcache

## Comparison to memcached

### **Like memcached**, groupcache:

 * shards by key to select which peer is responsible for that key

### **Unlike memcached**, groupcache:

 * does not require running a separate set of servers, thus massively
   reducing deployment/configuration pain. groupcache is a client
   library as well as a server. It connects to its own peers.

 * comes with a cache filling mechanism. Whereas memcached just says
   "Sorry, cache miss", often resulting in a thundering herd of
   database (or whatever) loads from an unbounded number of clients
   (which has resulted in several fun outages), groupcache coordinates
   cache fills such that only one load in one process of an entire
   replicated set of processes populates the cache, then multiplexes
   the loaded value to all callers.

 * does not support versioned values. If key "foo" is value "bar",
   key "foo" must always be "bar". There are neither cache expiration
   times, nor explicit cache evictions. Thus there is also no CAS,
   nor Increment/Decrement. This also means that groupcache....

 * ... supports automatic mirroring of super-hot items to multiple
   processes. This prevents memcached hot spotting where a machine's
   CPU and/or NIC are overloaded by very popular keys/values.

 * is currently only available for Go. It's very unlikely that I
   (bradfitz@) will port the code to any other language.

## Loading process

In a nutshell, a groupcache lookup of **Get("foo")** looks like:

(On machine #5 of a set of N machines running the same code)

 1. Is the value of "foo" in local memory because it's super hot? If so, use it.

 2. Is the value of "foo" in local memory because peer #5 (the current
    peer) is the owner of it? If so, use it.

 3. Amongst all the peers in my set of N, am I the owner of the key
    "foo"? (e.g. does it consistent hash to 5?) If so, load it. If
    other callers come in, via the same process or via RPC requests
    from peers, they block waiting for the load to finish and get the
    same answer. If not, RPC to the peer that's the owner and get
    the answer. If the RPC fails, just load it locally (still with
    local dup suppression).
## Users

groupcache is in production use by dl.google.com (its original user),
parts of Blogger, parts of Google Code, parts of Google Fiber, parts
of Google production monitoring systems, etc.

## Presentations

See http://talks.golang.org/2013/oscon-dl.slide

## Help

Use the golang-nuts mailing list for any discussion or questions.
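The in-process caches described in the loading process above are built on the bundled `lru` package, whose full vendored source follows; a minimal, self-contained sketch of that API (keys and sizes here are only illustrative):

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	// A bounded cache of two entries; adding a third evicts the oldest.
	c := lru.New(2)
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Println("evicted:", key)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // "a" is evicted here

	if v, ok := c.Get("b"); ok {
		fmt.Println("b =", v)
	}
	fmt.Println("entries:", c.Len())
}
```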
@ -0,0 +1,133 @@
|
||||||
|
/*
|
||||||
|
Copyright 2013 Google Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package lru implements an LRU cache.
|
||||||
|
package lru
|
||||||
|
|
||||||
|
import "container/list"
|
||||||
|
|
||||||
|
// Cache is an LRU cache. It is not safe for concurrent access.
|
||||||
|
type Cache struct {
|
||||||
|
// MaxEntries is the maximum number of cache entries before
|
||||||
|
// an item is evicted. Zero means no limit.
|
||||||
|
MaxEntries int
|
||||||
|
|
||||||
|
// OnEvicted optionally specifies a callback function to be
|
||||||
|
// executed when an entry is purged from the cache.
|
||||||
|
OnEvicted func(key Key, value interface{})
|
||||||
|
|
||||||
|
ll *list.List
|
||||||
|
cache map[interface{}]*list.Element
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
|
||||||
|
type Key interface{}
|
||||||
|
|
||||||
|
type entry struct {
|
||||||
|
key Key
|
||||||
|
value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new Cache.
|
||||||
|
// If maxEntries is zero, the cache has no limit and it's assumed
|
||||||
|
// that eviction is done by the caller.
|
||||||
|
func New(maxEntries int) *Cache {
|
||||||
|
return &Cache{
|
||||||
|
MaxEntries: maxEntries,
|
||||||
|
ll: list.New(),
|
||||||
|
cache: make(map[interface{}]*list.Element),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a value to the cache.
|
||||||
|
func (c *Cache) Add(key Key, value interface{}) {
|
||||||
|
if c.cache == nil {
|
||||||
|
c.cache = make(map[interface{}]*list.Element)
|
||||||
|
c.ll = list.New()
|
||||||
|
}
|
||||||
|
if ee, ok := c.cache[key]; ok {
|
||||||
|
c.ll.MoveToFront(ee)
|
||||||
|
ee.Value.(*entry).value = value
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ele := c.ll.PushFront(&entry{key, value})
|
||||||
|
c.cache[key] = ele
|
||||||
|
if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
|
||||||
|
c.RemoveOldest()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get looks up a key's value from the cache.
|
||||||
|
func (c *Cache) Get(key Key) (value interface{}, ok bool) {
|
||||||
|
if c.cache == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if ele, hit := c.cache[key]; hit {
|
||||||
|
c.ll.MoveToFront(ele)
|
||||||
|
return ele.Value.(*entry).value, true
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes the provided key from the cache.
|
||||||
|
func (c *Cache) Remove(key Key) {
|
||||||
|
if c.cache == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if ele, hit := c.cache[key]; hit {
|
||||||
|
c.removeElement(ele)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveOldest removes the oldest item from the cache.
|
||||||
|
func (c *Cache) RemoveOldest() {
|
||||||
|
if c.cache == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ele := c.ll.Back()
|
||||||
|
if ele != nil {
|
||||||
|
c.removeElement(ele)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cache) removeElement(e *list.Element) {
|
||||||
|
c.ll.Remove(e)
|
||||||
|
kv := e.Value.(*entry)
|
||||||
|
delete(c.cache, kv.key)
|
||||||
|
if c.OnEvicted != nil {
|
||||||
|
c.OnEvicted(kv.key, kv.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of items in the cache.
|
||||||
|
func (c *Cache) Len() int {
|
||||||
|
if c.cache == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return c.ll.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear purges all stored items from the cache.
|
||||||
|
func (c *Cache) Clear() {
|
||||||
|
if c.OnEvicted != nil {
|
||||||
|
for _, e := range c.cache {
|
||||||
|
kv := e.Value.(*entry)
|
||||||
|
c.OnEvicted(kv.key, kv.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.ll = nil
|
||||||
|
c.cache = nil
|
||||||
|
}
|
File diff suppressed because it is too large
@ -0,0 +1,338 @@
|
||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// source: google/protobuf/struct.proto
|
||||||
|
|
||||||
|
package structpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
math "math"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||||
|
// `Value` type union.
|
||||||
|
//
|
||||||
|
// The JSON representation for `NullValue` is JSON `null`.
|
||||||
|
type NullValue int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Null value.
|
||||||
|
NullValue_NULL_VALUE NullValue = 0
|
||||||
|
)
|
||||||
|
|
||||||
|
var NullValue_name = map[int32]string{
|
||||||
|
0: "NULL_VALUE",
|
||||||
|
}
|
||||||
|
|
||||||
|
var NullValue_value = map[string]int32{
|
||||||
|
"NULL_VALUE": 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x NullValue) String() string {
|
||||||
|
return proto.EnumName(NullValue_name, int32(x))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (NullValue) EnumDescriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_df322afd6c9fb402, []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (NullValue) XXX_WellKnownType() string { return "NullValue" }
|
||||||
|
|
||||||
|
// `Struct` represents a structured data value, consisting of fields
|
||||||
|
// which map to dynamically typed values. In some languages, `Struct`
|
||||||
|
// might be supported by a native representation. For example, in
|
||||||
|
// scripting languages like JS a struct is represented as an
|
||||||
|
// object. The details of that representation are described together
|
||||||
|
// with the proto support for the language.
|
||||||
|
//
|
||||||
|
// The JSON representation for `Struct` is JSON object.
|
||||||
|
type Struct struct {
|
||||||
|
// Unordered map of dynamically typed values.
|
||||||
|
Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Struct) Reset() { *m = Struct{} }
|
||||||
|
func (m *Struct) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Struct) ProtoMessage() {}
|
||||||
|
func (*Struct) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_df322afd6c9fb402, []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Struct) XXX_WellKnownType() string { return "Struct" }
|
||||||
|
|
||||||
|
func (m *Struct) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Struct.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Struct) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Struct.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Struct) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Struct.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Struct) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Struct.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Struct proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Struct) GetFields() map[string]*Value {
|
||||||
|
if m != nil {
|
||||||
|
return m.Fields
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// `Value` represents a dynamically typed value which can be either
|
||||||
|
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||||
|
// list of values. A producer of value is expected to set one of that
|
||||||
|
// variants, absence of any variant indicates an error.
|
||||||
|
//
|
||||||
|
// The JSON representation for `Value` is JSON value.
|
||||||
|
type Value struct {
|
||||||
|
// The kind of value.
|
||||||
|
//
|
||||||
|
// Types that are valid to be assigned to Kind:
|
||||||
|
// *Value_NullValue
|
||||||
|
// *Value_NumberValue
|
||||||
|
// *Value_StringValue
|
||||||
|
// *Value_BoolValue
|
||||||
|
// *Value_StructValue
|
||||||
|
// *Value_ListValue
|
||||||
|
Kind isValue_Kind `protobuf_oneof:"kind"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) Reset() { *m = Value{} }
|
||||||
|
func (m *Value) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Value) ProtoMessage() {}
|
||||||
|
func (*Value) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_df322afd6c9fb402, []int{1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Value) XXX_WellKnownType() string { return "Value" }
|
||||||
|
|
||||||
|
func (m *Value) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Value.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Value.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Value) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Value.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Value) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Value.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Value) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Value.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Value proto.InternalMessageInfo
|
||||||
|
|
||||||
|
type isValue_Kind interface {
|
||||||
|
isValue_Kind()
|
||||||
|
}
|
||||||
|
|
||||||
|
type Value_NullValue struct {
|
||||||
|
NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Value_NumberValue struct {
|
||||||
|
NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Value_StringValue struct {
|
||||||
|
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Value_BoolValue struct {
|
||||||
|
BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Value_StructValue struct {
|
||||||
|
StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Value_ListValue struct {
|
||||||
|
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Value_NullValue) isValue_Kind() {}
|
||||||
|
|
||||||
|
func (*Value_NumberValue) isValue_Kind() {}
|
||||||
|
|
||||||
|
func (*Value_StringValue) isValue_Kind() {}
|
||||||
|
|
||||||
|
func (*Value_BoolValue) isValue_Kind() {}
|
||||||
|
|
||||||
|
func (*Value_StructValue) isValue_Kind() {}
|
||||||
|
|
||||||
|
func (*Value_ListValue) isValue_Kind() {}
|
||||||
|
|
||||||
|
func (m *Value) GetKind() isValue_Kind {
|
||||||
|
if m != nil {
|
||||||
|
return m.Kind
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetNullValue() NullValue {
|
||||||
|
if x, ok := m.GetKind().(*Value_NullValue); ok {
|
||||||
|
return x.NullValue
|
||||||
|
}
|
||||||
|
return NullValue_NULL_VALUE
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetNumberValue() float64 {
|
||||||
|
if x, ok := m.GetKind().(*Value_NumberValue); ok {
|
||||||
|
return x.NumberValue
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetStringValue() string {
|
||||||
|
if x, ok := m.GetKind().(*Value_StringValue); ok {
|
||||||
|
return x.StringValue
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetBoolValue() bool {
|
||||||
|
if x, ok := m.GetKind().(*Value_BoolValue); ok {
|
||||||
|
return x.BoolValue
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetStructValue() *Struct {
|
||||||
|
if x, ok := m.GetKind().(*Value_StructValue); ok {
|
||||||
|
return x.StructValue
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetListValue() *ListValue {
|
||||||
|
if x, ok := m.GetKind().(*Value_ListValue); ok {
|
||||||
|
return x.ListValue
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||||
|
func (*Value) XXX_OneofWrappers() []interface{} {
|
||||||
|
return []interface{}{
|
||||||
|
(*Value_NullValue)(nil),
|
||||||
|
(*Value_NumberValue)(nil),
|
||||||
|
(*Value_StringValue)(nil),
|
||||||
|
(*Value_BoolValue)(nil),
|
||||||
|
(*Value_StructValue)(nil),
|
||||||
|
(*Value_ListValue)(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// `ListValue` is a wrapper around a repeated field of values.
|
||||||
|
//
|
||||||
|
// The JSON representation for `ListValue` is JSON array.
|
||||||
|
type ListValue struct {
|
||||||
|
// Repeated field of dynamically typed values.
|
||||||
|
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ListValue) Reset() { *m = ListValue{} }
|
||||||
|
func (m *ListValue) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*ListValue) ProtoMessage() {}
|
||||||
|
func (*ListValue) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_df322afd6c9fb402, []int{2}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
|
||||||
|
|
||||||
|
func (m *ListValue) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_ListValue.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *ListValue) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_ListValue.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *ListValue) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_ListValue.Size(m)
|
||||||
|
}
|
||||||
|
func (m *ListValue) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_ListValue.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_ListValue proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *ListValue) GetValues() []*Value {
|
||||||
|
if m != nil {
|
||||||
|
return m.Values
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
|
||||||
|
proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
|
||||||
|
proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
|
||||||
|
proto.RegisterType((*Value)(nil), "google.protobuf.Value")
|
||||||
|
proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptor_df322afd6c9fb402 = []byte{
|
||||||
|
// 417 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
|
||||||
|
0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
|
||||||
|
0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
|
||||||
|
0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
|
||||||
|
0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
|
||||||
|
0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
|
||||||
|
0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
|
||||||
|
0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
|
||||||
|
0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
|
||||||
|
0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
|
||||||
|
0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
|
||||||
|
0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
|
||||||
|
0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
|
||||||
|
0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
|
||||||
|
0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
|
||||||
|
0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
|
||||||
|
0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
|
||||||
|
0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
|
||||||
|
0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
|
||||||
|
0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
|
||||||
|
0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
|
||||||
|
0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
|
||||||
|
0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
|
||||||
|
0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
|
||||||
|
0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
|
||||||
|
0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
|
||||||
|
0x00,
|
||||||
|
}
|
|
@ -0,0 +1,95 @@
|
||||||
|
// Protocol Buffers - Google's data interchange format
|
||||||
|
// Copyright 2008 Google Inc. All rights reserved.
|
||||||
|
// https://developers.google.com/protocol-buffers/
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package google.protobuf;
|
||||||
|
|
||||||
|
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||||
|
option cc_enable_arenas = true;
|
||||||
|
option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
|
||||||
|
option java_package = "com.google.protobuf";
|
||||||
|
option java_outer_classname = "StructProto";
|
||||||
|
option java_multiple_files = true;
|
||||||
|
option objc_class_prefix = "GPB";
|
||||||
|
|
||||||
|
// `Struct` represents a structured data value, consisting of fields
|
||||||
|
// which map to dynamically typed values. In some languages, `Struct`
|
||||||
|
// might be supported by a native representation. For example, in
|
||||||
|
// scripting languages like JS a struct is represented as an
|
||||||
|
// object. The details of that representation are described together
|
||||||
|
// with the proto support for the language.
|
||||||
|
//
|
||||||
|
// The JSON representation for `Struct` is JSON object.
|
||||||
|
message Struct {
|
||||||
|
// Unordered map of dynamically typed values.
|
||||||
|
map<string, Value> fields = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// `Value` represents a dynamically typed value which can be either
|
||||||
|
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||||
|
// list of values. A producer of value is expected to set one of that
|
||||||
|
// variants, absence of any variant indicates an error.
|
||||||
|
//
|
||||||
|
// The JSON representation for `Value` is JSON value.
|
||||||
|
message Value {
|
||||||
|
// The kind of value.
|
||||||
|
oneof kind {
|
||||||
|
// Represents a null value.
|
||||||
|
NullValue null_value = 1;
|
||||||
|
// Represents a double value.
|
||||||
|
double number_value = 2;
|
||||||
|
// Represents a string value.
|
||||||
|
string string_value = 3;
|
||||||
|
// Represents a boolean value.
|
||||||
|
bool bool_value = 4;
|
||||||
|
// Represents a structured value.
|
||||||
|
Struct struct_value = 5;
|
||||||
|
// Represents a repeated `Value`.
|
||||||
|
ListValue list_value = 6;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||||
|
// `Value` type union.
|
||||||
|
//
|
||||||
|
// The JSON representation for `NullValue` is JSON `null`.
|
||||||
|
enum NullValue {
|
||||||
|
// Null value.
|
||||||
|
NULL_VALUE = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// `ListValue` is a wrapper around a repeated field of values.
|
||||||
|
//
|
||||||
|
// The JSON representation for `ListValue` is JSON array.
|
||||||
|
message ListValue {
|
||||||
|
// Repeated field of dynamically typed values.
|
||||||
|
repeated Value values = 1;
|
||||||
|
}
|
|
@@ -62,6 +62,7 @@ You don't need to read this document unless you want to use the full-featured st
 - [Expose BuildKit as a TCP service](#expose-buildkit-as-a-tcp-service)
 - [Load balancing](#load-balancing)
 - [Containerizing BuildKit](#containerizing-buildkit)
+- [Podman](#podman)
 - [Kubernetes](#kubernetes)
 - [Daemonless](#daemonless)
 - [Opentracing support](#opentracing-support)
@@ -127,11 +128,6 @@ We are open to adding more backends.
 The buildkitd daemon listens gRPC API on `/run/buildkit/buildkitd.sock` by default, but you can also use TCP sockets.
 See [Expose BuildKit as a TCP service](#expose-buildkit-as-a-tcp-service).
 
-:information_source: Notice to Fedora 31 users:
-
-* As runc still does not work on cgroup v2 environment like Fedora 31, you need to substitute runc with crun. Run `buildkitd` with `--oci-worker-binary=crun`.
-* If you want to use runc, you need to configure the system to use cgroup v1. Run `sudo grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"` and reboot.
-
 ### Exploring LLB
 
 BuildKit builds are based on a binary intermediate format called LLB that is used for defining the dependency graph for processes running part of your build. tl;dr: LLB is to Dockerfile what LLVM IR is to C.
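As a rough illustration of that idea, here is a minimal sketch that builds a tiny LLB graph and marshals it with BuildKit's Go client library; the image reference and command are placeholders, and the helpers used (`llb.Image`, `Run`, `Marshal`, `llb.WriteTo`) are assumed to match this vendored BuildKit version:

```go
package main

import (
	"context"
	"os"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// Start from a base image and run a single command on top of it.
	st := llb.Image("docker.io/library/alpine:latest").
		Run(llb.Shlex("apk add --no-cache curl")).
		Root()

	// Marshal the graph into the protobuf definition that buildctl consumes.
	def, err := st.Marshal(context.TODO())
	if err != nil {
		panic(err)
	}

	// Write the definition to stdout, e.g. to pipe it into `buildctl debug dump-llb`.
	if err := llb.WriteTo(def, os.Stdout); err != nil {
		panic(err)
	}
}
```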
@@ -150,6 +146,9 @@ Currently, the following high-level languages has been implemented for LLB:
 - [Mockerfile](https://matt-rickard.com/building-a-new-dockerfile-frontend/)
 - [Gockerfile](https://github.com/po3rin/gockerfile)
 - [bldr (Pkgfile)](https://github.com/talos-systems/bldr/)
+- [HLB](https://github.com/openllb/hlb)
+- [Earthfile (Earthly)](https://github.com/earthly/earthly)
+- [Cargo Wharf (Rust)](https://github.com/denzp/cargo-wharf)
 - (open a PR to add your own language)
 
 ### Exploring Dockerfiles
@@ -353,6 +352,7 @@ The directory layout conforms to OCI Image Spec v1.0.
 - `mode=max`: export all the layers of all intermediate steps. Not supported for `inline` cache exporter.
 - `ref=docker.io/user/image:tag`: reference for `registry` cache exporter
 - `dest=path/to/output-dir`: directory for `local` cache exporter
+- `oci-mediatypes=true|false`: whether to use OCI mediatypes in exported manifests for `local` and `registry` exporter. Since BuildKit `v0.8` defaults to true.
 
 #### `--import-cache` options
 - `type`: `registry` or `local`. Use `registry` to import `inline` cache.
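Putting those options together, an illustrative invocation might look like the following; the image and cache references are placeholders, not values taken from this diff:

```bash
buildctl build \
    --frontend dockerfile.v0 \
    --local context=. --local dockerfile=. \
    --output type=image,name=docker.io/user/image:tag,push=true \
    --export-cache type=registry,ref=docker.io/user/image:buildcache,mode=max,oci-mediatypes=true \
    --import-cache type=registry,ref=docker.io/user/image:buildcache
```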
@@ -418,6 +418,16 @@ export BUILDKIT_HOST=docker-container://buildkitd
 buildctl build --help
 ```
 
+### Podman
+To connect to a BuildKit daemon running in a Podman container, use `podman-container://` instead of `docker-container://` .
+
+```bash
+podman run -d --name buildkitd --privileged moby/buildkit:latest
+buildctl --addr=podman-container://buildkitd build --frontend dockerfile.v0 --local context=. --local dockerfile=. --output type=oci | podman load foo
+```
+
+`sudo` is not required.
+
 ### Kubernetes
 
 For Kubernetes deployments, see [`examples/kubernetes`](./examples/kubernetes).
@@ -1,3 +1,3 @@
-package moby_buildkit_v1
+package moby_buildkit_v1 //nolint:golint
 
 //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto
@@ -1,3 +1,3 @@
-package moby_buildkit_v1_types
+package moby_buildkit_v1_types //nolint:golint
 
 //go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. worker.proto
@@ -45,11 +45,14 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
 	}
 
 	cb := func(ref string, s *session.Session) error {
-		g, err := grpcclient.New(ctx, feOpts, s.ID(), product, c.gatewayClientForBuild(ref), gworkers)
+		gwClient := c.gatewayClientForBuild(ref)
+		g, err := grpcclient.New(ctx, feOpts, s.ID(), product, gwClient, gworkers)
 		if err != nil {
 			return err
 		}
 
+		gwClient.caps = g.BuildOpts().Caps
+
 		if err := g.Run(ctx, buildFunc); err != nil {
 			return errors.Wrap(err, "failed to run Build function")
 		}
@@ -59,14 +62,18 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
 	return c.solve(ctx, nil, cb, opt, statusChan)
 }
 
-func (c *Client) gatewayClientForBuild(buildid string) gatewayapi.LLBBridgeClient {
+func (c *Client) gatewayClientForBuild(buildid string) *gatewayClientForBuild {
 	g := gatewayapi.NewLLBBridgeClient(c.conn)
-	return &gatewayClientForBuild{g, buildid}
+	return &gatewayClientForBuild{
+		gateway: g,
+		buildID: buildid,
+	}
 }
 
 type gatewayClientForBuild struct {
 	gateway gatewayapi.LLBBridgeClient
 	buildID string
+	caps    apicaps.CapSet
 }
 
 func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gatewayapi.ResolveImageConfigRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveImageConfigResponse, error) {
@@ -85,11 +92,17 @@ func (g *gatewayClientForBuild) ReadFile(ctx context.Context, in *gatewayapi.Rea
 }
 
 func (g *gatewayClientForBuild) ReadDir(ctx context.Context, in *gatewayapi.ReadDirRequest, opts ...grpc.CallOption) (*gatewayapi.ReadDirResponse, error) {
+	if err := g.caps.Supports(gatewayapi.CapReadDir); err != nil {
+		return nil, err
+	}
 	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
 	return g.gateway.ReadDir(ctx, in, opts...)
 }
 
 func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.StatFileRequest, opts ...grpc.CallOption) (*gatewayapi.StatFileResponse, error) {
+	if err := g.caps.Supports(gatewayapi.CapStatFile); err != nil {
+		return nil, err
+	}
 	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
 	return g.gateway.StatFile(ctx, in, opts...)
 }
@@ -105,6 +118,33 @@ func (g *gatewayClientForBuild) Return(ctx context.Context, in *gatewayapi.Retur
 }
 
 func (g *gatewayClientForBuild) Inputs(ctx context.Context, in *gatewayapi.InputsRequest, opts ...grpc.CallOption) (*gatewayapi.InputsResponse, error) {
+	if err := g.caps.Supports(gatewayapi.CapFrontendInputs); err != nil {
+		return nil, err
+	}
 	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
 	return g.gateway.Inputs(ctx, in, opts...)
 }
+
+func (g *gatewayClientForBuild) NewContainer(ctx context.Context, in *gatewayapi.NewContainerRequest, opts ...grpc.CallOption) (*gatewayapi.NewContainerResponse, error) {
+	if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil {
+		return nil, err
+	}
+	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+	return g.gateway.NewContainer(ctx, in, opts...)
+}
+
+func (g *gatewayClientForBuild) ReleaseContainer(ctx context.Context, in *gatewayapi.ReleaseContainerRequest, opts ...grpc.CallOption) (*gatewayapi.ReleaseContainerResponse, error) {
+	if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil {
+		return nil, err
+	}
+	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+	return g.gateway.ReleaseContainer(ctx, in, opts...)
+}
+
+func (g *gatewayClientForBuild) ExecProcess(ctx context.Context, opts ...grpc.CallOption) (gatewayapi.LLBBridge_ExecProcessClient, error) {
+	if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil {
+		return nil, err
+	}
+	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+	return g.gateway.ExecProcess(ctx, opts...)
+}
|
@ -6,8 +6,9 @@ import (
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net"
|
"net"
|
||||||
"time"
|
"net/url"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/defaults"
|
||||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||||
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
|
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
|
||||||
controlapi "github.com/moby/buildkit/api/services/control"
|
controlapi "github.com/moby/buildkit/api/services/control"
|
||||||
|
@ -30,7 +31,10 @@ type ClientOpt interface{}
|
||||||
|
|
||||||
// New returns a new buildkit client. Address can be empty for the system-default address.
|
// New returns a new buildkit client. Address can be empty for the system-default address.
|
||||||
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
|
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
|
||||||
gopts := []grpc.DialOption{}
|
gopts := []grpc.DialOption{
|
||||||
|
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
|
||||||
|
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
|
||||||
|
}
|
||||||
needDialer := true
|
needDialer := true
|
||||||
needWithInsecure := true
|
needWithInsecure := true
|
||||||
|
|
||||||
|
@ -54,7 +58,7 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
|
||||||
stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))
|
stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))
|
||||||
}
|
}
|
||||||
if wd, ok := o.(*withDialer); ok {
|
if wd, ok := o.(*withDialer); ok {
|
||||||
gopts = append(gopts, grpc.WithDialer(wd.dialer))
|
gopts = append(gopts, grpc.WithContextDialer(wd.dialer))
|
||||||
needDialer = false
|
needDialer = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -63,9 +67,7 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// TODO(AkihiroSuda): use WithContextDialer (requires grpc 1.19)
|
gopts = append(gopts, grpc.WithContextDialer(dialFn))
|
||||||
// https://github.com/grpc/grpc-go/commit/40cb5618f475e7b9d61aa7920ae4b04ef9bbaf89
|
|
||||||
gopts = append(gopts, grpc.WithDialer(dialFn))
|
|
||||||
}
|
}
|
||||||
if needWithInsecure {
|
if needWithInsecure {
|
||||||
gopts = append(gopts, grpc.WithInsecure())
|
gopts = append(gopts, grpc.WithInsecure())
|
||||||
|
@ -74,6 +76,15 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
|
||||||
address = appdefaults.Address
|
address = appdefaults.Address
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// grpc-go uses a slightly different naming scheme: https://github.com/grpc/grpc/blob/master/doc/naming.md
|
||||||
|
// This will end up setting rfc non-complient :authority header to address string (e.g. tcp://127.0.0.1:1234).
|
||||||
|
// So, here sets right authority header via WithAuthority DialOption.
|
||||||
|
addressURL, err := url.Parse(address)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
gopts = append(gopts, grpc.WithAuthority(addressURL.Host))
|
||||||
|
|
||||||
unary = append(unary, grpcerrors.UnaryClientInterceptor)
|
unary = append(unary, grpcerrors.UnaryClientInterceptor)
|
||||||
stream = append(stream, grpcerrors.StreamClientInterceptor)
|
stream = append(stream, grpcerrors.StreamClientInterceptor)
|
||||||
|
|
||||||
|
@ -118,10 +129,10 @@ func WithFailFast() ClientOpt {
|
||||||
}
|
}
|
||||||
|
|
||||||
type withDialer struct {
|
type withDialer struct {
|
||||||
dialer func(string, time.Duration) (net.Conn, error)
|
dialer func(context.Context, string) (net.Conn, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
func WithDialer(df func(string, time.Duration) (net.Conn, error)) ClientOpt {
|
func WithContextDialer(df func(context.Context, string) (net.Conn, error)) ClientOpt {
|
||||||
return &withDialer{dialer: df}
|
return &withDialer{dialer: df}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -179,17 +190,13 @@ type withTracer struct {
|
||||||
tracer opentracing.Tracer
|
tracer opentracing.Tracer
|
||||||
}
|
}
|
||||||
|
|
||||||
func resolveDialer(address string) (func(string, time.Duration) (net.Conn, error), error) {
|
func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) {
|
||||||
ch, err := connhelper.GetConnectionHelper(address)
|
ch, err := connhelper.GetConnectionHelper(address)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if ch != nil {
|
if ch != nil {
|
||||||
f := func(a string, _ time.Duration) (net.Conn, error) {
|
return ch.ContextDialer, nil
|
||||||
ctx := context.Background()
|
|
||||||
return ch.ContextDialer(ctx, a)
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
}
|
||||||
// basic dialer
|
// basic dialer
|
||||||
return dialer, nil
|
return dialer, nil
|
||||||
|
|
|
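For context, a minimal sketch of how a caller might use the WithContextDialer option introduced above; the socket path and helper name are illustrative placeholders, not part of this commit:

package main

import (
	"context"
	"net"

	bkclient "github.com/moby/buildkit/client"
)

// newBuildkitClient dials buildkitd over a unix socket using the
// context-aware dialer that replaces the deprecated WithDialer option.
func newBuildkitClient(ctx context.Context) (*bkclient.Client, error) {
	dialer := func(ctx context.Context, _ string) (net.Conn, error) {
		var d net.Dialer
		// hypothetical daemon socket; adjust for your environment
		return d.DialContext(ctx, "unix", "/run/buildkit/buildkitd.sock")
	}
	return bkclient.New(ctx, "unix:///run/buildkit/buildkitd.sock",
		bkclient.WithContextDialer(dialer))
}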
@@ -3,17 +3,18 @@
package client

import (
+	"context"
	"net"
	"strings"
-	"time"

	"github.com/pkg/errors"
)

-func dialer(address string, timeout time.Duration) (net.Conn, error) {
+func dialer(ctx context.Context, address string) (net.Conn, error) {
	addrParts := strings.SplitN(address, "://", 2)
	if len(addrParts) != 2 {
		return nil, errors.Errorf("invalid address %s", address)
	}
-	return net.DialTimeout(addrParts[0], addrParts[1], timeout)
+	var d net.Dialer
+	return d.DialContext(ctx, addrParts[0], addrParts[1])
}
@@ -1,15 +1,15 @@
package client

import (
+	"context"
	"net"
	"strings"
-	"time"

	winio "github.com/Microsoft/go-winio"
	"github.com/pkg/errors"
)

-func dialer(address string, timeout time.Duration) (net.Conn, error) {
+func dialer(ctx context.Context, address string) (net.Conn, error) {
	addrParts := strings.SplitN(address, "://", 2)
	if len(addrParts) != 2 {
		return nil, errors.Errorf("invalid address %s", address)

@@ -17,8 +17,9 @@ func dialer(address string, timeout time.Duration) (net.Conn, error) {
	switch addrParts[0] {
	case "npipe":
		address = strings.Replace(addrParts[1], "/", "\\", -1)
-		return winio.DialPipe(address, &timeout)
+		return winio.DialPipeContext(ctx, address)
	default:
-		return net.DialTimeout(addrParts[0], addrParts[1], timeout)
+		var d net.Dialer
+		return d.DialContext(ctx, addrParts[0], addrParts[1])
	}
}
@@ -16,14 +16,15 @@ import (
// LLB state can be reconstructed from the definition.
type DefinitionOp struct {
	MarshalCache
	mu         sync.Mutex
	ops        map[digest.Digest]*pb.Op
	defs       map[digest.Digest][]byte
	metas      map[digest.Digest]pb.OpMetadata
	sources    map[digest.Digest][]*SourceLocation
	platforms  map[digest.Digest]*specs.Platform
	dgst       digest.Digest
	index      pb.OutputIndex
+	inputCache map[digest.Digest][]*DefinitionOp
}

// NewDefinitionOp returns a new operation from a marshalled definition.

@@ -89,13 +90,14 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
	}

	return &DefinitionOp{
		ops:        ops,
		defs:       defs,
		metas:      def.Metadata,
		sources:    srcs,
		platforms:  platforms,
		dgst:       dgst,
		index:      index,
+		inputCache: make(map[digest.Digest][]*DefinitionOp),
	}, nil
}

@@ -188,14 +190,34 @@ func (d *DefinitionOp) Inputs() []Output {
	d.mu.Unlock()

	for _, input := range op.Inputs {
-		vtx := &DefinitionOp{
-			ops:       d.ops,
-			defs:      d.defs,
-			metas:     d.metas,
-			platforms: d.platforms,
-			dgst:      input.Digest,
-			index:     input.Index,
-		}
+		var vtx *DefinitionOp
+		d.mu.Lock()
+		if existingIndexes, ok := d.inputCache[input.Digest]; ok {
+			if int(input.Index) < len(existingIndexes) && existingIndexes[input.Index] != nil {
+				vtx = existingIndexes[input.Index]
+			}
+		}
+		if vtx == nil {
+			vtx = &DefinitionOp{
+				ops:        d.ops,
+				defs:       d.defs,
+				metas:      d.metas,
+				platforms:  d.platforms,
+				dgst:       input.Digest,
+				index:      input.Index,
+				inputCache: d.inputCache,
+			}
+			existingIndexes := d.inputCache[input.Digest]
+			indexDiff := int(input.Index) - len(existingIndexes)
+			if indexDiff >= 0 {
+				// make room in the slice for the new index being set
+				existingIndexes = append(existingIndexes, make([]*DefinitionOp, indexDiff+1)...)
+			}
+			existingIndexes[input.Index] = vtx
+			d.inputCache[input.Digest] = existingIndexes
+		}
+		d.mu.Unlock()

		inputs = append(inputs, &output{vertex: vtx, platform: platform, getIndex: func() (pb.OutputIndex, error) {
			return pb.OutputIndex(vtx.index), nil
		}})
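The inputCache added above deduplicates DefinitionOp vertices per digest and output index, so walking a definition with heavily shared subgraphs reuses vertices instead of allocating a new one per edge. A rough usage sketch, assuming imports of context and github.com/moby/buildkit/client/llb:

func inspectInputs(ctx context.Context, st llb.State) error {
	// Marshal an LLB state and rebuild the operation graph from its definition;
	// repeated inputs now resolve to the same *DefinitionOp through inputCache.
	def, err := st.Marshal(ctx)
	if err != nil {
		return err
	}
	op, err := llb.NewDefinitionOp(def.ToPB())
	if err != nil {
		return err
	}
	_ = op.Inputs() // shared vertices are returned from the cache
	return nil
}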
@@ -2,7 +2,7 @@ package llb

import (
	"context"
-	_ "crypto/sha256"
+	_ "crypto/sha256" // for opencontainers/go-digest
	"fmt"
	"net"
	"sort"

@@ -153,7 +153,13 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
	}
	if c.Caps != nil {
		if err := c.Caps.Supports(pb.CapExecMetaSetsDefaultPath); err != nil {
-			env = env.SetDefault("PATH", system.DefaultPathEnv)
+			os := "linux"
+			if c.Platform != nil {
+				os = c.Platform.OS
+			} else if e.constraints.Platform != nil {
+				os = e.constraints.Platform.OS
+			}
+			env = env.SetDefault("PATH", system.DefaultPathEnv(os))
		} else {
			addCap(&e.constraints, pb.CapExecMetaSetsDefaultPath)
		}

@@ -174,11 +180,17 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
		return "", nil, nil, nil, err
	}

+	hostname, err := getHostname(e.base)(ctx)
+	if err != nil {
+		return "", nil, nil, nil, err
+	}
+
	meta := &pb.Meta{
		Args:     args,
		Env:      env.ToArray(),
		Cwd:      cwd,
		User:     user,
+		Hostname: hostname,
	}
	extraHosts, err := getExtraHosts(e.base)(ctx)
	if err != nil {

@@ -217,9 +229,9 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []

	if p := e.proxyEnv; p != nil {
		peo.Meta.ProxyEnv = &pb.ProxyEnv{
-			HttpProxy:  p.HttpProxy,
-			HttpsProxy: p.HttpsProxy,
-			FtpProxy:   p.FtpProxy,
+			HttpProxy:  p.HTTPProxy,
+			HttpsProxy: p.HTTPSProxy,
+			FtpProxy:   p.FTPProxy,
			NoProxy:    p.NoProxy,
		}
		addCap(&e.constraints, pb.CapExecMetaProxy)

@@ -629,9 +641,9 @@ type MountInfo struct {
}

type ProxyEnv struct {
-	HttpProxy  string
-	HttpsProxy string
-	FtpProxy   string
+	HTTPProxy  string
+	HTTPSProxy string
+	FTPProxy   string
	NoProxy    string
}
@@ -2,7 +2,7 @@ package llb

import (
	"context"
-	_ "crypto/sha256"
+	_ "crypto/sha256" // for opencontainers/go-digest
	"os"
	"path"
	"strconv"

@@ -252,13 +252,13 @@ func (co ChownOpt) SetCopyOption(mi *CopyInfo) {
	mi.ChownOpt = &co
}

-func (cp *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt {
-	if cp == nil {
+func (co *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt {
+	if co == nil {
		return nil
	}
	return &pb.ChownOpt{
-		User:  cp.User.marshal(base),
-		Group: cp.Group.marshal(base),
+		User:  co.User.marshal(base),
+		Group: co.Group.marshal(base),
	}
}

@@ -476,17 +476,17 @@ func (a *fileActionCopy) toProtoAction(ctx context.Context, parent string, base
	}, nil
}

-func (c *fileActionCopy) sourcePath(ctx context.Context) (string, error) {
-	p := path.Clean(c.src)
+func (a *fileActionCopy) sourcePath(ctx context.Context) (string, error) {
+	p := path.Clean(a.src)
	if !path.IsAbs(p) {
-		if c.state != nil {
-			dir, err := c.state.GetDir(ctx)
+		if a.state != nil {
+			dir, err := a.state.GetDir(ctx)
			if err != nil {
				return "", err
			}
			p = path.Join("/", dir, p)
-		} else if c.fas != nil {
-			dir, err := c.fas.state.GetDir(ctx)
+		} else if a.fas != nil {
+			dir, err := a.fas.state.GetDir(ctx)
			if err != nil {
				return "", err
			}
@@ -19,6 +19,7 @@ var (
	keyDir  = contextKeyT("llb.exec.dir")
	keyEnv  = contextKeyT("llb.exec.env")
	keyUser = contextKeyT("llb.exec.user")
+	keyHostname  = contextKeyT("llb.exec.hostname")
	keyExtraHost = contextKeyT("llb.exec.extrahost")
	keyPlatform  = contextKeyT("llb.platform")
	keyNetwork   = contextKeyT("llb.network")

@@ -143,6 +144,25 @@ func getUser(s State) func(context.Context) (string, error) {
	}
}

+func Hostname(str string) StateOption {
+	return func(s State) State {
+		return s.WithValue(keyHostname, str)
+	}
+}
+
+func getHostname(s State) func(context.Context) (string, error) {
+	return func(ctx context.Context) (string, error) {
+		v, err := s.getValue(keyHostname)(ctx)
+		if err != nil {
+			return "", err
+		}
+		if v != nil {
+			return v.(string), nil
+		}
+		return "", nil
+	}
+}
+
func args(args ...string) StateOption {
	return func(s State) State {
		return s.WithValue(keyArgs, args)

@@ -155,7 +175,7 @@ func shlexf(str string, replace bool, v ...interface{}) StateOption {
	}
	return func(s State) State {
		arg, err := shlex.Split(str)
-		if err != nil {
+		if err != nil { //nolint
			// TODO: handle error
		}
		return args(arg...)(s)
@@ -2,15 +2,17 @@ package llb

import (
	"context"
-	_ "crypto/sha256"
+	_ "crypto/sha256" // for opencontainers/go-digest
	"encoding/json"
	"os"
+	"regexp"
	"strconv"
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/apicaps"
+	"github.com/moby/buildkit/util/sshutil"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

@@ -197,15 +199,59 @@ type ImageInfo struct {
	RecordType string
}

-func Git(remote, ref string, opts ...GitOption) State {
-	url := ""
-
-	for _, prefix := range []string{
-		"http://", "https://", "git://", "git@",
-	} {
-		if strings.HasPrefix(remote, prefix) {
-			url = strings.Split(remote, "#")[0]
-			remote = strings.TrimPrefix(remote, prefix)
-		}
-	}
+const (
+	gitProtocolHTTP = iota + 1
+	gitProtocolHTTPS
+	gitProtocolSSH
+	gitProtocolGit
+	gitProtocolUnknown
+)
+
+var gitSSHRegex = regexp.MustCompile("^([a-z0-9]+@)?[^:]+:.*$")
+
+func getGitProtocol(remote string) (string, int) {
+	prefixes := map[string]int{
+		"http://":  gitProtocolHTTP,
+		"https://": gitProtocolHTTPS,
+		"git://":   gitProtocolGit,
+		"ssh://":   gitProtocolSSH,
+	}
+	protocolType := gitProtocolUnknown
+	for prefix, potentialType := range prefixes {
+		if strings.HasPrefix(remote, prefix) {
+			remote = strings.TrimPrefix(remote, prefix)
+			protocolType = potentialType
+		}
+	}
+
+	if protocolType == gitProtocolUnknown && gitSSHRegex.MatchString(remote) {
+		protocolType = gitProtocolSSH
+	}
+
+	// remove name from ssh
+	if protocolType == gitProtocolSSH {
+		parts := strings.SplitN(remote, "@", 2)
+		if len(parts) == 2 {
+			remote = parts[1]
+		}
+	}
+
+	return remote, protocolType
+}
+
+func Git(remote, ref string, opts ...GitOption) State {
+	url := strings.Split(remote, "#")[0]
+
+	var protocolType int
+	remote, protocolType = getGitProtocol(remote)
+
+	var sshHost string
+	if protocolType == gitProtocolSSH {
+		parts := strings.SplitN(remote, ":", 2)
+		if len(parts) == 2 {
+			sshHost = parts[0]
+			// keep remote consistent with http(s) version
+			remote = parts[0] + "/" + parts[1]
+		}
+	}

@@ -233,11 +279,34 @@ func Git(remote, ref string, opts ...GitOption) State {
	}
	if gi.AuthTokenSecret != "" {
		attrs[pb.AttrAuthTokenSecret] = gi.AuthTokenSecret
-		addCap(&gi.Constraints, pb.CapSourceGitHttpAuth)
+		if gi.addAuthCap {
+			addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth)
+		}
	}
	if gi.AuthHeaderSecret != "" {
		attrs[pb.AttrAuthHeaderSecret] = gi.AuthHeaderSecret
-		addCap(&gi.Constraints, pb.CapSourceGitHttpAuth)
+		if gi.addAuthCap {
+			addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth)
+		}
+	}
+	if protocolType == gitProtocolSSH {
+		if gi.KnownSSHHosts != "" {
+			attrs[pb.AttrKnownSSHHosts] = gi.KnownSSHHosts
+		} else if sshHost != "" {
+			keyscan, err := sshutil.SSHKeyScan(sshHost)
+			if err == nil {
+				// best effort
+				attrs[pb.AttrKnownSSHHosts] = keyscan
+			}
+		}
+		addCap(&gi.Constraints, pb.CapSourceGitKnownSSHHosts)
+
+		if gi.MountSSHSock == "" {
+			attrs[pb.AttrMountSSHSock] = "default"
+		} else {
+			attrs[pb.AttrMountSSHSock] = gi.MountSSHSock
+		}
+		addCap(&gi.Constraints, pb.CapSourceGitMountSSHSock)
	}

	addCap(&gi.Constraints, pb.CapSourceGit)

@@ -260,6 +329,9 @@ type GitInfo struct {
	KeepGitDir       bool
	AuthTokenSecret  string
	AuthHeaderSecret string
+	addAuthCap       bool
+	KnownSSHHosts    string
+	MountSSHSock     string
}

func KeepGitDir() GitOption {

@@ -271,12 +343,27 @@ func KeepGitDir() GitOption {
func AuthTokenSecret(v string) GitOption {
	return gitOptionFunc(func(gi *GitInfo) {
		gi.AuthTokenSecret = v
+		gi.addAuthCap = true
	})
}

func AuthHeaderSecret(v string) GitOption {
	return gitOptionFunc(func(gi *GitInfo) {
		gi.AuthHeaderSecret = v
+		gi.addAuthCap = true
+	})
+}
+
+func KnownSSHHosts(key string) GitOption {
+	key = strings.TrimSuffix(key, "\n")
+	return gitOptionFunc(func(gi *GitInfo) {
+		gi.KnownSSHHosts = gi.KnownSSHHosts + key + "\n"
+	})
+}
+
+func MountSSHSock(sshID string) GitOption {
+	return gitOptionFunc(func(gi *GitInfo) {
+		gi.MountSSHSock = sshID
	})
}
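A short sketch of the new SSH-aware Git source from the LLB client side; the remote, secret ID, and known-hosts line are illustrative placeholders rather than values from this commit:

// An scp-style remote is detected as gitProtocolSSH by getGitProtocol.
src := llb.Git(
	"git@github.com:moby/buildkit.git",
	"master",
	llb.MountSSHSock("default"),                       // forward the build's SSH agent socket
	llb.KnownSSHHosts("github.com ssh-ed25519 AAAA…"), // pin the host key instead of the ssh-keyscan fallback
)
_ = src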
@@ -320,6 +320,14 @@ func (s State) User(v string) State {
	return User(v)(s)
}

+func (s State) Hostname(v string) State {
+	return Hostname(v)(s)
+}
+
+func (s State) GetHostname(ctx context.Context) (string, error) {
+	return getHostname(s)(ctx)
+}
+
func (s State) Platform(p specs.Platform) State {
	return platform(p)(s)
}
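With the Hostname state option and the State.Hostname helper above, a client can pin the hostname seen by exec ops. A minimal sketch, assuming the standard llb package import:

st := llb.Image("docker.io/library/alpine:latest").
	Hostname("buildhost"). // stored under keyHostname and emitted as pb.Meta.Hostname
	Run(llb.Shlex("hostname")).
	Root()
_ = st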
@@ -6,7 +6,7 @@ import (
	"os"

	"github.com/gofrs/flock"
-	"github.com/opencontainers/image-spec/specs-go/v1"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)
@@ -1,4 +1,4 @@
-package dockerignore // import "github.com/docker/docker/builder/dockerignore"
+package dockerignore

import (
	"bufio"
@@ -2,6 +2,7 @@ package client

import (
	"context"
+	"io"

	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/solver/pb"

@@ -16,6 +17,64 @@ type Client interface {
	ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error)
	BuildOpts() BuildOpts
	Inputs(ctx context.Context) (map[string]llb.State, error)
+	NewContainer(ctx context.Context, req NewContainerRequest) (Container, error)
+}
+
+// NewContainerRequest encapsulates the requirements for a client to define a
+// new container, without defining the initial process.
+type NewContainerRequest struct {
+	Mounts      []Mount
+	NetMode     pb.NetMode
+	Platform    *pb.Platform
+	Constraints *pb.WorkerConstraints
+}
+
+// Mount allows clients to specify a filesystem mount. A Reference to a
+// previously solved Result is required.
+type Mount struct {
+	Selector  string
+	Dest      string
+	ResultID  string
+	Ref       Reference
+	Readonly  bool
+	MountType pb.MountType
+	CacheOpt  *pb.CacheOpt
+	SecretOpt *pb.SecretOpt
+	SSHOpt    *pb.SSHOpt
+}
+
+// Container is used to start new processes inside a container and release the
+// container resources when done.
+type Container interface {
+	Start(context.Context, StartRequest) (ContainerProcess, error)
+	Release(context.Context) error
+}
+
+// StartRequest encapsulates the arguments to define a process within a
+// container.
+type StartRequest struct {
+	Args           []string
+	Env             []string
+	User           string
+	Cwd            string
+	Tty            bool
+	Stdin          io.ReadCloser
+	Stdout, Stderr io.WriteCloser
+	SecurityMode   pb.SecurityMode
+}
+
+// WinSize is same as executor.WinSize, copied here to prevent circular package
+// dependencies.
+type WinSize struct {
+	Rows uint32
+	Cols uint32
+}
+
+// ContainerProcess represents a process within a container.
+type ContainerProcess interface {
+	Wait() error
+	Resize(ctx context.Context, size WinSize) error
+	// TODO Signal(ctx context.Context, sig os.Signal)
}

type Reference interface {

@@ -46,6 +105,7 @@ type StatRequest struct {

// SolveRequest is same as frontend.SolveRequest but avoiding dependency
type SolveRequest struct {
+	Evaluate    bool
	Definition  *pb.Definition
	Frontend    string
	FrontendOpt map[string]string
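The interface additions above let a frontend create an interactive container from a solved result. A hedged sketch of how a frontend might use this API (the mount layout and function name are assumptions, not part of this commit); it assumes imports of context, os, client "github.com/moby/buildkit/frontend/gateway/client", and pb "github.com/moby/buildkit/solver/pb":

func runShell(ctx context.Context, c client.Client, rootRef client.Reference) error {
	// Bind the solved result at / and start an interactive shell in it.
	ctr, err := c.NewContainer(ctx, client.NewContainerRequest{
		Mounts: []client.Mount{{
			Dest:      "/",
			Ref:       rootRef,
			MountType: pb.MountType_BIND,
		}},
	})
	if err != nil {
		return err
	}
	defer ctr.Release(ctx) // release error ignored in this sketch

	proc, err := ctr.Start(ctx, client.StartRequest{
		Args:   []string{"/bin/sh"},
		Tty:    true,
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	})
	if err != nil {
		return err
	}
	return proc.Wait()
}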
@@ -3,10 +3,12 @@ package grpcclient
import (
	"context"
	"encoding/json"
+	"fmt"
	"io"
	"net"
	"os"
	"strings"
+	"sync"
	"time"

	"github.com/gogo/googleapis/google/rpc"

@@ -15,26 +17,33 @@ import (
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/frontend/gateway/client"
	pb "github.com/moby/buildkit/frontend/gateway/pb"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/solver/errdefs"
	opspb "github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/apicaps"
	"github.com/moby/buildkit/util/grpcerrors"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
	fstypes "github.com/tonistiigi/fsutil/types"
+	"golang.org/x/sync/errgroup"
+	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const frontendPrefix = "BUILDKIT_FRONTEND_OPT_"

type GrpcClient interface {
+	client.Client
	Run(context.Context, client.BuildFunc) error
}

func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) {
-	ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
-	defer cancel()
-	resp, err := c.Ping(ctx, &pb.PingRequest{})
+	pingCtx, pingCancel := context.WithTimeout(ctx, 15*time.Second)
+	defer pingCancel()
+	resp, err := c.Ping(pingCtx, &pb.PingRequest{})
	if err != nil {
		return nil, err
	}

@@ -56,6 +65,7 @@ func New(ctx context.Context, opts map[string]string, session, product string, c
		caps:      pb.Caps.CapSet(resp.FrontendAPICaps),
		llbCaps:   opspb.Caps.CapSet(resp.LLBCaps),
		requests:  map[string]*pb.SolveRequest{},
+		execMsgs:  newMessageForwarder(ctx, c),
	}, nil
}

@@ -167,10 +177,21 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
		}()
	}

+	defer func() {
+		err = c.execMsgs.Release()
+		if err != nil && retError != nil {
+			retError = err
+		}
+	}()
+
	if res, err = f(ctx, c); err != nil {
		return err
	}

+	if res == nil {
+		return nil
+	}
+
	if err := c.caps.Supports(pb.CapReturnMap); len(res.Refs) > 1 && err != nil {
		return err
	}

@@ -253,6 +274,7 @@ type grpcClient struct {
	caps      apicaps.CapSet
	llbCaps   apicaps.CapSet
	requests  map[string]*pb.SolveRequest
+	execMsgs  *messageForwarder
}

func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) {

@@ -276,7 +298,7 @@ func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, erro
	return req, nil
}

-func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*client.Result, error) {
+func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *client.Result, err error) {
	if creq.Definition != nil {
		for _, md := range creq.Definition.Metadata {
			for cap := range md.Caps {

@@ -322,13 +344,45 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*clie
		req.ExporterAttr = []byte("{}")
	}

+	if creq.Evaluate {
+		if c.caps.Supports(pb.CapGatewayEvaluateSolve) == nil {
+			req.Evaluate = creq.Evaluate
+		} else {
+			// If evaluate is not supported, fallback to running Stat(".") in order to
+			// trigger an evaluation of the result.
+			defer func() {
+				if res == nil {
+					return
+				}
+
+				var (
+					id  string
+					ref client.Reference
+				)
+				ref, err = res.SingleRef()
+				if err != nil {
+					for refID := range res.Refs {
+						id = refID
+						break
+					}
+				} else {
+					id = ref.(*reference).id
+				}
+
+				_, err = c.client.StatFile(ctx, &pb.StatFileRequest{
+					Ref:  id,
+					Path: ".",
+				})
+			}()
+		}
+	}
+
	resp, err := c.client.Solve(ctx, req)
	if err != nil {
		return nil, err
	}

-	res := &client.Result{}
+	res = &client.Result{}

	if resp.Result == nil {
		if id := resp.Ref; id != "" {
			c.requests[id] = req

@@ -423,14 +477,460 @@ func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) {
		inputs[key] = llb.NewState(op)
	}
	return inputs, nil
}

+// procMessageForwarder is created per container process to act as the
+// communication channel between the process and the ExecProcess message
+// stream.
+type procMessageForwarder struct {
+	done      chan struct{}
+	closeOnce sync.Once
+	msgs      chan *pb.ExecMessage
+}
+
+func newProcMessageForwarder() *procMessageForwarder {
+	return &procMessageForwarder{
+		done: make(chan struct{}),
+		msgs: make(chan *pb.ExecMessage),
+	}
+}
+
+func (b *procMessageForwarder) Send(ctx context.Context, m *pb.ExecMessage) {
+	select {
+	case <-ctx.Done():
+	case <-b.done:
+		b.closeOnce.Do(func() {
+			close(b.msgs)
+		})
+	case b.msgs <- m:
+	}
+}
+
+func (b *procMessageForwarder) Recv(ctx context.Context) (m *pb.ExecMessage, ok bool) {
+	select {
+	case <-ctx.Done():
+		return nil, true
+	case <-b.done:
+		return nil, false
+	case m = <-b.msgs:
+		return m, true
+	}
+}
+
+func (b *procMessageForwarder) Close() {
+	close(b.done)
+	b.Recv(context.Background())      // flush any messages in queue
+	b.Send(context.Background(), nil) // ensure channel is closed
+}
+
+// messageForwarder manages a single grpc stream for ExecProcess to facilitate
+// a pub/sub message channel for each new process started from the client
+// connection.
+type messageForwarder struct {
+	client pb.LLBBridgeClient
+	ctx    context.Context
+	cancel func()
+	eg     *errgroup.Group
+	mu     sync.Mutex
+	pids   map[string]*procMessageForwarder
+	stream pb.LLBBridge_ExecProcessClient
+	// startOnce used to only start the exec message forwarder once,
+	// so we only have one exec stream per client
+	startOnce sync.Once
+	// startErr tracks the error when initializing the stream, it will
+	// be returned on subsequent calls to Start
+	startErr error
+}
+
+func newMessageForwarder(ctx context.Context, client pb.LLBBridgeClient) *messageForwarder {
+	ctx, cancel := context.WithCancel(ctx)
+	eg, ctx := errgroup.WithContext(ctx)
+	return &messageForwarder{
+		client: client,
+		pids:   map[string]*procMessageForwarder{},
+		ctx:    ctx,
+		cancel: cancel,
+		eg:     eg,
+	}
+}
+
+func (m *messageForwarder) Start() (err error) {
+	defer func() {
+		if err != nil {
+			m.startErr = err
+		}
+	}()
+
+	if m.startErr != nil {
+		return m.startErr
+	}
+
+	m.startOnce.Do(func() {
+		m.stream, err = m.client.ExecProcess(m.ctx)
+		if err != nil {
+			return
+		}
+		m.eg.Go(func() error {
+			for {
+				msg, err := m.stream.Recv()
+				if errors.Is(err, io.EOF) || grpcerrors.Code(err) == codes.Canceled {
+					return nil
+				}
+				logrus.Debugf("|<--- %s", debugMessage(msg))
+
+				if err != nil {
+					return err
+				}
+
+				m.mu.Lock()
+				msgs, ok := m.pids[msg.ProcessID]
+				m.mu.Unlock()
+
+				if !ok {
+					logrus.Debugf("Received exec message for unregistered process: %s", msg.String())
+					continue
+				}
+				msgs.Send(m.ctx, msg)
+			}
+		})
+	})
+	return err
+}
+
+func debugMessage(msg *pb.ExecMessage) string {
+	switch m := msg.GetInput().(type) {
+	case *pb.ExecMessage_Init:
+		return fmt.Sprintf("Init Message %s", msg.ProcessID)
+	case *pb.ExecMessage_File:
+		if m.File.EOF {
+			return fmt.Sprintf("File Message %s, fd=%d, EOF", msg.ProcessID, m.File.Fd)
+		}
+		return fmt.Sprintf("File Message %s, fd=%d, %d bytes", msg.ProcessID, m.File.Fd, len(m.File.Data))
+	case *pb.ExecMessage_Resize:
+		return fmt.Sprintf("Resize Message %s", msg.ProcessID)
+	case *pb.ExecMessage_Started:
+		return fmt.Sprintf("Started Message %s", msg.ProcessID)
+	case *pb.ExecMessage_Exit:
+		return fmt.Sprintf("Exit Message %s, code=%d, err=%s", msg.ProcessID, m.Exit.Code, m.Exit.Error)
+	case *pb.ExecMessage_Done:
+		return fmt.Sprintf("Done Message %s", msg.ProcessID)
+	}
+	return fmt.Sprintf("Unknown Message %s", msg.String())
+}
+
+func (m *messageForwarder) Send(msg *pb.ExecMessage) error {
+	m.mu.Lock()
+	_, ok := m.pids[msg.ProcessID]
+	defer m.mu.Unlock()
+	if !ok {
+		return errors.Errorf("process %s has ended, not sending message %#v", msg.ProcessID, msg.Input)
+	}
+	logrus.Debugf("|---> %s", debugMessage(msg))
+	return m.stream.Send(msg)
+}
+
+func (m *messageForwarder) Release() error {
+	m.cancel()
+	return m.eg.Wait()
+}
+
+func (m *messageForwarder) Register(pid string) *procMessageForwarder {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	sender := newProcMessageForwarder()
+	m.pids[pid] = sender
+	return sender
+}
+
+func (m *messageForwarder) Deregister(pid string) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	sender, ok := m.pids[pid]
+	if !ok {
+		return
+	}
+	delete(m.pids, pid)
+	sender.Close()
+}
+
+type msgWriter struct {
+	mux       *messageForwarder
+	fd        uint32
+	processID string
+}
+
+func (w *msgWriter) Write(msg []byte) (int, error) {
+	err := w.mux.Send(&pb.ExecMessage{
+		ProcessID: w.processID,
+		Input: &pb.ExecMessage_File{
+			File: &pb.FdMessage{
+				Fd:   w.fd,
+				Data: msg,
+			},
+		},
+	})
+	if err != nil {
+		return 0, err
+	}
+	return len(msg), nil
+}
+
+func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) {
+	err := c.caps.Supports(pb.CapGatewayExec)
+	if err != nil {
+		return nil, err
+	}
+	id := identity.NewID()
+	var mounts []*opspb.Mount
+	for _, m := range req.Mounts {
+		resultID := m.ResultID
+		if m.Ref != nil {
+			ref, ok := m.Ref.(*reference)
+			if !ok {
+				return nil, errors.Errorf("unexpected type for reference, got %T", m.Ref)
+			}
+			resultID = ref.id
+		}
+		mounts = append(mounts, &opspb.Mount{
+			Dest:      m.Dest,
+			Selector:  m.Selector,
+			Readonly:  m.Readonly,
+			MountType: m.MountType,
+			ResultID:  resultID,
+			CacheOpt:  m.CacheOpt,
+			SecretOpt: m.SecretOpt,
+			SSHOpt:    m.SSHOpt,
+		})
+	}
+
+	logrus.Debugf("|---> NewContainer %s", id)
+	_, err = c.client.NewContainer(ctx, &pb.NewContainerRequest{
+		ContainerID: id,
+		Mounts:      mounts,
+		Platform:    req.Platform,
+		Constraints: req.Constraints,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// ensure message forwarder is started, only sets up stream first time called
+	err = c.execMsgs.Start()
+	if err != nil {
+		return nil, err
+	}
+
+	return &container{
+		client:   c.client,
+		id:       id,
+		execMsgs: c.execMsgs,
+	}, nil
+}
+
+type container struct {
+	client   pb.LLBBridgeClient
+	id       string
+	execMsgs *messageForwarder
+}
+
+func (ctr *container) Start(ctx context.Context, req client.StartRequest) (client.ContainerProcess, error) {
+	pid := fmt.Sprintf("%s:%s", ctr.id, identity.NewID())
+	msgs := ctr.execMsgs.Register(pid)
+
+	init := &pb.InitMessage{
+		ContainerID: ctr.id,
+		Meta: &opspb.Meta{
+			Args: req.Args,
+			Env:  req.Env,
+			Cwd:  req.Cwd,
+			User: req.User,
+		},
+		Tty:      req.Tty,
+		Security: req.SecurityMode,
+	}
+	if req.Stdin != nil {
+		init.Fds = append(init.Fds, 0)
+	}
+	if req.Stdout != nil {
+		init.Fds = append(init.Fds, 1)
+	}
+	if req.Stderr != nil {
+		init.Fds = append(init.Fds, 2)
+	}
+
+	err := ctr.execMsgs.Send(&pb.ExecMessage{
+		ProcessID: pid,
+		Input: &pb.ExecMessage_Init{
+			Init: init,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	msg, _ := msgs.Recv(ctx)
+	if msg == nil {
+		return nil, errors.Errorf("failed to receive started message")
+	}
+	started := msg.GetStarted()
+	if started == nil {
+		return nil, errors.Errorf("expecting started message, got %T", msg.GetInput())
+	}
+
+	eg, ctx := errgroup.WithContext(ctx)
+	done := make(chan struct{})
+
+	ctrProc := &containerProcess{
+		execMsgs: ctr.execMsgs,
+		id:       pid,
+		eg:       eg,
+	}
+
+	var stdinReader *io.PipeReader
+	ctrProc.eg.Go(func() error {
+		<-done
+		if stdinReader != nil {
+			return stdinReader.Close()
+		}
+		return nil
+	})
+
+	if req.Stdin != nil {
+		var stdinWriter io.WriteCloser
+		stdinReader, stdinWriter = io.Pipe()
+		// This go routine is intentionally not part of the errgroup because
+		// if os.Stdin is used for req.Stdin then this will block until
+		// the user closes the input, which will likely be after we are done
+		// with the container, so we can't Wait on it.
+		go func() {
+			io.Copy(stdinWriter, req.Stdin)
+			stdinWriter.Close()
+		}()
+
+		ctrProc.eg.Go(func() error {
+			m := &msgWriter{
+				mux:       ctr.execMsgs,
+				processID: pid,
+				fd:        0,
+			}
+			_, err := io.Copy(m, stdinReader)
+			// ignore ErrClosedPipe, it is EOF for our usage.
+			if err != nil && !errors.Is(err, io.ErrClosedPipe) {
+				return err
+			}
+			// not an error so must be eof
+			return ctr.execMsgs.Send(&pb.ExecMessage{
+				ProcessID: pid,
+				Input: &pb.ExecMessage_File{
+					File: &pb.FdMessage{
+						Fd:  0,
+						EOF: true,
+					},
+				},
+			})
+		})
+	}
+
+	ctrProc.eg.Go(func() error {
+		var closeDoneOnce sync.Once
+		var exitError error
+		for {
+			msg, ok := msgs.Recv(ctx)
+			if !ok {
+				// no more messages, return
+				return exitError
+			}
+
+			if msg == nil {
+				// empty message from ctx cancel, so just start shutting down
+				// input, but continue processing more exit/done messages
+				closeDoneOnce.Do(func() {
+					close(done)
+				})
+				continue
+			}
+
+			if file := msg.GetFile(); file != nil {
+				var out io.WriteCloser
+				switch file.Fd {
+				case 1:
+					out = req.Stdout
+				case 2:
+					out = req.Stderr
+				}
+				if out == nil {
+					// if things are plumbed correctly this should never happen
+					return errors.Errorf("missing writer for output fd %d", file.Fd)
+				}
+				if len(file.Data) > 0 {
+					_, err := out.Write(file.Data)
+					if err != nil {
+						return err
+					}
+				}
+			} else if exit := msg.GetExit(); exit != nil {
+				// capture exit message to exitError so we can return it after
+				// the server sends the Done message
+				closeDoneOnce.Do(func() {
+					close(done)
+				})
+				if exit.Code == 0 {
+					continue
+				}
+				exitError = grpcerrors.FromGRPC(status.ErrorProto(&spb.Status{
+					Code:    exit.Error.Code,
+					Message: exit.Error.Message,
+					Details: convertGogoAny(exit.Error.Details),
+				}))
+				if exit.Code != errdefs.ContainerdUnknownExitStatus {
+					exitError = &errdefs.ExitError{ExitCode: exit.Code, Err: exitError}
+				}
+			} else if serverDone := msg.GetDone(); serverDone != nil {
+				return exitError
+			} else {
+				return errors.Errorf("unexpected Exec Message for pid %s: %T", pid, msg.GetInput())
+			}
+		}
+	})
+
+	return ctrProc, nil
+}
+
+func (ctr *container) Release(ctx context.Context) error {
+	logrus.Debugf("|---> ReleaseContainer %s", ctr.id)
+	_, err := ctr.client.ReleaseContainer(ctx, &pb.ReleaseContainerRequest{
+		ContainerID: ctr.id,
+	})
+	return err
+}
+
+type containerProcess struct {
+	execMsgs *messageForwarder
+	id       string
+	eg       *errgroup.Group
+}
+
+func (ctrProc *containerProcess) Wait() error {
+	defer ctrProc.execMsgs.Deregister(ctrProc.id)
+	return ctrProc.eg.Wait()
+}
+
+func (ctrProc *containerProcess) Resize(_ context.Context, size client.WinSize) error {
+	return ctrProc.execMsgs.Send(&pb.ExecMessage{
+		ProcessID: ctrProc.id,
+		Input: &pb.ExecMessage_Resize{
+			Resize: &pb.ResizeMessage{
+				Cols: size.Cols,
+				Rows: size.Rows,
+			},
+		},
+	})
+}

type reference struct {
	c      *grpcClient
	id     string
	def    *opspb.Definition
-	output llb.Output
}

func newReference(c *grpcClient, ref *pb.Ref) (*reference, error) {

@@ -502,11 +1002,11 @@ func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fsty
}

func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) {
-	dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
+	dialOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
		return stdioConn(), nil
	})

-	cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor))
+	cc, err := grpc.DialContext(ctx, "localhost", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor))
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create grpc client")
	}

@@ -593,6 +1093,14 @@ func product() string {
	return os.Getenv("BUILDKIT_EXPORTEDPRODUCT")
}

+func convertGogoAny(in []*gogotypes.Any) []*any.Any {
+	out := make([]*any.Any, len(in))
+	for i := range in {
+		out[i] = &any.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value}
+	}
+	return out
+}
+
func convertToGogoAny(in []*any.Any) []*gogotypes.Any {
	out := make([]*gogotypes.Any, len(in))
	for i := range in {
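The Evaluate flag threaded through SolveRequest above lets a frontend force lazy solve results to be materialized so errors surface at Solve time; when the daemon lacks CapGatewayEvaluateSolve the client falls back to a Stat(".") call, as shown in the diff. A minimal frontend-side sketch, assuming def is a marshalled *llb.Definition and the gateway client import:

func solveEager(ctx context.Context, c client.Client, def *llb.Definition) (*client.Result, error) {
	return c.Solve(ctx, client.SolveRequest{
		Definition: def.ToPB(),
		Evaluate:   true, // unlazy the result now instead of at export time
	})
}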
@@ -1,4 +1,4 @@
-package moby_buildkit_v1_frontend
+package moby_buildkit_v1_frontend //nolint:golint

import "github.com/moby/buildkit/util/apicaps"

@@ -35,6 +35,18 @@ const (

	// CapGatewaySolveMetadata can be used to check if solve calls from gateway reliably return metadata
	CapGatewaySolveMetadata apicaps.CapID = "gateway.solve.metadata"
+
+	// CapGatewayExec is the capability to create and interact with new
+	// containers directly through the gateway
+	CapGatewayExec apicaps.CapID = "gateway.exec"
+
+	// CapFrontendCaps can be used to check that frontends define support for certain capabilities
+	CapFrontendCaps apicaps.CapID = "frontend.caps"
+
+	// CapGatewayEvaluateSolve is a capability to immediately unlazy solve
+	// results. This is generally used by the client to return and handle solve
+	// errors.
+	CapGatewayEvaluateSolve apicaps.CapID = "gateway.solve.evaluate"
)

func init() {

@@ -136,4 +148,25 @@ func init() {
		Enabled: true,
		Status:  apicaps.CapStatusExperimental,
	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapGatewayExec,
+		Name:    "gateway exec",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapFrontendCaps,
+		Name:    "frontend capabilities",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapGatewayEvaluateSolve,
+		Name:    "gateway evaluate solve",
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
}
File diff suppressed because it is too large
@@ -28,6 +28,10 @@ service LLBBridge {
	rpc Return(ReturnRequest) returns (ReturnResponse);
	// apicaps:CapFrontendInputs
	rpc Inputs(InputsRequest) returns (InputsResponse);
+
+	rpc NewContainer(NewContainerRequest) returns (NewContainerResponse);
+	rpc ReleaseContainer(ReleaseContainerRequest) returns (ReleaseContainerResponse);
+	rpc ExecProcess(stream ExecMessage) returns (stream ExecMessage);
}

message Result {

@@ -103,6 +107,8 @@ message SolveRequest {

	// apicaps:CapFrontendInputs
	map<string, pb.Definition> FrontendInputs = 13;
+
+	bool Evaluate = 14;
}

// CacheOptionsEntry corresponds to the control.CacheOptionsEntry

@@ -162,3 +168,71 @@ message PongResponse{
	repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false];
	repeated moby.buildkit.v1.types.WorkerRecord Workers = 3;
}
+
+message NewContainerRequest {
+	string ContainerID = 1;
+	// For mount input values we can use random identifiers passed with ref
+	repeated pb.Mount Mounts = 2;
+	pb.NetMode Network = 3;
+	pb.Platform platform = 4;
+	pb.WorkerConstraints constraints = 5;
+}
+
+message NewContainerResponse{}
+
+message ReleaseContainerRequest {
+	string ContainerID = 1;
+}
+
+message ReleaseContainerResponse{}
+
+message ExecMessage {
+	string ProcessID = 1;
+	oneof Input {
+		// InitMessage sent from client to server will start a new process in a
+		// container
+		InitMessage Init = 2;
+		// FdMessage used from client to server for input (stdin) and
+		// from server to client for output (stdout, stderr)
+		FdMessage File = 3;
+		// ResizeMessage used from client to server for terminal resize events
+		ResizeMessage Resize = 4;
+		// StartedMessage sent from server to client after InitMessage to
+		// indicate the process has started.
+		StartedMessage Started = 5;
+		// ExitMessage sent from server to client will contain the exit code
+		// when the process ends.
+		ExitMessage Exit = 6;
+		// DoneMessage from server to client will be the last message for any
+		// process. Note that FdMessage might be sent after ExitMessage.
+		DoneMessage Done = 7;
+	}
+}
+
+message InitMessage{
+	string ContainerID = 1;
+	pb.Meta Meta = 2;
+	repeated uint32 Fds = 3;
+	bool Tty = 4;
+	pb.SecurityMode Security = 5;
+}
+
+message ExitMessage {
+	uint32 Code = 1;
+	google.rpc.Status Error = 2;
+}
+
+message StartedMessage{}
+
+message DoneMessage{}
+
+message FdMessage{
+	uint32 Fd = 1; // what fd the data was from
+	bool EOF = 2; // true if eof was reached
+	bytes Data = 3;
+}
+
+message ResizeMessage{
+	uint32 Rows = 1;
+	uint32 Cols = 2;
+}
|
@@ -1,3 +1,3 @@
-package moby_buildkit_v1_frontend
+package moby_buildkit_v1_frontend //nolint:golint
 
 //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. gateway.proto
@@ -3,76 +3,77 @@ module github.com/moby/buildkit
 go 1.13
 
 require (
-	github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200512015515-32086ef23a5a
+	github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0
-	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
 	github.com/BurntSushi/toml v0.3.1
-	github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
+	github.com/Microsoft/go-winio v0.4.15
-	github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 // indirect
+	github.com/Microsoft/hcsshim v0.8.10
 	github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
-	github.com/containerd/cgroups v0.0.0-20200327175542-b44481373989 // indirect
+	github.com/containerd/console v1.0.1
-	github.com/containerd/console v1.0.0
+	github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
-	github.com/containerd/containerd v1.4.0-0
+	github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe
-	github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb
+	github.com/containerd/go-cni v1.0.1
-	github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b // indirect
+	github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0
-	github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75
+	github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116
-	github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328
+	github.com/containerd/typeurl v1.0.1
-	github.com/coreos/go-systemd/v22 v22.0.0
+	github.com/coreos/go-systemd/v22 v22.1.0
-	github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
+	github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
 	github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker v0.0.0
+	github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
-	github.com/docker/docker-credential-helpers v0.6.0 // indirect
+	github.com/docker/go-connections v0.4.0
-	github.com/docker/go-connections v0.3.0
+	github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f
-	github.com/docker/libnetwork v0.8.0-dev.2.0.20200226230617-d8334ccdb9be
+	github.com/gofrs/flock v0.7.3
-	github.com/gofrs/flock v0.7.0
 	github.com/gogo/googleapis v1.3.2
 	github.com/gogo/protobuf v1.3.1
-	github.com/golang/protobuf v1.3.3
+	// protobuf: the actual version is replaced in replace()
-	github.com/google/go-cmp v0.4.0
+	github.com/golang/protobuf v1.4.2
-	github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
+	github.com/google/go-cmp v0.4.1
-	github.com/google/uuid v1.1.1 // indirect
+	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
-	github.com/gorilla/mux v1.7.4 // indirect
+	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
 	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
 	github.com/hashicorp/go-immutable-radix v1.0.0
-	github.com/hashicorp/golang-lru v0.5.1
+	github.com/hashicorp/golang-lru v0.5.3
 	github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
-	github.com/imdario/mergo v0.3.9 // indirect
 	github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
 	github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
 	github.com/mitchellh/hashstructure v1.0.0
-	github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c
+	github.com/moby/locker v1.0.1
+	github.com/moby/sys/mount v0.1.1 // indirect; force more current version of sys/mount than go mod selects automatically
+	github.com/moby/sys/mountinfo v0.4.0 // indirect; force more current version of sys/mountinfo than go mod selects automatically
+	github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect
+	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.1
-	github.com/opencontainers/runc v1.0.0-rc10
+	github.com/opencontainers/runc v1.0.0-rc92
-	github.com/opencontainers/runtime-spec v1.0.2
+	github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6
-	github.com/opencontainers/selinux v1.5.1 // indirect
+	github.com/opentracing-contrib/go-stdlib v1.0.0
-	github.com/opentracing-contrib/go-stdlib v0.0.0-20171029140428-b1a47cfbdd75
+	github.com/opentracing/opentracing-go v1.2.0
-	github.com/opentracing/opentracing-go v1.1.0
 	github.com/pkg/errors v0.9.1
-	github.com/pkg/profile v1.2.1
+	github.com/pkg/profile v1.5.0
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
-	github.com/sirupsen/logrus v1.4.2
+	github.com/sirupsen/logrus v1.7.0
 	github.com/stretchr/testify v1.5.1
-	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
+	github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85
-	github.com/tonistiigi/fsutil v0.0.0-20200512175118-ae3a8d753069
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
-	github.com/uber/jaeger-client-go v2.11.2+incompatible
+	github.com/uber/jaeger-client-go v2.25.0+incompatible
-	github.com/uber/jaeger-lib v1.2.1 // indirect
+	github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
 	github.com/urfave/cli v1.22.2
-	github.com/vishvananda/netlink v1.1.0 // indirect
+	go.etcd.io/bbolt v1.3.5
-	go.etcd.io/bbolt v1.3.3
+	golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
-	golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
+	golang.org/x/net v0.0.0-20200707034311-ab3426394381
-	golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
+	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
-	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
+	golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a
-	golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
+	golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
-	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
+	// genproto: the actual version is replaced in replace()
-	google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9
+	google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece
-	google.golang.org/grpc v1.27.1
+	google.golang.org/grpc v1.29.1
 )
 
 replace (
-	github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200512144102-f13ba8f2f2fd
+	// protobuf: corresponds to containerd
-	github.com/docker/docker => github.com/docker/docker v17.12.0-ce-rc1.0.20200310163718-4634ce647cf2+incompatible
+	github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
 	github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
 	github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
+	// genproto: corresponds to containerd
+	google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
 )
@ -2,16 +2,33 @@ package auth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/subtle"
|
||||||
|
"math/rand"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"github.com/moby/buildkit/session"
|
"github.com/moby/buildkit/session"
|
||||||
"github.com/moby/buildkit/util/grpcerrors"
|
"github.com/moby/buildkit/util/grpcerrors"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/crypto/nacl/sign"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
)
|
)
|
||||||
|
|
||||||
func CredentialsFunc(sm *session.Manager, g session.Group) func(string) (string, string, error) {
|
var salt []byte
|
||||||
return func(host string) (string, string, error) {
|
var saltOnce sync.Once
|
||||||
var user, secret string
|
|
||||||
err := sm.Any(context.TODO(), g, func(ctx context.Context, _ string, c session.Caller) error {
|
// getSalt returns unique component per daemon restart to avoid persistent keys
|
||||||
|
func getSalt() []byte {
|
||||||
|
saltOnce.Do(func() {
|
||||||
|
salt = make([]byte, 32)
|
||||||
|
rand.Read(salt)
|
||||||
|
})
|
||||||
|
return salt
|
||||||
|
}
|
||||||
|
|
||||||
|
func CredentialsFunc(sm *session.Manager, g session.Group) func(string) (session, username, secret string, err error) {
|
||||||
|
return func(host string) (string, string, string, error) {
|
||||||
|
var sessionID, user, secret string
|
||||||
|
err := sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error {
|
||||||
client := NewAuthClient(c.Conn())
|
client := NewAuthClient(c.Conn())
|
||||||
|
|
||||||
resp, err := client.Credentials(ctx, &CredentialsRequest{
|
resp, err := client.Credentials(ctx, &CredentialsRequest{
|
||||||
|
@ -23,13 +40,91 @@ func CredentialsFunc(sm *session.Manager, g session.Group) func(string) (string,
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
sessionID = id
|
||||||
user = resp.Username
|
user = resp.Username
|
||||||
secret = resp.Secret
|
secret = resp.Secret
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", "", err
|
return "", "", "", err
|
||||||
}
|
}
|
||||||
return user, secret, nil
|
return sessionID, user, secret, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func FetchToken(req *FetchTokenRequest, sm *session.Manager, g session.Group) (resp *FetchTokenResponse, err error) {
|
||||||
|
err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error {
|
||||||
|
client := NewAuthClient(c.Conn())
|
||||||
|
|
||||||
|
resp, err = client.FetchToken(ctx, req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func VerifyTokenAuthority(host string, pubKey *[32]byte, sm *session.Manager, g session.Group) (sessionID string, ok bool, err error) {
|
||||||
|
var verified bool
|
||||||
|
err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error {
|
||||||
|
client := NewAuthClient(c.Conn())
|
||||||
|
|
||||||
|
payload := make([]byte, 32)
|
||||||
|
rand.Read(payload)
|
||||||
|
resp, err := client.VerifyTokenAuthority(ctx, &VerifyTokenAuthorityRequest{
|
||||||
|
Host: host,
|
||||||
|
Salt: getSalt(),
|
||||||
|
Payload: payload,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if grpcerrors.Code(err) == codes.Unimplemented {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var dt []byte
|
||||||
|
dt, ok = sign.Open(nil, resp.Signed, pubKey)
|
||||||
|
if ok && subtle.ConstantTimeCompare(dt, payload) == 1 {
|
||||||
|
verified = true
|
||||||
|
}
|
||||||
|
sessionID = id
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", false, err
|
||||||
|
}
|
||||||
|
return sessionID, verified, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetTokenAuthority(host string, sm *session.Manager, g session.Group) (sessionID string, pubKey *[32]byte, err error) {
|
||||||
|
err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error {
|
||||||
|
client := NewAuthClient(c.Conn())
|
||||||
|
|
||||||
|
resp, err := client.GetTokenAuthority(ctx, &GetTokenAuthorityRequest{
|
||||||
|
Host: host,
|
||||||
|
Salt: getSalt(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if grpcerrors.Code(err) == codes.Unimplemented || grpcerrors.Code(err) == codes.Unavailable {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(resp.PublicKey) != 32 {
|
||||||
|
return errors.Errorf("invalid pubkey length %d", len(pubKey))
|
||||||
|
}
|
||||||
|
|
||||||
|
sessionID = id
|
||||||
|
pubKey = new([32]byte)
|
||||||
|
copy((*pubKey)[:], resp.PublicKey)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
return sessionID, pubKey, nil
|
||||||
|
}
|
||||||
|
|
File diff suppressed because it is too large
@@ -6,9 +6,11 @@ option go_package = "auth";
 
 service Auth{
 	rpc Credentials(CredentialsRequest) returns (CredentialsResponse);
+	rpc FetchToken(FetchTokenRequest) returns (FetchTokenResponse);
+	rpc GetTokenAuthority(GetTokenAuthorityRequest) returns (GetTokenAuthorityResponse);
+	rpc VerifyTokenAuthority(VerifyTokenAuthorityRequest) returns (VerifyTokenAuthorityResponse);
 }
 
 
 message CredentialsRequest {
 	string Host = 1;
 }

@@ -17,3 +19,36 @@ message CredentialsResponse {
 	string Username = 1;
 	string Secret = 2;
 }
+
+message FetchTokenRequest {
+	string ClientID = 1;
+	string Host = 2;
+	string Realm = 3;
+	string Service = 4;
+	repeated string Scopes = 5;
+}
+
+message FetchTokenResponse {
+	string Token = 1;
+	int64 ExpiresIn = 2; // seconds
+	int64 IssuedAt = 3; // timestamp
+}
+
+message GetTokenAuthorityRequest {
+	string Host = 1;
+	bytes Salt = 2;
+}
+
+message GetTokenAuthorityResponse {
+	bytes PublicKey = 1;
+}
+
+message VerifyTokenAuthorityRequest {
+	string Host = 1;
+	bytes Payload = 2;
+	bytes Salt = 3;
+}
+
+message VerifyTokenAuthorityResponse {
+	bytes Signed = 1;
+}
@ -2,24 +2,46 @@ package authprovider
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/ed25519"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
authutil "github.com/containerd/containerd/remotes/docker/auth"
|
||||||
|
remoteserrors "github.com/containerd/containerd/remotes/errors"
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/cli/cli/config"
|
||||||
"github.com/docker/cli/cli/config/configfile"
|
"github.com/docker/cli/cli/config/configfile"
|
||||||
"github.com/moby/buildkit/session"
|
"github.com/moby/buildkit/session"
|
||||||
"github.com/moby/buildkit/session/auth"
|
"github.com/moby/buildkit/session/auth"
|
||||||
|
"github.com/moby/buildkit/util/progress/progresswriter"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"golang.org/x/crypto/nacl/sign"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewDockerAuthProvider(stderr io.Writer) session.Attachable {
|
func NewDockerAuthProvider(stderr io.Writer) session.Attachable {
|
||||||
return &authProvider{
|
return &authProvider{
|
||||||
config: config.LoadDefaultConfigFile(stderr),
|
config: config.LoadDefaultConfigFile(stderr),
|
||||||
|
seeds: &tokenSeeds{dir: config.Dir()},
|
||||||
|
loggerCache: map[string]struct{}{},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type authProvider struct {
|
type authProvider struct {
|
||||||
config *configfile.ConfigFile
|
config *configfile.ConfigFile
|
||||||
|
seeds *tokenSeeds
|
||||||
|
logger progresswriter.Logger
|
||||||
|
loggerCache map[string]struct{}
|
||||||
|
|
||||||
// The need for this mutex is not well understood.
|
// The need for this mutex is not well understood.
|
||||||
// Without it, the docker cli on OS X hangs when
|
// Without it, the docker cli on OS X hangs when
|
||||||
|
@ -28,17 +50,80 @@ type authProvider struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ap *authProvider) SetLogger(l progresswriter.Logger) {
|
||||||
|
ap.mu.Lock()
|
||||||
|
ap.logger = l
|
||||||
|
ap.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
func (ap *authProvider) Register(server *grpc.Server) {
|
func (ap *authProvider) Register(server *grpc.Server) {
|
||||||
auth.RegisterAuthServer(server, ap)
|
auth.RegisterAuthServer(server, ap)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) {
|
func (ap *authProvider) FetchToken(ctx context.Context, req *auth.FetchTokenRequest) (rr *auth.FetchTokenResponse, err error) {
|
||||||
|
creds, err := ap.credentials(req.Host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
to := authutil.TokenOptions{
|
||||||
|
Realm: req.Realm,
|
||||||
|
Service: req.Service,
|
||||||
|
Scopes: req.Scopes,
|
||||||
|
Username: creds.Username,
|
||||||
|
Secret: creds.Secret,
|
||||||
|
}
|
||||||
|
|
||||||
|
if creds.Secret != "" {
|
||||||
|
done := func(progresswriter.SubLogger) error {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
err = errors.Wrap(err, "failed to fetch oauth token")
|
||||||
|
}()
|
||||||
|
ap.mu.Lock()
|
||||||
|
name := fmt.Sprintf("[auth] %v token for %s", strings.Join(trimScopePrefix(req.Scopes), " "), req.Host)
|
||||||
|
if _, ok := ap.loggerCache[name]; !ok {
|
||||||
|
progresswriter.Wrap(name, ap.logger, done)
|
||||||
|
}
|
||||||
|
ap.mu.Unlock()
|
||||||
|
// try GET first because Docker Hub does not support POST
|
||||||
|
// switch once support has landed
|
||||||
|
resp, err := authutil.FetchToken(ctx, http.DefaultClient, nil, to)
|
||||||
|
if err != nil {
|
||||||
|
var errStatus remoteserrors.ErrUnexpectedStatus
|
||||||
|
if errors.As(err, &errStatus) {
|
||||||
|
// retry with POST request
|
||||||
|
// As of September 2017, GCR is known to return 404.
|
||||||
|
// As of February 2018, JFrog Artifactory is known to return 401.
|
||||||
|
if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 {
|
||||||
|
resp, err := authutil.FetchTokenWithOAuth(ctx, http.DefaultClient, nil, "buildkit-client", to)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return toTokenResponse(resp.AccessToken, resp.IssuedAt, resp.ExpiresIn), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return toTokenResponse(resp.Token, resp.IssuedAt, resp.ExpiresIn), nil
|
||||||
|
}
|
||||||
|
// do request anonymously
|
||||||
|
resp, err := authutil.FetchToken(ctx, http.DefaultClient, nil, to)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to fetch anonymous token")
|
||||||
|
}
|
||||||
|
return toTokenResponse(resp.Token, resp.IssuedAt, resp.ExpiresIn), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ap *authProvider) credentials(host string) (*auth.CredentialsResponse, error) {
|
||||||
ap.mu.Lock()
|
ap.mu.Lock()
|
||||||
defer ap.mu.Unlock()
|
defer ap.mu.Unlock()
|
||||||
if req.Host == "registry-1.docker.io" {
|
if host == "registry-1.docker.io" {
|
||||||
req.Host = "https://index.docker.io/v1/"
|
host = "https://index.docker.io/v1/"
|
||||||
}
|
}
|
||||||
ac, err := ap.config.GetAuthConfig(req.Host)
|
ac, err := ap.config.GetAuthConfig(host)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -51,3 +136,85 @@ func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRe
|
||||||
}
|
}
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) {
|
||||||
|
resp, err := ap.credentials(req.Host)
|
||||||
|
if err != nil || resp.Secret != "" {
|
||||||
|
ap.mu.Lock()
|
||||||
|
defer ap.mu.Unlock()
|
||||||
|
_, ok := ap.loggerCache[req.Host]
|
||||||
|
ap.loggerCache[req.Host] = struct{}{}
|
||||||
|
if !ok {
|
||||||
|
return resp, progresswriter.Wrap(fmt.Sprintf("[auth] sharing credentials for %s", req.Host), ap.logger, func(progresswriter.SubLogger) error {
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ap *authProvider) GetTokenAuthority(ctx context.Context, req *auth.GetTokenAuthorityRequest) (*auth.GetTokenAuthorityResponse, error) {
|
||||||
|
key, err := ap.getAuthorityKey(req.Host, req.Salt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &auth.GetTokenAuthorityResponse{PublicKey: key[32:]}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ap *authProvider) VerifyTokenAuthority(ctx context.Context, req *auth.VerifyTokenAuthorityRequest) (*auth.VerifyTokenAuthorityResponse, error) {
|
||||||
|
key, err := ap.getAuthorityKey(req.Host, req.Salt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
priv := new([64]byte)
|
||||||
|
copy((*priv)[:], key)
|
||||||
|
|
||||||
|
return &auth.VerifyTokenAuthorityResponse{Signed: sign.Sign(nil, req.Payload, priv)}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ap *authProvider) getAuthorityKey(host string, salt []byte) (ed25519.PrivateKey, error) {
|
||||||
|
if v, err := strconv.ParseBool(os.Getenv("BUILDKIT_NO_CLIENT_TOKEN")); err == nil && v {
|
||||||
|
return nil, status.Errorf(codes.Unavailable, "client side tokens disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
creds, err := ap.credentials(host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
seed, err := ap.seeds.getSeed(host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
mac := hmac.New(sha256.New, salt)
|
||||||
|
if creds.Secret != "" {
|
||||||
|
mac.Write(seed)
|
||||||
|
enc := json.NewEncoder(mac)
|
||||||
|
enc.Encode(creds)
|
||||||
|
}
|
||||||
|
|
||||||
|
sum := mac.Sum(nil)
|
||||||
|
|
||||||
|
return ed25519.NewKeyFromSeed(sum[:ed25519.SeedSize]), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toTokenResponse(token string, issuedAt time.Time, expires int) *auth.FetchTokenResponse {
|
||||||
|
resp := &auth.FetchTokenResponse{
|
||||||
|
Token: token,
|
||||||
|
ExpiresIn: int64(expires),
|
||||||
|
}
|
||||||
|
if !issuedAt.IsZero() {
|
||||||
|
resp.IssuedAt = issuedAt.Unix()
|
||||||
|
}
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
func trimScopePrefix(scopes []string) []string {
|
||||||
|
out := make([]string, len(scopes))
|
||||||
|
for i, s := range scopes {
|
||||||
|
out[i] = strings.TrimPrefix(s, "repository:")
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
84 vendor/github.com/moby/buildkit/session/auth/authprovider/tokenseed.go generated vendored Normal file
@ -0,0 +1,84 @@
|
||||||
|
package authprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/gofrs/flock"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
type tokenSeeds struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
dir string
|
||||||
|
m map[string]seed
|
||||||
|
}
|
||||||
|
|
||||||
|
type seed struct {
|
||||||
|
Seed []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *tokenSeeds) getSeed(host string) ([]byte, error) {
|
||||||
|
ts.mu.Lock()
|
||||||
|
defer ts.mu.Unlock()
|
||||||
|
|
||||||
|
if err := os.MkdirAll(ts.dir, 0755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if ts.m == nil {
|
||||||
|
ts.m = map[string]seed{}
|
||||||
|
}
|
||||||
|
|
||||||
|
l := flock.New(filepath.Join(ts.dir, ".token_seed.lock"))
|
||||||
|
if err := l.Lock(); err != nil {
|
||||||
|
if !errors.Is(err, syscall.EROFS) && errors.Is(err, syscall.EPERM) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
defer l.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
fp := filepath.Join(ts.dir, ".token_seed")
|
||||||
|
|
||||||
|
// we include client side randomness to avoid chosen plaintext attack from the daemon side
|
||||||
|
dt, err := ioutil.ReadFile(fp)
|
||||||
|
if err != nil {
|
||||||
|
if !errors.Is(err, os.ErrNotExist) && !errors.Is(err, syscall.ENOTDIR) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := json.Unmarshal(dt, &ts.m); err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "failed to parse %s", fp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v, ok := ts.m[host]
|
||||||
|
if !ok {
|
||||||
|
v = seed{Seed: newSeed()}
|
||||||
|
}
|
||||||
|
|
||||||
|
ts.m[host] = v
|
||||||
|
|
||||||
|
dt, err = json.MarshalIndent(ts.m, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ioutil.WriteFile(fp, dt, 0600); err != nil {
|
||||||
|
if !errors.Is(err, syscall.EROFS) && !errors.Is(err, syscall.EPERM) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return v.Seed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSeed() []byte {
|
||||||
|
b := make([]byte, 16)
|
||||||
|
rand.Read(b)
|
||||||
|
return b
|
||||||
|
}
|
|
@ -2,6 +2,7 @@ package filesync
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"context"
|
||||||
io "io"
|
io "io"
|
||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
@ -13,7 +14,13 @@ import (
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
func sendDiffCopy(stream grpc.Stream, fs fsutil.FS, progress progressCb) error {
|
type Stream interface {
|
||||||
|
Context() context.Context
|
||||||
|
SendMsg(m interface{}) error
|
||||||
|
RecvMsg(m interface{}) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func sendDiffCopy(stream Stream, fs fsutil.FS, progress progressCb) error {
|
||||||
return errors.WithStack(fsutil.Send(stream.Context(), stream, fs, progress))
|
return errors.WithStack(fsutil.Send(stream.Context(), stream, fs, progress))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -63,7 +70,7 @@ func (wc *streamWriterCloser) Close() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error {
|
func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error {
|
||||||
st := time.Now()
|
st := time.Now()
|
||||||
defer func() {
|
defer func() {
|
||||||
logrus.Debugf("diffcopy took: %v", time.Since(st))
|
logrus.Debugf("diffcopy took: %v", time.Since(st))
|
||||||
|
@ -83,7 +90,7 @@ func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progres
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
|
func syncTargetDiffCopy(ds grpc.ServerStream, dest string) error {
|
||||||
if err := os.MkdirAll(dest, 0700); err != nil {
|
if err := os.MkdirAll(dest, 0700); err != nil {
|
||||||
return errors.Wrapf(err, "failed to create synctarget dest dir %s", dest)
|
return errors.Wrapf(err, "failed to create synctarget dest dir %s", dest)
|
||||||
}
|
}
|
||||||
|
@ -101,7 +108,7 @@ func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error {
|
func writeTargetFile(ds grpc.ServerStream, wc io.WriteCloser) error {
|
||||||
for {
|
for {
|
||||||
bm := BytesMessage{}
|
bm := BytesMessage{}
|
||||||
if err := ds.RecvMsg(&bm); err != nil {
|
if err := ds.RecvMsg(&bm); err != nil {
|
||||||
|
|
|
@@ -129,8 +129,8 @@ type progressCb func(int, bool)
 
 type protocol struct {
 	name   string
-	sendFn func(stream grpc.Stream, fs fsutil.FS, progress progressCb) error
-	recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error
+	sendFn func(stream Stream, fs fsutil.FS, progress progressCb) error
+	recvFn func(stream grpc.ClientStream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error
 }
 
 func isProtoSupported(p string) bool {
@@ -74,7 +74,7 @@ func (sm *Manager) Any(ctx context.Context, g Group, f func(context.Context, str
 
 		timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
 		defer cancel()
-		c, err := sm.Get(timeoutCtx, id)
+		c, err := sm.Get(timeoutCtx, id, false)
 		if err != nil {
 			lastErr = err
 			continue
@@ -31,7 +31,7 @@ func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.
 	var stream []grpc.StreamClientInterceptor
 
 	var dialCount int64
-	dialer := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
+	dialer := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
 		if c := atomic.AddInt64(&dialCount, 1); c > 1 {
 			return nil, errors.Errorf("only one connection allowed")
 		}

@@ -64,7 +64,7 @@ func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.
 		dialOpts = append(dialOpts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...)))
 	}
 
-	cc, err := grpc.DialContext(ctx, "", dialOpts...)
+	cc, err := grpc.DialContext(ctx, "localhost", dialOpts...)
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "failed to create grpc client")
 	}
@@ -33,21 +33,26 @@ func Dialer(api controlapi.ControlClient) session.Dialer {
 	}
 }
 
-func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) {
+type stream interface {
+	Context() context.Context
+	SendMsg(m interface{}) error
+	RecvMsg(m interface{}) error
+}
+
+func streamToConn(stream stream) (net.Conn, <-chan struct{}) {
 	closeCh := make(chan struct{})
 	c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh}
 	return c, closeCh
 }
 
 type conn struct {
-	stream  grpc.Stream
+	stream  stream
 	buf     []byte
 	lastBuf []byte
 
 	closedOnce sync.Once
 	readMu     sync.Mutex
 	writeMu    sync.Mutex
-	err        error
 	closeCh    chan struct{}
 }
@@ -149,7 +149,7 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin
 }
 
 // Get returns a session by ID
-func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) {
+func (sm *Manager) Get(ctx context.Context, id string, noWait bool) (Caller, error) {
 	// session prefix is used to identify vertexes with different contexts so
 	// they would not collide, but for lookup we don't need the prefix
 	if p := strings.SplitN(id, ":", 2); len(p) == 2 && len(p[1]) > 0 {

@@ -180,7 +180,7 @@ func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) {
 		}
 		var ok bool
 		c, ok = sm.sessions[id]
-		if !ok || c.closed() {
+		if (!ok || c.closed()) && !noWait {
 			sm.updateCondition.Wait()
 			continue
 		}

@@ -188,6 +188,10 @@ func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) {
 		break
 	}
 
+	if c == nil {
+		return nil, nil
+	}
+
 	return c, nil
 }
@ -4,8 +4,6 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/moby/buildkit/session/secrets"
|
"github.com/moby/buildkit/session/secrets"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
@ -25,7 +23,7 @@ func NewStore(files []Source) (secrets.SecretStore, error) {
|
||||||
return nil, errors.Errorf("secret missing ID")
|
return nil, errors.Errorf("secret missing ID")
|
||||||
}
|
}
|
||||||
if f.Env == "" && f.FilePath == "" {
|
if f.Env == "" && f.FilePath == "" {
|
||||||
if hasEnv(f.ID) {
|
if _, ok := os.LookupEnv(f.ID); ok {
|
||||||
f.Env = f.ID
|
f.Env = f.ID
|
||||||
} else {
|
} else {
|
||||||
f.FilePath = f.ID
|
f.FilePath = f.ID
|
||||||
|
@ -65,22 +63,3 @@ func (fs *fileStore) GetSecret(ctx context.Context, id string) ([]byte, error) {
|
||||||
}
|
}
|
||||||
return dt, nil
|
return dt, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasEnv(name string) bool {
|
|
||||||
for _, entry := range os.Environ() {
|
|
||||||
idx := strings.IndexRune(entry, '=')
|
|
||||||
if idx == -1 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
// Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent.
|
|
||||||
if strings.EqualFold(entry[:idx], name) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if entry[:idx] == name {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
|
@@ -6,10 +6,14 @@ import (
 	"github.com/pkg/errors"
 	context "golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
-	"google.golang.org/grpc"
 )
 
-func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream, closeStream func() error) error {
+type Stream interface {
+	SendMsg(m interface{}) error
+	RecvMsg(m interface{}) error
+}
+
+func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error {
 	g, ctx := errgroup.WithContext(ctx)
 
 	g.Go(func() (retErr error) {
17 vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go generated vendored
|
@ -6,6 +6,8 @@ import (
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/moby/buildkit/session"
|
"github.com/moby/buildkit/session"
|
||||||
|
@ -139,7 +141,7 @@ func toAgentSource(paths []string) (source, error) {
|
||||||
socket = p
|
socket = p
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
keys = true
|
|
||||||
f, err := os.Open(p)
|
f, err := os.Open(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return source{}, errors.Wrapf(err, "failed to open %s", p)
|
return source{}, errors.Wrapf(err, "failed to open %s", p)
|
||||||
|
@ -151,11 +153,24 @@ func toAgentSource(paths []string) (source, error) {
|
||||||
|
|
||||||
k, err := ssh.ParseRawPrivateKey(dt)
|
k, err := ssh.ParseRawPrivateKey(dt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// On Windows, os.ModeSocket isn't appropriately set on the file mode.
|
||||||
|
// https://github.com/golang/go/issues/33357
|
||||||
|
// If parsing the file fails, check to see if it kind of looks like socket-shaped.
|
||||||
|
if runtime.GOOS == "windows" && strings.Contains(string(dt), "socket") {
|
||||||
|
if keys {
|
||||||
|
return source{}, errors.Errorf("invalid combination of keys and sockets")
|
||||||
|
}
|
||||||
|
socket = p
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
return source{}, errors.Wrapf(err, "failed to parse %s", p) // TODO: prompt passphrase?
|
return source{}, errors.Wrapf(err, "failed to parse %s", p) // TODO: prompt passphrase?
|
||||||
}
|
}
|
||||||
if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil {
|
if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil {
|
||||||
return source{}, errors.Wrapf(err, "failed to add %s to agent", p)
|
return source{}, errors.Wrapf(err, "failed to add %s to agent", p)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
keys = true
|
||||||
}
|
}
|
||||||
|
|
||||||
if socket != "" {
|
if socket != "" {
|
||||||
|
|
|
@ -0,0 +1,399 @@
|
||||||
|
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||||
|
// source: errdefs.proto
|
||||||
|
|
||||||
|
package errdefs
|
||||||
|
|
||||||
|
import (
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/gogo/protobuf/proto"
|
||||||
|
pb "github.com/moby/buildkit/solver/pb"
|
||||||
|
math "math"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
type Vertex struct {
|
||||||
|
Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Vertex) Reset() { *m = Vertex{} }
|
||||||
|
func (m *Vertex) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Vertex) ProtoMessage() {}
|
||||||
|
func (*Vertex) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_689dc58a5060aff5, []int{0}
|
||||||
|
}
|
||||||
|
func (m *Vertex) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Vertex.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Vertex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Vertex.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Vertex) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Vertex.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Vertex) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Vertex.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Vertex) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Vertex.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Vertex proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Vertex) GetDigest() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Digest
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type Source struct {
|
||||||
|
Info *pb.SourceInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
|
||||||
|
Ranges []*pb.Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Source) Reset() { *m = Source{} }
|
||||||
|
func (m *Source) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Source) ProtoMessage() {}
|
||||||
|
func (*Source) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_689dc58a5060aff5, []int{1}
|
||||||
|
}
|
||||||
|
func (m *Source) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Source.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Source) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Source.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Source) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Source.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Source) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Source.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Source) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Source.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Source proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Source) GetInfo() *pb.SourceInfo {
|
||||||
|
if m != nil {
|
||||||
|
return m.Info
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Source) GetRanges() []*pb.Range {
|
||||||
|
if m != nil {
|
||||||
|
return m.Ranges
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type FrontendCap struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FrontendCap) Reset() { *m = FrontendCap{} }
|
||||||
|
func (m *FrontendCap) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*FrontendCap) ProtoMessage() {}
|
||||||
|
func (*FrontendCap) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_689dc58a5060aff5, []int{2}
|
||||||
|
}
|
||||||
|
func (m *FrontendCap) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_FrontendCap.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *FrontendCap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_FrontendCap.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *FrontendCap) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_FrontendCap.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *FrontendCap) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_FrontendCap.Size(m)
|
||||||
|
}
|
||||||
|
func (m *FrontendCap) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_FrontendCap.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_FrontendCap proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *FrontendCap) GetName() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type Subrequest struct {
|
||||||
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Subrequest) Reset() { *m = Subrequest{} }
|
||||||
|
func (m *Subrequest) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Subrequest) ProtoMessage() {}
|
||||||
|
func (*Subrequest) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_689dc58a5060aff5, []int{3}
|
||||||
|
}
|
||||||
|
func (m *Subrequest) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Subrequest.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Subrequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Subrequest.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Subrequest) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Subrequest.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Subrequest) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Subrequest.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Subrequest) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Subrequest.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Subrequest proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Subrequest) GetName() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Name
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type Solve struct {
|
||||||
|
InputIDs []string `protobuf:"bytes,1,rep,name=inputIDs,proto3" json:"inputIDs,omitempty"`
|
||||||
|
MountIDs []string `protobuf:"bytes,2,rep,name=mountIDs,proto3" json:"mountIDs,omitempty"`
|
||||||
|
Op *pb.Op `protobuf:"bytes,3,opt,name=op,proto3" json:"op,omitempty"`
|
||||||
|
// Types that are valid to be assigned to Subject:
|
||||||
|
// *Solve_File
|
||||||
|
// *Solve_Cache
|
||||||
|
Subject isSolve_Subject `protobuf_oneof:"subject"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Solve) Reset() { *m = Solve{} }
|
||||||
|
func (m *Solve) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Solve) ProtoMessage() {}
|
||||||
|
func (*Solve) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_689dc58a5060aff5, []int{4}
|
||||||
|
}
|
||||||
|
func (m *Solve) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Solve.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Solve) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Solve.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *Solve) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Solve.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *Solve) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Solve.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Solve) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Solve.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Solve proto.InternalMessageInfo
|
||||||
|
|
||||||
|
type isSolve_Subject interface {
|
||||||
|
isSolve_Subject()
|
||||||
|
}
|
||||||
|
|
||||||
|
type Solve_File struct {
|
||||||
|
File *FileAction `protobuf:"bytes,4,opt,name=file,proto3,oneof" json:"file,omitempty"`
|
||||||
|
}
|
||||||
|
type Solve_Cache struct {
|
||||||
|
Cache *ContentCache `protobuf:"bytes,5,opt,name=cache,proto3,oneof" json:"cache,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Solve_File) isSolve_Subject() {}
|
||||||
|
func (*Solve_Cache) isSolve_Subject() {}
|
||||||
|
|
||||||
|
func (m *Solve) GetSubject() isSolve_Subject {
|
||||||
|
if m != nil {
|
||||||
|
return m.Subject
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Solve) GetInputIDs() []string {
|
||||||
|
if m != nil {
|
||||||
|
return m.InputIDs
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Solve) GetMountIDs() []string {
|
||||||
|
if m != nil {
|
||||||
|
return m.MountIDs
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Solve) GetOp() *pb.Op {
|
||||||
|
if m != nil {
|
||||||
|
return m.Op
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Solve) GetFile() *FileAction {
|
||||||
|
if x, ok := m.GetSubject().(*Solve_File); ok {
|
||||||
|
return x.File
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Solve) GetCache() *ContentCache {
|
||||||
|
if x, ok := m.GetSubject().(*Solve_Cache); ok {
|
||||||
|
return x.Cache
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||||
|
func (*Solve) XXX_OneofWrappers() []interface{} {
|
||||||
|
return []interface{}{
|
||||||
|
(*Solve_File)(nil),
|
||||||
|
(*Solve_Cache)(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type FileAction struct {
|
||||||
|
// Index of the file action that failed the exec.
|
||||||
|
Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FileAction) Reset() { *m = FileAction{} }
|
||||||
|
func (m *FileAction) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*FileAction) ProtoMessage() {}
|
||||||
|
func (*FileAction) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_689dc58a5060aff5, []int{5}
|
||||||
|
}
|
||||||
|
func (m *FileAction) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_FileAction.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *FileAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_FileAction.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *FileAction) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_FileAction.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *FileAction) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_FileAction.Size(m)
|
||||||
|
}
|
||||||
|
func (m *FileAction) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_FileAction.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_FileAction proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *FileAction) GetIndex() int64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Index
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContentCache struct {
|
||||||
|
// Original index of result that failed the slow cache calculation.
|
||||||
|
Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ContentCache) Reset() { *m = ContentCache{} }
|
||||||
|
func (m *ContentCache) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*ContentCache) ProtoMessage() {}
|
||||||
|
func (*ContentCache) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_689dc58a5060aff5, []int{6}
|
||||||
|
}
|
||||||
|
func (m *ContentCache) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_ContentCache.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *ContentCache) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_ContentCache.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (m *ContentCache) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_ContentCache.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *ContentCache) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_ContentCache.Size(m)
|
||||||
|
}
|
||||||
|
func (m *ContentCache) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_ContentCache.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_ContentCache proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *ContentCache) GetIndex() int64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Index
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Vertex)(nil), "errdefs.Vertex")
|
||||||
|
proto.RegisterType((*Source)(nil), "errdefs.Source")
|
||||||
|
proto.RegisterType((*FrontendCap)(nil), "errdefs.FrontendCap")
|
||||||
|
proto.RegisterType((*Subrequest)(nil), "errdefs.Subrequest")
|
||||||
|
proto.RegisterType((*Solve)(nil), "errdefs.Solve")
|
||||||
|
proto.RegisterType((*FileAction)(nil), "errdefs.FileAction")
|
||||||
|
proto.RegisterType((*ContentCache)(nil), "errdefs.ContentCache")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("errdefs.proto", fileDescriptor_689dc58a5060aff5) }
|
||||||
|
|
||||||
|
var fileDescriptor_689dc58a5060aff5 = []byte{
	// 348 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcd, 0x8e, 0xd3, 0x30,
	0x14, 0x85, 0x27, 0xbf, 0x43, 0x6e, 0x81, 0x85, 0x81, 0x51, 0x34, 0xab, 0x8c, 0xc5, 0xa2, 0x48,
	0x90, 0x48, 0xc3, 0x13, 0x40, 0xd1, 0x68, 0x66, 0x55, 0xc9, 0x95, 0xd8, 0xc7, 0xc9, 0x4d, 0x6b,
	0x48, 0x6c, 0xe3, 0xd8, 0xa8, 0xbc, 0x1b, 0x0f, 0x87, 0xe2, 0xa4, 0x65, 0x16, 0xdd, 0xe5, 0xe4,
	0xfb, 0x7c, 0xed, 0x63, 0xc3, 0x2b, 0x34, 0xa6, 0xc5, 0x6e, 0x2c, 0xb5, 0x51, 0x56, 0x91, 0xeb,
	0x25, 0xde, 0x7e, 0xdc, 0x0b, 0x7b, 0x70, 0xbc, 0x6c, 0xd4, 0x50, 0x0d, 0x8a, 0xff, 0xa9, 0xb8,
	0x13, 0x7d, 0xfb, 0x53, 0xd8, 0x6a, 0x54, 0xfd, 0x6f, 0x34, 0x95, 0xe6, 0x95, 0xd2, 0xcb, 0x32,
	0x5a, 0x40, 0xfa, 0x1d, 0x8d, 0xc5, 0x23, 0xb9, 0x81, 0xb4, 0x15, 0x7b, 0x1c, 0x6d, 0x1e, 0x14,
	0xc1, 0x3a, 0x63, 0x4b, 0xa2, 0x5b, 0x48, 0x77, 0xca, 0x99, 0x06, 0x09, 0x85, 0x58, 0xc8, 0x4e,
	0x79, 0xbe, 0xba, 0x7f, 0x5d, 0x6a, 0x5e, 0xce, 0xe4, 0x49, 0x76, 0x8a, 0x79, 0x46, 0xee, 0x20,
	0x35, 0xb5, 0xdc, 0xe3, 0x98, 0x87, 0x45, 0xb4, 0x5e, 0xdd, 0x67, 0x93, 0xc5, 0xa6, 0x3f, 0x6c,
	0x01, 0xf4, 0x0e, 0x56, 0x0f, 0x46, 0x49, 0x8b, 0xb2, 0xdd, 0xd4, 0x9a, 0x10, 0x88, 0x65, 0x3d,
	0xe0, 0xb2, 0xab, 0xff, 0xa6, 0x05, 0xc0, 0xce, 0x71, 0x83, 0xbf, 0x1c, 0x8e, 0xf6, 0xa2, 0xf1,
	0x37, 0x80, 0x64, 0x37, 0xf5, 0x21, 0xb7, 0xf0, 0x42, 0x48, 0xed, 0xec, 0xd3, 0xb7, 0x31, 0x0f,
	0x8a, 0x68, 0x9d, 0xb1, 0x73, 0x9e, 0xd8, 0xa0, 0x9c, 0xf4, 0x2c, 0x9c, 0xd9, 0x29, 0x93, 0x1b,
	0x08, 0x95, 0xce, 0x23, 0xdf, 0x25, 0x9d, 0x4e, 0xb9, 0xd5, 0x2c, 0x54, 0x9a, 0x7c, 0x80, 0xb8,
	0x13, 0x3d, 0xe6, 0xb1, 0x27, 0x6f, 0xca, 0xd3, 0x35, 0x3f, 0x88, 0x1e, 0xbf, 0x34, 0x56, 0x28,
	0xf9, 0x78, 0xc5, 0xbc, 0x42, 0x3e, 0x41, 0xd2, 0xd4, 0xcd, 0x01, 0xf3, 0xc4, 0xbb, 0xef, 0xce,
	0xee, 0xc6, 0xd7, 0xb3, 0x9b, 0x09, 0x3e, 0x5e, 0xb1, 0xd9, 0xfa, 0x9a, 0xc1, 0xf5, 0xe8, 0xf8,
	0x0f, 0x6c, 0x2c, 0xa5, 0x00, 0xff, 0xe7, 0x91, 0xb7, 0x90, 0x08, 0xd9, 0xe2, 0xd1, 0x37, 0x8c,
	0xd8, 0x1c, 0xe8, 0x7b, 0x78, 0xf9, 0x7c, 0xce, 0x65, 0x8b, 0xa7, 0xfe, 0x1d, 0x3f, 0xff, 0x0b,
	0x00, 0x00, 0xff, 0xff, 0x1e, 0xfa, 0x9c, 0x6f, 0x0f, 0x02, 0x00, 0x00,
}
@@ -0,0 +1,43 @@
syntax = "proto3";

package errdefs;

import "github.com/moby/buildkit/solver/pb/ops.proto";

message Vertex {
	string digest = 1;
}

message Source {
	pb.SourceInfo info = 1;
	repeated pb.Range ranges = 2;
}

message FrontendCap {
	string name = 1;
}

message Subrequest {
	string name = 1;
}

message Solve {
	repeated string inputIDs = 1;
	repeated string mountIDs = 2;
	pb.Op op = 3;

	oneof subject {
		FileAction file = 4;
		ContentCache cache = 5;
	}
}

message FileAction {
	// Index of the file action that failed the exec.
	int64 index = 1;
}

message ContentCache {
	// Original index of result that failed the slow cache calculation.
	int64 index = 1;
}
@@ -0,0 +1,34 @@
package errdefs

import "fmt"

const (
	// ContainerdUnknownExitStatus is returned when containerd is unable to
	// determine the exit status of a process. This can happen if the process never starts
	// or if an error was encountered when obtaining the exit status, it is set to 255.
	//
	// This const is defined here to prevent importing github.com/containerd/containerd
	// and corresponds with https://github.com/containerd/containerd/blob/40b22ef0741028917761d8c5d5d29e0d19038836/task.go#L52-L55
	ContainerdUnknownExitStatus = 255
)

// ExitError will be returned when the container process exits with a non-zero
// exit code.
type ExitError struct {
	ExitCode uint32
	Err      error
}

func (err *ExitError) Error() string {
	if err.Err != nil {
		return err.Err.Error()
	}
	return fmt.Sprintf("exit code: %d", err.ExitCode)
}

func (err *ExitError) Unwrap() error {
	if err.Err == nil {
		return fmt.Errorf("exit code: %d", err.ExitCode)
	}
	return err.Err
}
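As context for the ExitError type added above, here is a minimal sketch (not part of this commit) of how a caller could recover the exit code from a wrapped error chain; the vendored import path github.com/moby/buildkit/solver/errdefs is assumed.

package main

import (
	"errors"
	"fmt"

	"github.com/moby/buildkit/solver/errdefs" // assumed vendor path for the package above
)

func main() {
	// Wrap an ExitError the way a failing build step might surface it.
	err := fmt.Errorf("process failed: %w", &errdefs.ExitError{ExitCode: 2})

	// errors.As walks the Unwrap chain, so the typed error is found even
	// when other errors wrap it.
	var exitErr *errdefs.ExitError
	if errors.As(err, &exitErr) {
		fmt.Println("exit code:", exitErr.ExitCode)
	}
}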
@@ -0,0 +1,41 @@
package errdefs

import (
	fmt "fmt"

	"github.com/containerd/typeurl"
	"github.com/moby/buildkit/util/grpcerrors"
)

func init() {
	typeurl.Register((*FrontendCap)(nil), "github.com/moby/buildkit", "errdefs.FrontendCap+json")
}

type UnsupportedFrontendCapError struct {
	FrontendCap
	error
}

func (e *UnsupportedFrontendCapError) Error() string {
	msg := fmt.Sprintf("unsupported frontend capability %s", e.FrontendCap.Name)
	if e.error != nil {
		msg += ": " + e.error.Error()
	}
	return msg
}

func (e *UnsupportedFrontendCapError) Unwrap() error {
	return e.error
}

func (e *UnsupportedFrontendCapError) ToProto() grpcerrors.TypedErrorProto {
	return &e.FrontendCap
}

func NewUnsupportedFrontendCapError(name string) error {
	return &UnsupportedFrontendCapError{FrontendCap: FrontendCap{Name: name}}
}

func (v *FrontendCap) WrapError(err error) error {
	return &UnsupportedFrontendCapError{error: err, FrontendCap: *v}
}
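The Register/ToProto/WrapError trio above is the pattern every typed error in this package follows: the embedded proto message is what travels over gRPC, and WrapError reattaches it on the receiving side. A hedged usage sketch, assuming the same vendored import path and an illustrative capability name:

package main

import (
	"errors"
	"fmt"

	"github.com/moby/buildkit/solver/errdefs" // assumed vendor path
)

func main() {
	// A frontend would return this when asked for a capability it lacks;
	// the capability name here is only illustrative.
	err := errdefs.NewUnsupportedFrontendCapError("moby.buildkit.frontend.example")

	var capErr *errdefs.UnsupportedFrontendCapError
	if errors.As(err, &capErr) {
		fmt.Println("missing capability:", capErr.FrontendCap.Name)
	}
	fmt.Println(err) // "unsupported frontend capability moby.buildkit.frontend.example"
}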
@@ -0,0 +1,3 @@
package errdefs

//go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=. errdefs.proto
@@ -0,0 +1,20 @@
package errdefs

import "github.com/moby/buildkit/solver/pb"

type OpError struct {
	error
	Op *pb.Op
}

func (e *OpError) Unwrap() error {
	return e.error
}

func WithOp(err error, iface interface{}) error {
	op, ok := iface.(*pb.Op)
	if err == nil || !ok {
		return err
	}
	return &OpError{error: err, Op: op}
}
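WithOp deliberately takes interface{} and ignores non-*pb.Op subjects, so callers can attach whatever they are holding without a type check; consumers recover the op later with errors.As. A small sketch under the assumed vendor paths:

package main

import (
	"errors"
	"fmt"

	"github.com/moby/buildkit/solver/errdefs" // assumed vendor path
	"github.com/moby/buildkit/solver/pb"
)

func main() {
	base := errors.New("executor failed")

	// Attach the op that was being executed; a nil or non-*pb.Op subject
	// would leave the error untouched.
	err := errdefs.WithOp(base, &pb.Op{})

	var opErr *errdefs.OpError
	if errors.As(err, &opErr) && opErr.Op != nil {
		fmt.Printf("failing op: %+v\n", opErr.Op)
	}
}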
@@ -0,0 +1,74 @@
package errdefs

import (
	"bytes"
	"errors"

	"github.com/containerd/typeurl"
	"github.com/golang/protobuf/jsonpb"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/grpcerrors"
)

func init() {
	typeurl.Register((*Solve)(nil), "github.com/moby/buildkit", "errdefs.Solve+json")
}

//nolint:golint
type IsSolve_Subject isSolve_Subject

// SolveError will be returned when an error is encountered during a solve that
// has an exec op.
type SolveError struct {
	Solve
	Err error
}

func (e *SolveError) Error() string {
	return e.Err.Error()
}

func (e *SolveError) Unwrap() error {
	return e.Err
}

func (e *SolveError) ToProto() grpcerrors.TypedErrorProto {
	return &e.Solve
}

func WithSolveError(err error, subject IsSolve_Subject, inputIDs, mountIDs []string) error {
	if err == nil {
		return nil
	}
	var (
		oe *OpError
		op *pb.Op
	)
	if errors.As(err, &oe) {
		op = oe.Op
	}
	return &SolveError{
		Err: err,
		Solve: Solve{
			InputIDs: inputIDs,
			MountIDs: mountIDs,
			Op:       op,
			Subject:  subject,
		},
	}
}

func (v *Solve) WrapError(err error) error {
	return &SolveError{Err: err, Solve: *v}
}

func (v *Solve) MarshalJSON() ([]byte, error) {
	m := jsonpb.Marshaler{}
	buf := new(bytes.Buffer)
	err := m.Marshal(buf, v)
	return buf.Bytes(), err
}

func (v *Solve) UnmarshalJSON(b []byte) error {
	return jsonpb.Unmarshal(bytes.NewReader(b), v)
}
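WithSolveError is the aggregation point: it lifts an earlier OpError (if any) into the Solve message together with the input and mount IDs and a subject describing what failed. A hedged sketch of reading that metadata back out, assuming the vendored import path:

package main

import (
	"errors"
	"fmt"

	"github.com/moby/buildkit/solver/errdefs" // assumed vendor path
)

func main() {
	// A nil subject is accepted; real callers pass a FileAction or ContentCache subject.
	err := errdefs.WithSolveError(errors.New("process exited with code 1"), nil, []string{"input0"}, nil)

	var se *errdefs.SolveError
	if errors.As(err, &se) {
		fmt.Printf("solve failed: %v (inputs=%v, mounts=%v)\n", se, se.InputIDs, se.MountIDs)
		if se.Op != nil {
			fmt.Printf("failing op: %+v\n", se.Op)
		}
	}
}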
@@ -0,0 +1,128 @@
package errdefs

import (
	"fmt"
	"io"
	"strings"

	pb "github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/grpcerrors"
	"github.com/pkg/errors"
)

func WithSource(err error, src Source) error {
	if err == nil {
		return nil
	}
	return &ErrorSource{Source: src, error: err}
}

type ErrorSource struct {
	Source
	error
}

func (e *ErrorSource) Unwrap() error {
	return e.error
}

func (e *ErrorSource) ToProto() grpcerrors.TypedErrorProto {
	return &e.Source
}

func Sources(err error) []*Source {
	var out []*Source
	var es *ErrorSource
	if errors.As(err, &es) {
		out = Sources(es.Unwrap())
		out = append(out, &es.Source)
	}
	return out
}

func (s *Source) WrapError(err error) error {
	return &ErrorSource{error: err, Source: *s}
}

func (s *Source) Print(w io.Writer) error {
	si := s.Info
	if si == nil {
		return nil
	}
	lines := strings.Split(string(si.Data), "\n")

	start, end, ok := getStartEndLine(s.Ranges)
	if !ok {
		return nil
	}
	if start > len(lines) || start < 1 {
		return nil
	}
	if end > len(lines) {
		end = len(lines)
	}

	pad := 2
	if end == start {
		pad = 4
	}
	var p int

	prepadStart := start
	for {
		if p >= pad {
			break
		}
		if start > 1 {
			start--
			p++
		}
		if end != len(lines) {
			end++
			p++
		}
		p++
	}

	fmt.Fprintf(w, "%s:%d\n--------------------\n", si.Filename, prepadStart)
	for i := start; i <= end; i++ {
		pfx := "   "
		if containsLine(s.Ranges, i) {
			pfx = ">>>"
		}
		fmt.Fprintf(w, " %3d | %s %s\n", i, pfx, lines[i-1])
	}
	fmt.Fprintf(w, "--------------------\n")
	return nil
}

func containsLine(rr []*pb.Range, l int) bool {
	for _, r := range rr {
		e := r.End.Line
		if e < r.Start.Line {
			e = r.Start.Line
		}
		if r.Start.Line <= int32(l) && e >= int32(l) {
			return true
		}
	}
	return false
}

func getStartEndLine(rr []*pb.Range) (start int, end int, ok bool) {
	first := true
	for _, r := range rr {
		e := r.End.Line
		if e < r.Start.Line {
			e = r.Start.Line
		}
		if first || int(r.Start.Line) < start {
			start = int(r.Start.Line)
		}
		if int(e) > end {
			end = int(e)
		}
		first = false
	}
	return start, end, !first
}
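Print is what ultimately renders the ">>>"-annotated Dockerfile excerpt in build error output: it pads the reported range by a couple of lines and marks every line covered by a range. A self-contained sketch, with the pb field shapes assumed from the ops.proto shown elsewhere in this diff and the vendor path assumed:

package main

import (
	"errors"
	"os"

	"github.com/moby/buildkit/solver/errdefs" // assumed vendor path
	"github.com/moby/buildkit/solver/pb"
)

func main() {
	dockerfile := []byte("FROM busybox\nRUN exit 1\nCMD [\"true\"]\n")

	src := errdefs.Source{
		Info: &pb.SourceInfo{Filename: "Dockerfile", Data: dockerfile},
		// Mark line 2 (the RUN) as the failing range; field shapes assumed.
		Ranges: []*pb.Range{{Start: pb.Position{Line: 2}, End: pb.Position{Line: 2}}},
	}
	err := errdefs.WithSource(errors.New("process exited with code 1"), src)

	// Sources returns the innermost source first, so nested wrappers print
	// in the order they were attached.
	for _, s := range errdefs.Sources(err) {
		_ = s.Print(os.Stderr)
	}
}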
@@ -0,0 +1,41 @@
package errdefs

import (
	fmt "fmt"

	"github.com/containerd/typeurl"
	"github.com/moby/buildkit/util/grpcerrors"
)

func init() {
	typeurl.Register((*Subrequest)(nil), "github.com/moby/buildkit", "errdefs.Subrequest+json")
}

type UnsupportedSubrequestError struct {
	Subrequest
	error
}

func (e *UnsupportedSubrequestError) Error() string {
	msg := fmt.Sprintf("unsupported request %s", e.Subrequest.Name)
	if e.error != nil {
		msg += ": " + e.error.Error()
	}
	return msg
}

func (e *UnsupportedSubrequestError) Unwrap() error {
	return e.error
}

func (e *UnsupportedSubrequestError) ToProto() grpcerrors.TypedErrorProto {
	return &e.Subrequest
}

func NewUnsupportedSubrequestError(name string) error {
	return &UnsupportedSubrequestError{Subrequest: Subrequest{Name: name}}
}

func (v *Subrequest) WrapError(err error) error {
	return &UnsupportedSubrequestError{error: err, Subrequest: *v}
}
@@ -0,0 +1,36 @@
package errdefs

import (
	"github.com/containerd/typeurl"
	"github.com/moby/buildkit/util/grpcerrors"
	digest "github.com/opencontainers/go-digest"
)

func init() {
	typeurl.Register((*Vertex)(nil), "github.com/moby/buildkit", "errdefs.Vertex+json")
	typeurl.Register((*Source)(nil), "github.com/moby/buildkit", "errdefs.Source+json")
}

type VertexError struct {
	Vertex
	error
}

func (e *VertexError) Unwrap() error {
	return e.error
}

func (e *VertexError) ToProto() grpcerrors.TypedErrorProto {
	return &e.Vertex
}

func WrapVertex(err error, dgst digest.Digest) error {
	if err == nil {
		return nil
	}
	return &VertexError{Vertex: Vertex{Digest: dgst.String()}, error: err}
}

func (v *Vertex) WrapError(err error) error {
	return &VertexError{error: err, Vertex: *v}
}
@@ -4,6 +4,8 @@ const AttrKeepGitDir = "git.keepgitdir"
 const AttrFullRemoteURL = "git.fullurl"
 const AttrAuthHeaderSecret = "git.authheadersecret"
 const AttrAuthTokenSecret = "git.authtokensecret"
+const AttrKnownSSHHosts = "git.knownsshhosts"
+const AttrMountSSHSock = "git.mountsshsock"
 const AttrLocalSessionID = "local.session"
 const AttrLocalUniqueID = "local.unique"
 const AttrIncludePatterns = "local.includepattern"
@@ -19,10 +19,12 @@ const (
 	CapSourceLocalExcludePatterns apicaps.CapID = "source.local.excludepatterns"
 	CapSourceLocalSharedKeyHint   apicaps.CapID = "source.local.sharedkeyhint"
 
 	CapSourceGit              apicaps.CapID = "source.git"
 	CapSourceGitKeepDir       apicaps.CapID = "source.git.keepgitdir"
 	CapSourceGitFullURL       apicaps.CapID = "source.git.fullurl"
-	CapSourceGitHttpAuth      apicaps.CapID = "source.git.httpauth"
+	CapSourceGitHTTPAuth      apicaps.CapID = "source.git.httpauth"
+	CapSourceGitKnownSSHHosts apicaps.CapID = "source.git.knownsshhosts"
+	CapSourceGitMountSSHSock  apicaps.CapID = "source.git.mountsshsock"
 
 	CapSourceHTTP         apicaps.CapID = "source.http"
 	CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum"
@@ -133,7 +135,19 @@ func init() {
 	})
 
 	Caps.Init(apicaps.Cap{
-		ID:      CapSourceGitHttpAuth,
+		ID:      CapSourceGitHTTPAuth,
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapSourceGitKnownSSHHosts,
+		Enabled: true,
+		Status:  apicaps.CapStatusExperimental,
+	})
+
+	Caps.Init(apicaps.Cap{
+		ID:      CapSourceGitMountSSHSock,
 		Enabled: true,
 		Status:  apicaps.CapStatusExperimental,
 	})
@@ -467,6 +467,7 @@ type Meta struct {
 	User       string    `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"`
 	ProxyEnv   *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"`
 	ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"`
+	Hostname   string    `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"`
 }
 
 func (m *Meta) Reset()         { *m = Meta{} }
@@ -540,6 +541,13 @@ func (m *Meta) GetExtraHosts() []*HostIP {
 	return nil
 }
 
+func (m *Meta) GetHostname() string {
+	if m != nil {
+		return m.Hostname
+	}
+	return ""
+}
+
 // Mount specifies how to mount an input Op as a filesystem.
 type Mount struct {
 	Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"`
@@ -551,6 +559,7 @@ type Mount struct {
 	CacheOpt  *CacheOpt  `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"`
 	SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"`
 	SSHOpt    *SSHOpt    `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"`
+	ResultID  string     `protobuf:"bytes,23,opt,name=resultID,proto3" json:"resultID,omitempty"`
 }
 
 func (m *Mount) Reset()         { *m = Mount{} }
@@ -631,6 +640,13 @@ func (m *Mount) GetSSHOpt() *SSHOpt {
 	return nil
 }
 
+func (m *Mount) GetResultID() string {
+	if m != nil {
+		return m.ResultID
+	}
+	return ""
+}
+
 // CacheOpt defines options specific to cache mounts
 type CacheOpt struct {
 	// ID is an optional namespace for the mount
@@ -2316,144 +2332,146 @@ func init() {
 func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) }
 
 var fileDescriptor_8de16154b2733812 = []byte{
-	// 2189 bytes of a gzipped FileDescriptorProto
+	// 2217 bytes of a gzipped FileDescriptorProto
 	// ... (regenerated gzipped descriptor bytes, unchanged in meaning, not reproduced here) ...
 }
 
 func (m *Op) Marshal() (dAtA []byte, err error) {
@@ -2784,6 +2802,13 @@ func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.Hostname) > 0 {
+		i -= len(m.Hostname)
+		copy(dAtA[i:], m.Hostname)
+		i = encodeVarintOps(dAtA, i, uint64(len(m.Hostname)))
+		i--
+		dAtA[i] = 0x3a
+	}
 	if len(m.ExtraHosts) > 0 {
 		for iNdEx := len(m.ExtraHosts) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -2865,6 +2890,15 @@ func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.ResultID) > 0 {
+		i -= len(m.ResultID)
+		copy(dAtA[i:], m.ResultID)
+		i = encodeVarintOps(dAtA, i, uint64(len(m.ResultID)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xba
+	}
 	if m.SSHOpt != nil {
 		{
 			size, err := m.SSHOpt.MarshalToSizedBuffer(dAtA[:i])
@@ -4663,6 +4697,10 @@ func (m *Meta) Size() (n int) {
 			n += 1 + l + sovOps(uint64(l))
 		}
 	}
+	l = len(m.Hostname)
+	if l > 0 {
+		n += 1 + l + sovOps(uint64(l))
+	}
 	return n
 }
 
@@ -4704,6 +4742,10 @@ func (m *Mount) Size() (n int) {
 		l = m.SSHOpt.Size()
 		n += 2 + l + sovOps(uint64(l))
 	}
+	l = len(m.ResultID)
+	if l > 0 {
+		n += 2 + l + sovOps(uint64(l))
+	}
 	return n
 }
 
@@ -6359,6 +6401,38 @@ func (m *Meta) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOps
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOps
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOps
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hostname = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipOps(dAtA[iNdEx:])
@@ -6661,6 +6735,38 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 23:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResultID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowOps
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthOps
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthOps
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResultID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipOps(dAtA[iNdEx:])
@@ -57,6 +57,7 @@ message Meta {
 	string user = 4;
 	ProxyEnv proxy_env = 5;
 	repeated HostIP extraHosts = 6;
+	string hostname = 7;
 }
 
 enum NetMode {
@@ -81,6 +82,7 @@ message Mount {
 	CacheOpt cacheOpt = 20;
 	SecretOpt secretOpt = 21;
 	SSHOpt SSHOpt = 22;
+	string resultID = 23;
 }
 
 // MountType defines a type of a mount from a supported set
|
@ -1,3 +1,3 @@
|
||||||
package moby_buildkit_v1_apicaps
|
package moby_buildkit_v1_apicaps //nolint:golint
|
||||||
|
|
||||||
//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto
|
//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto
|
||||||
|
|
|
@@ -1,11 +1,15 @@
 package grpcerrors
 
 import (
+	"encoding/json"
+	"errors"
+
+	"github.com/containerd/typeurl"
 	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/any"
 	"github.com/moby/buildkit/util/stack"
+	"github.com/sirupsen/logrus"
 	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -29,8 +33,12 @@ func ToGRPC(err error) error {
 		st = status.New(Code(err), err.Error())
 	}
 	if st.Code() != Code(err) {
+		code := Code(err)
+		if code == codes.OK {
+			code = codes.Unknown
+		}
 		pb := st.Proto()
-		pb.Code = int32(Code(err))
+		pb.Code = int32(code)
 		st = status.FromProto(pb)
 	}
 
@@ -47,7 +55,7 @@ func ToGRPC(err error) error {
 	})
 
 	if len(details) > 0 {
-		if st2, err := st.WithDetails(details...); err == nil {
+		if st2, err := withDetails(st, details...); err == nil {
 			st = st2
 		}
 	}
@@ -55,6 +63,26 @@ func ToGRPC(err error) error {
 	return st.Err()
 }
 
+func withDetails(s *status.Status, details ...proto.Message) (*status.Status, error) {
+	if s.Code() == codes.OK {
+		return nil, errors.New("no error details for status with code OK")
+	}
+	p := s.Proto()
+	for _, detail := range details {
+		url, err := typeurl.TypeURL(detail)
+		if err != nil {
+			logrus.Warnf("ignoring typed error %T: not registered", detail)
+			continue
+		}
+		dt, err := json.Marshal(detail)
+		if err != nil {
+			return nil, err
+		}
+		p.Details = append(p.Details, &any.Any{TypeUrl: url, Value: dt})
+	}
+	return status.FromProto(p), nil
+}
+
 func Code(err error) codes.Code {
 	if se, ok := err.(interface {
 		Code() codes.Code
@@ -72,9 +100,10 @@ func Code(err error) codes.Code {
 		Unwrap() error
 	})
 	if ok {
-		return Code(wrapped.Unwrap())
+		if err := wrapped.Unwrap(); err != nil {
+			return Code(err)
+		}
 	}
-
 	return status.FromContextError(err).Code()
 }
 
@ -96,7 +125,9 @@ func AsGRPCStatus(err error) (*status.Status, bool) {
|
||||||
Unwrap() error
|
Unwrap() error
|
||||||
})
|
})
|
||||||
if ok {
|
if ok {
|
||||||
return AsGRPCStatus(wrapped.Unwrap())
|
if err := wrapped.Unwrap(); err != nil {
|
||||||
|
return AsGRPCStatus(err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, false
|
return nil, false
|
||||||
|
@ -123,17 +154,9 @@ func FromGRPC(err error) error {
|
||||||
|
|
||||||
// details that we don't understand are copied as proto
|
// details that we don't understand are copied as proto
|
||||||
for _, d := range pb.Details {
|
for _, d := range pb.Details {
|
||||||
var m interface{}
|
m, err := typeurl.UnmarshalAny(gogoAny(d))
|
||||||
detail := &ptypes.DynamicAny{}
|
if err != nil {
|
||||||
if err := ptypes.UnmarshalAny(d, detail); err != nil {
|
continue
|
||||||
detail := &gogotypes.DynamicAny{}
|
|
||||||
if err := gogotypes.UnmarshalAny(gogoAny(d), detail); err != nil {
|
|
||||||
n.Details = append(n.Details, d)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
m = detail.Message
|
|
||||||
} else {
|
|
||||||
m = detail.Message
|
|
||||||
}
|
}
|
||||||
|
|
||||||
switch v := m.(type) {
|
switch v := m.(type) {
|
||||||
|
@ -144,7 +167,6 @@ func FromGRPC(err error) error {
|
||||||
default:
|
default:
|
||||||
n.Details = append(n.Details, d)
|
n.Details = append(n.Details, d)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err = status.FromProto(n).Err()
|
err = status.FromProto(n).Err()
|
||||||
|
@ -159,6 +181,10 @@ func FromGRPC(err error) error {
|
||||||
err = d.WrapError(err)
|
err = d.WrapError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
stack.Helper()
|
||||||
|
}
|
||||||
|
|
||||||
return stack.Enable(err)
|
return stack.Enable(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -167,6 +193,10 @@ type withCode struct {
|
||||||
error
|
error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (e *withCode) Code() codes.Code {
|
||||||
|
return e.code
|
||||||
|
}
|
||||||
|
|
||||||
func (e *withCode) Unwrap() error {
|
func (e *withCode) Unwrap() error {
|
||||||
return e.error
|
return e.error
|
||||||
}
|
}
|
||||||
|
|
|
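For context, a minimal sketch of how these helpers are typically used around a client/server pair; the import path (github.com/moby/buildkit/util/grpcerrors) and the round-trip behaviour shown are assumptions based on the code in this diff, not something the diff itself states.

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/util/grpcerrors" // assumed import path for the package above
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// A server-side error that already carries a gRPC code.
	orig := status.Error(codes.NotFound, "blob not found")

	wire := grpcerrors.ToGRPC(orig)   // what a server interceptor would send
	back := grpcerrors.FromGRPC(wire) // what a client interceptor reconstructs

	// The code should survive the round trip.
	fmt.Println(grpcerrors.Code(back), grpcerrors.Code(back) == codes.NotFound)
}
```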
@@ -2,27 +2,53 @@ package grpcerrors
 
 import (
 	"context"
+	"log"
+	"os"
 
+	"github.com/moby/buildkit/util/stack"
+	"github.com/pkg/errors"
 	"google.golang.org/grpc"
 )
 
 func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
 	resp, err = handler(ctx, req)
+	oldErr := err
 	if err != nil {
+		stack.Helper()
 		err = ToGRPC(err)
 	}
+	if oldErr != nil && err == nil {
+		logErr := errors.Wrap(err, "invalid grpc error conversion")
+		if os.Getenv("BUILDKIT_DEBUG_PANIC_ON_ERROR") == "1" {
+			panic(logErr)
+		}
+		log.Printf("%v", logErr)
+		err = oldErr
+	}
+
 	return resp, err
 }
 
 func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-	return ToGRPC(handler(srv, ss))
+	err := ToGRPC(handler(srv, ss))
+	if err != nil {
+		stack.Helper()
+	}
+	return err
 }
 
 func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-	return FromGRPC(invoker(ctx, method, req, reply, cc, opts...))
+	err := FromGRPC(invoker(ctx, method, req, reply, cc, opts...))
+	if err != nil {
+		stack.Helper()
+	}
+	return err
 }
 
 func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
 	s, err := streamer(ctx, desc, cc, method, opts...)
+	if err != nil {
+		stack.Helper()
+	}
 	return s, ToGRPC(err)
 }
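A hedged sketch of how these interceptors would typically be wired into a gRPC server and client; the option constructors come from google.golang.org/grpc, while the grpcerrors import path is an assumption based on the package name above.

```go
package main

import (
	"github.com/moby/buildkit/util/grpcerrors" // assumed import path
	"google.golang.org/grpc"
)

func newServerAndDialOptions() (*grpc.Server, []grpc.DialOption) {
	// Server side: convert internal errors to gRPC statuses on the way out.
	srv := grpc.NewServer(
		grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
	)

	// Client side: turn gRPC statuses back into rich errors on the way in.
	dialOpts := []grpc.DialOption{
		grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor),
		grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor),
	}
	return srv, dialOpts
}
```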
@@ -15,7 +15,6 @@ type MultiWriter struct {
 	mu      sync.Mutex
 	items   []*Progress
 	writers map[rawProgressWriter]struct{}
-	done    bool
 	meta    map[string]interface{}
 }
@@ -62,6 +62,11 @@ func WithMetadata(key string, val interface{}) WriterOption {
 	}
 }
 
+type Controller interface {
+	Start(context.Context) (context.Context, func(error))
+	Status(id string, action string) func()
+}
+
 type Writer interface {
 	Write(id string, value interface{}) error
 	Close() error
vendor/github.com/moby/buildkit/util/progress/progresswriter/multiwriter.go (generated, vendored, new file, 106 lines)
@@ -0,0 +1,106 @@
package progresswriter

import (
	"context"
	"strings"
	"sync"

	"github.com/moby/buildkit/client"
	"golang.org/x/sync/errgroup"
)

type MultiWriter struct {
	w     Writer
	eg    *errgroup.Group
	once  sync.Once
	ready chan struct{}
}

func (mw *MultiWriter) WithPrefix(pfx string, force bool) Writer {
	in := make(chan *client.SolveStatus)
	out := mw.w.Status()
	p := &prefixed{
		main: mw.w,
		in:   in,
	}
	mw.eg.Go(func() error {
		mw.once.Do(func() {
			close(mw.ready)
		})
		for {
			select {
			case v, ok := <-in:
				if ok {
					if force {
						for _, v := range v.Vertexes {
							v.Name = addPrefix(pfx, v.Name)
						}
					}
					out <- v
				} else {
					return nil
				}
			case <-mw.Done():
				return mw.Err()
			}
		}
	})
	return p
}

func (mw *MultiWriter) Done() <-chan struct{} {
	return mw.w.Done()
}

func (mw *MultiWriter) Err() error {
	return mw.w.Err()
}

func (mw *MultiWriter) Status() chan *client.SolveStatus {
	return nil
}

type prefixed struct {
	main Writer
	in   chan *client.SolveStatus
}

func (p *prefixed) Done() <-chan struct{} {
	return p.main.Done()
}

func (p *prefixed) Err() error {
	return p.main.Err()
}

func (p *prefixed) Status() chan *client.SolveStatus {
	return p.in
}

func NewMultiWriter(pw Writer) *MultiWriter {
	if pw == nil {
		return nil
	}
	eg, _ := errgroup.WithContext(context.TODO())

	ready := make(chan struct{})

	go func() {
		<-ready
		eg.Wait()
		close(pw.Status())
	}()

	return &MultiWriter{
		w:     pw,
		eg:    eg,
		ready: ready,
	}
}

func addPrefix(pfx, name string) string {
	if strings.HasPrefix(name, "[") {
		return "[" + pfx + " " + name[1:]
	}
	return "[" + pfx + "] " + name
}
vendor/github.com/moby/buildkit/util/progress/progresswriter/printer.go (generated, vendored, new file, 94 lines)
@@ -0,0 +1,94 @@
package progresswriter

import (
	"context"
	"os"

	"github.com/containerd/console"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/progress/progressui"
	"github.com/pkg/errors"
)

type printer struct {
	status chan *client.SolveStatus
	done   <-chan struct{}
	err    error
}

func (p *printer) Done() <-chan struct{} {
	return p.done
}

func (p *printer) Err() error {
	return p.err
}

func (p *printer) Status() chan *client.SolveStatus {
	if p == nil {
		return nil
	}
	return p.status
}

type tee struct {
	Writer
	status chan *client.SolveStatus
}

func (t *tee) Status() chan *client.SolveStatus {
	return t.status
}

func Tee(w Writer, ch chan *client.SolveStatus) Writer {
	st := make(chan *client.SolveStatus)
	t := &tee{
		status: st,
		Writer: w,
	}
	go func() {
		for v := range st {
			w.Status() <- v
			ch <- v
		}
		close(w.Status())
		close(ch)
	}()
	return t
}

func NewPrinter(ctx context.Context, out console.File, mode string) (Writer, error) {
	statusCh := make(chan *client.SolveStatus)
	doneCh := make(chan struct{})

	pw := &printer{
		status: statusCh,
		done:   doneCh,
	}

	if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && mode == "auto" {
		mode = v
	}

	var c console.Console
	switch mode {
	case "auto", "tty", "":
		if cons, err := console.ConsoleFromFile(out); err == nil {
			c = cons
		} else {
			if mode == "tty" {
				return nil, errors.Wrap(err, "failed to get console")
			}
		}
	case "plain":
	default:
		return nil, errors.Errorf("invalid progress mode %s", mode)
	}

	go func() {
		// not using shared context to not disrupt display but let it finish reporting errors
		pw.err = progressui.DisplaySolveStatus(ctx, "", c, out, statusCh)
		close(doneCh)
	}()
	return pw, nil
}
vendor/github.com/moby/buildkit/util/progress/progresswriter/progress.go (generated, vendored, new file, 93 lines)
@@ -0,0 +1,93 @@
package progresswriter

import (
	"time"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/identity"
	"github.com/opencontainers/go-digest"
)

type Logger func(*client.SolveStatus)

type SubLogger interface {
	Wrap(name string, fn func() error) error
	Log(stream int, dt []byte)
}

func Wrap(name string, l Logger, fn func(SubLogger) error) (err error) {
	if l == nil {
		return nil
	}
	dgst := digest.FromBytes([]byte(identity.NewID()))
	tm := time.Now()
	l(&client.SolveStatus{
		Vertexes: []*client.Vertex{{
			Digest:  dgst,
			Name:    name,
			Started: &tm,
		}},
	})

	defer func() {
		tm2 := time.Now()
		errMsg := ""
		if err != nil {
			errMsg = err.Error()
		}
		l(&client.SolveStatus{
			Vertexes: []*client.Vertex{{
				Digest:    dgst,
				Name:      name,
				Started:   &tm,
				Completed: &tm2,
				Error:     errMsg,
			}},
		})
	}()

	return fn(&subLogger{dgst, l})
}

type subLogger struct {
	dgst   digest.Digest
	logger Logger
}

func (sl *subLogger) Wrap(name string, fn func() error) (err error) {
	tm := time.Now()
	sl.logger(&client.SolveStatus{
		Statuses: []*client.VertexStatus{{
			Vertex:    sl.dgst,
			ID:        name,
			Timestamp: time.Now(),
			Started:   &tm,
		}},
	})

	defer func() {
		tm2 := time.Now()
		sl.logger(&client.SolveStatus{
			Statuses: []*client.VertexStatus{{
				Vertex:    sl.dgst,
				ID:        name,
				Timestamp: time.Now(),
				Started:   &tm,
				Completed: &tm2,
			}},
		})
	}()

	return fn()
}

func (sl *subLogger) Log(stream int, dt []byte) {
	sl.logger(&client.SolveStatus{
		Logs: []*client.VertexLog{{
			Vertex:    sl.dgst,
			Stream:    stream,
			Data:      dt,
			Timestamp: time.Now(),
		}},
	})
}
vendor/github.com/moby/buildkit/util/progress/progresswriter/reset.go (generated, vendored, new file, 71 lines)
@@ -0,0 +1,71 @@
package progresswriter

import (
	"time"

	"github.com/moby/buildkit/client"
)

func ResetTime(in Writer) Writer {
	w := &pw{Writer: in, status: make(chan *client.SolveStatus), tm: time.Now()}
	go func() {
		for {
			select {
			case <-in.Done():
				return
			case st, ok := <-w.status:
				if !ok {
					close(in.Status())
					return
				}
				if w.diff == nil {
					for _, v := range st.Vertexes {
						if v.Started != nil {
							d := v.Started.Sub(w.tm)
							w.diff = &d
						}
					}
				}
				if w.diff != nil {
					for _, v := range st.Vertexes {
						if v.Started != nil {
							d := v.Started.Add(-*w.diff)
							v.Started = &d
						}
						if v.Completed != nil {
							d := v.Completed.Add(-*w.diff)
							v.Completed = &d
						}
					}
					for _, v := range st.Statuses {
						if v.Started != nil {
							d := v.Started.Add(-*w.diff)
							v.Started = &d
						}
						if v.Completed != nil {
							d := v.Completed.Add(-*w.diff)
							v.Completed = &d
						}
						v.Timestamp = v.Timestamp.Add(-*w.diff)
					}
					for _, v := range st.Logs {
						v.Timestamp = v.Timestamp.Add(-*w.diff)
					}
				}
				in.Status() <- st
			}
		}
	}()
	return w
}

type pw struct {
	Writer
	tm     time.Time
	diff   *time.Duration
	status chan *client.SolveStatus
}

func (p *pw) Status() chan *client.SolveStatus {
	return p.status
}
vendor/github.com/moby/buildkit/util/progress/progresswriter/writer.go (generated, vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
package progresswriter

import (
	"time"

	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/identity"
	"github.com/opencontainers/go-digest"
)

type Writer interface {
	Done() <-chan struct{}
	Err() error
	Status() chan *client.SolveStatus
}

func Write(w Writer, name string, f func() error) {
	status := w.Status()
	dgst := digest.FromBytes([]byte(identity.NewID()))
	tm := time.Now()

	vtx := client.Vertex{
		Digest:  dgst,
		Name:    name,
		Started: &tm,
	}

	status <- &client.SolveStatus{
		Vertexes: []*client.Vertex{&vtx},
	}

	var err error
	if f != nil {
		err = f()
	}

	tm2 := time.Now()
	vtx2 := vtx
	vtx2.Completed = &tm2
	if err != nil {
		vtx2.Error = err.Error()
	}
	status <- &client.SolveStatus{
		Vertexes: []*client.Vertex{&vtx2},
	}
}
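Taken together, a minimal sketch of how this progresswriter package might be driven from a CLI command; NewPrinter, Write, Status and Done come from the files above, while the overall lifecycle shown (close the status channel, then wait for Done) is an assumption about intended use, not something documented in this diff.

```go
package main

import (
	"context"
	"os"

	"github.com/moby/buildkit/util/progress/progresswriter" // assumed import path
)

func main() {
	ctx := context.Background()

	// "auto" falls back to plain output when stderr is not a TTY.
	pw, err := progresswriter.NewPrinter(ctx, os.Stderr, "auto")
	if err != nil {
		panic(err)
	}

	// Report a named step; the closure's error, if any, is attached to the vertex.
	progresswriter.Write(pw, "resolving image config", func() error {
		return nil // do the actual work here
	})

	// Signal that no more status updates will be sent, then wait for the
	// display goroutine to drain and finish.
	close(pw.Status())
	<-pw.Done()
}
```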
@@ -0,0 +1,51 @@
package sshutil

import (
	"fmt"
	"net"
	"strconv"
	"strings"

	"golang.org/x/crypto/ssh"
)

const defaultPort = 22

var errCallbackDone = fmt.Errorf("callback failed on purpose")

// addDefaultPort appends a default port if hostport doesn't contain one
func addDefaultPort(hostport string, defaultPort int) string {
	_, _, err := net.SplitHostPort(hostport)
	if err == nil {
		return hostport
	}
	hostport = net.JoinHostPort(hostport, strconv.Itoa(defaultPort))
	return hostport
}

// SSHKeyScan scans an SSH server for its host key; server should be in the form "hostname" or "hostname:port"
func SSHKeyScan(server string) (string, error) {
	var key string
	KeyScanCallback := func(hostport string, remote net.Addr, pubKey ssh.PublicKey) error {
		hostname, _, err := net.SplitHostPort(hostport)
		if err != nil {
			return err
		}
		key = strings.TrimSpace(fmt.Sprintf("%s %s", hostname, string(ssh.MarshalAuthorizedKey(pubKey))))
		return errCallbackDone
	}
	config := &ssh.ClientConfig{
		HostKeyCallback: KeyScanCallback,
	}

	server = addDefaultPort(server, defaultPort)
	conn, err := ssh.Dial("tcp", server, config)
	if key != "" {
		// as long as we get the key, the function worked
		err = nil
	}
	if conn != nil {
		conn.Close()
	}
	return key, err
}
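As an illustration, a small sketch of calling SSHKeyScan to collect a known_hosts-style entry; the import path is an assumption based on the package name.

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/util/sshutil" // assumed import path
)

func main() {
	// The port defaults to 22 when none is given.
	entry, err := sshutil.SSHKeyScan("github.com")
	if err != nil {
		fmt.Println("scan failed:", err)
		return
	}
	// Prints "<hostname> <key-type> <base64-key>", suitable for a known_hosts file.
	fmt.Println(entry)
}
```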
@@ -4,12 +4,24 @@ import (
 	"fmt"
 	io "io"
 	"os"
+	"runtime"
 	"strconv"
 	"strings"
+	"sync"
 
+	"github.com/containerd/typeurl"
 	"github.com/pkg/errors"
 )
 
+var helpers map[string]struct{}
+var helpersMu sync.RWMutex
+
+func init() {
+	typeurl.Register((*Stack)(nil), "github.com/moby/buildkit", "stack.Stack+json")
+
+	helpers = map[string]struct{}{}
+}
+
 var version string
 var revision string
@@ -18,6 +30,19 @@ func SetVersionInfo(v, r string) {
 	revision = r
 }
 
+func Helper() {
+	var pc [1]uintptr
+	n := runtime.Callers(2, pc[:])
+	if n == 0 {
+		return
+	}
+	frames := runtime.CallersFrames(pc[:n])
+	frame, _ := frames.Next()
+	helpersMu.Lock()
+	helpers[frame.Function] = struct{}{}
+	helpersMu.Unlock()
+}
+
 func Traces(err error) []*Stack {
 	var st []*Stack
@@ -47,6 +72,7 @@ func Enable(err error) error {
 	if err == nil {
 		return nil
 	}
+	Helper()
 	if !hasLocalStackTrace(err) {
 		return errors.WithStack(err)
 	}
@@ -107,6 +133,8 @@ func (w *formatter) Format(s fmt.State, verb rune) {
 
 func convertStack(s errors.StackTrace) *Stack {
 	var out Stack
+	helpersMu.RLock()
+	defer helpersMu.RUnlock()
 	for _, f := range s {
 		dt, err := f.MarshalText()
 		if err != nil {
@@ -116,6 +144,9 @@ func convertStack(s errors.StackTrace) *Stack {
 		if len(p) != 2 {
 			continue
 		}
+		if _, ok := helpers[p[0]]; ok {
+			continue
+		}
 		idx := strings.LastIndexByte(p[1], ':')
 		if idx == -1 {
 			continue
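A hedged sketch of what the new Helper call is for: a wrapper that decorates errors can register itself as a helper so its own frame is filtered out of the stacks built by convertStack. The wrapper function below is hypothetical, not part of this diff.

```go
package errhelpers

import (
	"github.com/moby/buildkit/util/stack"
	"github.com/pkg/errors"
)

// wrapInvalidArg is a hypothetical helper; calling stack.Helper() records
// this function so its frame is skipped when stack traces are rendered.
func wrapInvalidArg(err error, what string) error {
	if err == nil {
		return nil
	}
	stack.Helper()
	return stack.Enable(errors.Wrapf(err, "invalid argument %s", what))
}
```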
@@ -0,0 +1,18 @@
package system

// DefaultPathEnvUnix is the unix-style list of directories to search for
// executables. Each directory is separated from the next by a colon
// ':' character.
const DefaultPathEnvUnix = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

// DefaultPathEnvWindows is the windows-style list of directories to search for
// executables. Each directory is separated from the next by a semicolon
// ';' character.
const DefaultPathEnvWindows = "c:\\Windows\\System32;c:\\Windows"

func DefaultPathEnv(os string) string {
	if os == "windows" {
		return DefaultPathEnvWindows
	}
	return DefaultPathEnvUnix
}
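For example, a trivial sketch of the new helper with GOOS as the selector; the import path is an assumption based on the surrounding vendored paths.

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/moby/buildkit/util/system" // assumed import path
)

func main() {
	// "windows" selects the semicolon-separated Windows default,
	// anything else the colon-separated Unix default.
	fmt.Println(system.DefaultPathEnv(runtime.GOOS))
	fmt.Println(system.DefaultPathEnv("windows"))
}
```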
@@ -2,11 +2,6 @@
 
 package system
 
-// DefaultPathEnv is unix style list of directories to search for
-// executables. Each directory is separated from the next by a colon
-// ':' character .
-const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-
 // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
 // is the system drive. This is a no-op on Linux.
 func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
@@ -8,10 +8,6 @@ import (
 	"strings"
 )
 
-// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
-// the container. Docker has no context of what the default path should be.
-const DefaultPathEnv = ""
-
 // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
 // This is used, for example, when validating a user provided path in docker cp.
 // If a drive letter is supplied, it must be the system drive. The drive letter
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,267 @@
|
||||||
|
# OpenCensus Libraries for Go
|
||||||
|
|
||||||
|
[![Build Status][travis-image]][travis-url]
|
||||||
|
[![Windows Build Status][appveyor-image]][appveyor-url]
|
||||||
|
[![GoDoc][godoc-image]][godoc-url]
|
||||||
|
[![Gitter chat][gitter-image]][gitter-url]
|
||||||
|
|
||||||
|
OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
|
||||||
|
collecting application performance and behavior monitoring data.
|
||||||
|
Currently it consists of three major components: tags, stats and tracing.
|
||||||
|
|
||||||
|
#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289).
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```
|
||||||
|
$ go get -u go.opencensus.io
|
||||||
|
```
|
||||||
|
|
||||||
|
The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy).
|
||||||
|
The use of vendoring or a dependency management tool is recommended.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
OpenCensus Go libraries require Go 1.8 or later.
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
The easiest way to get started using OpenCensus in your application is to use an existing
|
||||||
|
integration with your RPC framework:
|
||||||
|
|
||||||
|
* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
|
||||||
|
* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
|
||||||
|
* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql)
|
||||||
|
* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
|
||||||
|
* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
|
||||||
|
* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
|
||||||
|
* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
|
||||||
|
* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
|
||||||
|
* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
|
||||||
|
* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
|
||||||
|
|
||||||
|
If you're using a framework not listed here, you could either implement your own middleware for your
|
||||||
|
framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
|
||||||
|
|
||||||
|
## Exporters
|
||||||
|
|
||||||
|
OpenCensus can export instrumentation data to various backends.
|
||||||
|
OpenCensus has exporter implementations for the following, users
|
||||||
|
can implement their own exporters by implementing the exporter interfaces
|
||||||
|
([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
|
||||||
|
[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
|
||||||
|
|
||||||
|
* [Prometheus][exporter-prom] for stats
|
||||||
|
* [OpenZipkin][exporter-zipkin] for traces
|
||||||
|
* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
|
||||||
|
* [Jaeger][exporter-jaeger] for traces
|
||||||
|
* [AWS X-Ray][exporter-xray] for traces
|
||||||
|
* [Datadog][exporter-datadog] for stats and traces
|
||||||
|
* [Graphite][exporter-graphite] for stats
|
||||||
|
* [Honeycomb][exporter-honeycomb] for traces
|
||||||
|
* [New Relic][exporter-newrelic] for stats and traces
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg)
|
||||||
|
|
||||||
|
In a microservices environment, a user request may go through
|
||||||
|
multiple services until there is a response. OpenCensus allows
|
||||||
|
you to instrument your services and collect diagnostics data all
|
||||||
|
through your services end-to-end.
|
||||||
|
|
||||||
|
## Tags
|
||||||
|
|
||||||
|
Tags represent propagated key-value pairs. They are propagated using `context.Context`
|
||||||
|
in the same process or can be encoded to be transmitted on the wire. Usually, this will
|
||||||
|
be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
|
||||||
|
for gRPC.
|
||||||
|
|
||||||
|
Package `tag` allows adding or modifying tags in the current context.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/tags.go new)
|
||||||
|
```go
|
||||||
|
ctx, err := tag.New(ctx,
|
||||||
|
tag.Insert(osKey, "macOS-10.12.5"),
|
||||||
|
tag.Upsert(userIDKey, "cde36753ed"),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Stats
|
||||||
|
|
||||||
|
OpenCensus is a low-overhead framework even if instrumentation is always enabled.
|
||||||
|
In order to be so, it is optimized to make recording of data points fast
|
||||||
|
and separate from the data aggregation.
|
||||||
|
|
||||||
|
OpenCensus stats collection happens in two stages:
|
||||||
|
|
||||||
|
* Definition of measures and recording of data points
|
||||||
|
* Definition of views and aggregation of the recorded data
|
||||||
|
|
||||||
|
### Recording
|
||||||
|
|
||||||
|
Measurements are data points associated with a measure.
|
||||||
|
Recording implicitly tags the set of Measurements with the tags from the
|
||||||
|
provided context:
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/stats.go record)
|
||||||
|
```go
|
||||||
|
stats.Record(ctx, videoSize.M(102478))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Views
|
||||||
|
|
||||||
|
Views are how Measures are aggregated. You can think of them as queries over the
|
||||||
|
set of recorded data points (measurements).
|
||||||
|
|
||||||
|
Views have two parts: the tags to group by and the aggregation type used.
|
||||||
|
|
||||||
|
Currently three types of aggregations are supported:
|
||||||
|
* CountAggregation is used to count the number of times a sample was recorded.
|
||||||
|
* DistributionAggregation is used to provide a histogram of the values of the samples.
|
||||||
|
* SumAggregation is used to sum up all sample values.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/stats.go aggs)
|
||||||
|
```go
|
||||||
|
distAgg := view.Distribution(1<<32, 2<<32, 3<<32)
|
||||||
|
countAgg := view.Count()
|
||||||
|
sumAgg := view.Sum()
|
||||||
|
```
|
||||||
|
|
||||||
|
Here we create a view with the DistributionAggregation over our measure.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/stats.go view)
|
||||||
|
```go
|
||||||
|
if err := view.Register(&view.View{
|
||||||
|
Name: "example.com/video_size_distribution",
|
||||||
|
Description: "distribution of processed video size over time",
|
||||||
|
Measure: videoSize,
|
||||||
|
Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
|
||||||
|
}); err != nil {
|
||||||
|
log.Fatalf("Failed to register view: %v", err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Register begins collecting data for the view. Registered views' data will be
|
||||||
|
exported via the registered exporters.
|
||||||
|
|
||||||
|
## Traces
|
||||||
|
|
||||||
|
A distributed trace tracks the progression of a single user request as
|
||||||
|
it is handled by the services and processes that make up an application.
|
||||||
|
Each step is called a span in the trace. Spans include metadata about the step,
|
||||||
|
including especially the time spent in the step, called the span’s latency.
|
||||||
|
|
||||||
|
Below you see a trace and several spans underneath it.
|
||||||
|
|
||||||
|
![Traces and spans](https://i.imgur.com/7hZwRVj.png)
|
||||||
|
|
||||||
|
### Spans
|
||||||
|
|
||||||
|
Span is the unit step in a trace. Each span has a name, latency, status and
|
||||||
|
additional metadata.
|
||||||
|
|
||||||
|
Below we are starting a span for a cache read and ending it
|
||||||
|
when we are done:
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/trace.go startend)
|
||||||
|
```go
|
||||||
|
ctx, span := trace.StartSpan(ctx, "cache.Get")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
// Do work to get from cache.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Propagation
|
||||||
|
|
||||||
|
Spans can have parents or can be root spans if they don't have any parents.
|
||||||
|
The current span is propagated in-process and across the network to allow associating
|
||||||
|
new child spans with the parent.
|
||||||
|
|
||||||
|
In the same process, `context.Context` is used to propagate spans.
|
||||||
|
`trace.StartSpan` creates a new span as a root if the current context
|
||||||
|
doesn't contain a span. Or, it creates a child of the span that is
|
||||||
|
already in current context. The returned context can be used to keep
|
||||||
|
propagating the newly created span in the current context.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/trace.go startend)
|
||||||
|
```go
|
||||||
|
ctx, span := trace.StartSpan(ctx, "cache.Get")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
// Do work to get from cache.
|
||||||
|
```
|
||||||
|
|
||||||
|
Across the network, OpenCensus provides different propagation
|
||||||
|
methods for different protocols.
|
||||||
|
|
||||||
|
* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
|
||||||
|
* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
|
||||||
|
by default but can be configured to use a custom propagation method by setting another
|
||||||
|
[propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
|
||||||
|
|
||||||
|
## Execution Tracer
|
||||||
|
|
||||||
|
With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
|
||||||
|
See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
|
||||||
|
for an example of their mutual use.
|
||||||
|
|
||||||
|
## Profiles
|
||||||
|
|
||||||
|
OpenCensus tags can be applied as profiler labels
|
||||||
|
for users who are on Go 1.9 and above.
|
||||||
|
|
||||||
|
[embedmd]:# (internal/readme/tags.go profiler)
|
||||||
|
```go
|
||||||
|
ctx, err = tag.New(ctx,
|
||||||
|
tag.Insert(osKey, "macOS-10.12.5"),
|
||||||
|
tag.Insert(userIDKey, "fff0989878"),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
tag.Do(ctx, func(ctx context.Context) {
|
||||||
|
// Do work.
|
||||||
|
// When profiling is on, samples will be
|
||||||
|
// recorded with the key/values from the tag map.
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
A screenshot of the CPU profile from the program above:
|
||||||
|
|
||||||
|
![CPU profile](https://i.imgur.com/jBKjlkw.png)
|
||||||
|
|
||||||
|
## Deprecation Policy
|
||||||
|
|
||||||
|
Before version 1.0.0, the following deprecation policy will be observed:
|
||||||
|
|
||||||
|
No backwards-incompatible changes will be made except for the removal of symbols that have
|
||||||
|
been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
|
||||||
|
removing the *Deprecated* functionality will be made no sooner than 28 days after the first
|
||||||
|
release in which the functionality was marked *Deprecated*.
|
||||||
|
|
||||||
|
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
|
||||||
|
[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
|
||||||
|
[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
|
||||||
|
[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
|
||||||
|
[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
|
||||||
|
[godoc-url]: https://godoc.org/go.opencensus.io
|
||||||
|
[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
|
||||||
|
[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
|
||||||
|
|
||||||
|
|
||||||
|
[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
|
||||||
|
[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
|
||||||
|
|
||||||
|
[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus
|
||||||
|
[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
|
||||||
|
[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin
|
||||||
|
[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger
|
||||||
|
[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
|
||||||
|
[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
|
||||||
|
[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
|
||||||
|
[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter
|
||||||
|
[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go
|
|
@@ -0,0 +1,15 @@
module go.opencensus.io

require (
	github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
	github.com/golang/protobuf v1.3.1
	github.com/google/go-cmp v0.3.0
	github.com/stretchr/testify v1.4.0
	golang.org/x/net v0.0.0-20190620200207-3b0461eec859
	golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect
	golang.org/x/text v0.3.2 // indirect
	google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect
	google.golang.org/grpc v1.20.1
)

go 1.13
@@ -0,0 +1,37 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal // import "go.opencensus.io/internal"

import (
	"fmt"
	"time"

	opencensus "go.opencensus.io"
)

// UserAgent is the user agent to be added to the outgoing
// requests from the exporters.
var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())

// MonotonicEndTime returns the end time at present
// but offset from start, monotonically.
//
// The monotonic clock is used in subtractions hence
// the duration since start added back to start gives
// end as a monotonic time.
// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
func MonotonicEndTime(start time.Time) time.Time {
	return start.Add(time.Since(start))
}
@@ -0,0 +1,50 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	"strings"
	"unicode"
)

const labelKeySizeLimit = 100

// Sanitize returns a string that is truncated to 100 characters if it's too
// long, and replaces non-alphanumeric characters with underscores.
func Sanitize(s string) string {
	if len(s) == 0 {
		return s
	}
	if len(s) > labelKeySizeLimit {
		s = s[:labelKeySizeLimit]
	}
	s = strings.Map(sanitizeRune, s)
	if unicode.IsDigit(rune(s[0])) {
		s = "key_" + s
	}
	if s[0] == '_' {
		s = "key" + s
	}
	return s
}

// converts anything that is not a letter or digit to an underscore
func sanitizeRune(r rune) rune {
	if unicode.IsLetter(r) || unicode.IsDigit(r) {
		return r
	}
	// Everything else turns into an underscore
	return '_'
}
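A small illustrative check of what Sanitize does, written as an in-package test sketch since go.opencensus.io/internal is an internal package; the test name is hypothetical.

```go
package internal

import "testing"

func TestSanitizeSketch(t *testing.T) {
	// Non-alphanumeric runes become underscores, and a leading digit
	// gets the "key_" prefix.
	if got := Sanitize("9lives-total"); got != "key_9lives_total" {
		t.Fatalf("unexpected: %q", got)
	}
	// A leading underscore (after mapping) gets a "key" prefix instead.
	if got := Sanitize("-foo"); got != "key_foo" {
		t.Fatalf("unexpected: %q", got)
	}
}
```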
@@ -0,0 +1,53 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	"time"
)

// Trace allows internal access to some trace functionality.
// TODO(#412): remove this
var Trace interface{}

// LocalSpanStoreEnabled true if the local span store is enabled.
var LocalSpanStoreEnabled bool

// BucketConfiguration stores the number of samples to store for span buckets
// for successful and failed spans for a particular span name.
type BucketConfiguration struct {
	Name                 string
	MaxRequestsSucceeded int
	MaxRequestsErrors    int
}

// PerMethodSummary is a summary of the spans stored for a single span name.
type PerMethodSummary struct {
	Active         int
	LatencyBuckets []LatencyBucketSummary
	ErrorBuckets   []ErrorBucketSummary
}

// LatencyBucketSummary is a summary of a latency bucket.
type LatencyBucketSummary struct {
	MinLatency, MaxLatency time.Duration
	Size                   int
}

// ErrorBucketSummary is a summary of an error bucket.
type ErrorBucketSummary struct {
	ErrorCode int32
	Size      int
}
@ -0,0 +1,21 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package opencensus contains Go support for OpenCensus.
|
||||||
|
package opencensus // import "go.opencensus.io"
|
||||||
|
|
||||||
|
// Version is the current release version of OpenCensus in use.
|
||||||
|
func Version() string {
|
||||||
|
return "0.23.0"
|
||||||
|
}
|
|
@ -0,0 +1,119 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// TraceID is a 16-byte identifier for a set of spans.
|
||||||
|
TraceID [16]byte
|
||||||
|
|
||||||
|
// SpanID is an 8-byte identifier for a single span.
|
||||||
|
SpanID [8]byte
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t TraceID) String() string {
|
||||||
|
return fmt.Sprintf("%02x", t[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s SpanID) String() string {
|
||||||
|
return fmt.Sprintf("%02x", s[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Annotation represents a text annotation with a set of attributes and a timestamp.
|
||||||
|
type Annotation struct {
|
||||||
|
Time time.Time
|
||||||
|
Message string
|
||||||
|
Attributes map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attribute represents a key-value pair on a span, link or annotation.
|
||||||
|
// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute.
|
||||||
|
type Attribute struct {
|
||||||
|
key string
|
||||||
|
value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolAttribute returns a bool-valued attribute.
|
||||||
|
func BoolAttribute(key string, value bool) Attribute {
|
||||||
|
return Attribute{key: key, value: value}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64Attribute returns an int64-valued attribute.
|
||||||
|
func Int64Attribute(key string, value int64) Attribute {
|
||||||
|
return Attribute{key: key, value: value}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64Attribute returns a float64-valued attribute.
|
||||||
|
func Float64Attribute(key string, value float64) Attribute {
|
||||||
|
return Attribute{key: key, value: value}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringAttribute returns a string-valued attribute.
|
||||||
|
func StringAttribute(key string, value string) Attribute {
|
||||||
|
return Attribute{key: key, value: value}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LinkType specifies the relationship between the span that had the link
|
||||||
|
// added, and the linked span.
|
||||||
|
type LinkType int32
|
||||||
|
|
||||||
|
// LinkType values.
|
||||||
|
const (
|
||||||
|
LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
|
||||||
|
LinkTypeChild // The linked span is a child of the current span.
|
||||||
|
LinkTypeParent // The linked span is the parent of the current span.
|
||||||
|
)
|
||||||
|
|
||||||
|
// Link represents a reference from one span to another span.
|
||||||
|
type Link struct {
|
||||||
|
TraceID TraceID
|
||||||
|
SpanID SpanID
|
||||||
|
Type LinkType
|
||||||
|
// Attributes is a set of attributes on the link.
|
||||||
|
Attributes map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageEventType specifies the type of message event.
|
||||||
|
type MessageEventType int32
|
||||||
|
|
||||||
|
// MessageEventType values.
|
||||||
|
const (
|
||||||
|
MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
|
||||||
|
MessageEventTypeSent // Indicates a sent RPC message.
|
||||||
|
MessageEventTypeRecv // Indicates a received RPC message.
|
||||||
|
)
|
||||||
|
|
||||||
|
// MessageEvent represents an event describing a message sent or received on the network.
|
||||||
|
type MessageEvent struct {
|
||||||
|
Time time.Time
|
||||||
|
EventType MessageEventType
|
||||||
|
MessageID int64
|
||||||
|
UncompressedByteSize int64
|
||||||
|
CompressedByteSize int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status is the status of a Span.
|
||||||
|
type Status struct {
|
||||||
|
// Code is a status code. Zero indicates success.
|
||||||
|
//
|
||||||
|
// If Code will be propagated to Google APIs, it ideally should be a value from
|
||||||
|
// https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
|
||||||
|
Code int32
|
||||||
|
Message string
|
||||||
|
}
|
|
@ -0,0 +1,86 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"go.opencensus.io/trace/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config represents the global tracing configuration.
|
||||||
|
type Config struct {
|
||||||
|
// DefaultSampler is the default sampler used when creating new spans.
|
||||||
|
DefaultSampler Sampler
|
||||||
|
|
||||||
|
// IDGenerator is for internal use only.
|
||||||
|
IDGenerator internal.IDGenerator
|
||||||
|
|
||||||
|
// MaxAnnotationEventsPerSpan is max number of annotation events per span
|
||||||
|
MaxAnnotationEventsPerSpan int
|
||||||
|
|
||||||
|
// MaxMessageEventsPerSpan is max number of message events per span
|
||||||
|
MaxMessageEventsPerSpan int
|
||||||
|
|
||||||
|
// MaxAnnotationEventsPerSpan is max number of attributes per span
|
||||||
|
MaxAttributesPerSpan int
|
||||||
|
|
||||||
|
// MaxLinksPerSpan is max number of links per span
|
||||||
|
MaxLinksPerSpan int
|
||||||
|
}
|
||||||
|
|
||||||
|
var configWriteMu sync.Mutex
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span
|
||||||
|
DefaultMaxAnnotationEventsPerSpan = 32
|
||||||
|
|
||||||
|
// DefaultMaxMessageEventsPerSpan is default max number of message events per span
|
||||||
|
DefaultMaxMessageEventsPerSpan = 128
|
||||||
|
|
||||||
|
// DefaultMaxAttributesPerSpan is default max number of attributes per span
|
||||||
|
DefaultMaxAttributesPerSpan = 32
|
||||||
|
|
||||||
|
// DefaultMaxLinksPerSpan is default max number of links per span
|
||||||
|
DefaultMaxLinksPerSpan = 32
|
||||||
|
)
|
||||||
|
|
||||||
|
// ApplyConfig applies changes to the global tracing configuration.
|
||||||
|
//
|
||||||
|
// Fields not provided in the given config are going to be preserved.
|
||||||
|
func ApplyConfig(cfg Config) {
|
||||||
|
configWriteMu.Lock()
|
||||||
|
defer configWriteMu.Unlock()
|
||||||
|
c := *config.Load().(*Config)
|
||||||
|
if cfg.DefaultSampler != nil {
|
||||||
|
c.DefaultSampler = cfg.DefaultSampler
|
||||||
|
}
|
||||||
|
if cfg.IDGenerator != nil {
|
||||||
|
c.IDGenerator = cfg.IDGenerator
|
||||||
|
}
|
||||||
|
if cfg.MaxAnnotationEventsPerSpan > 0 {
|
||||||
|
c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan
|
||||||
|
}
|
||||||
|
if cfg.MaxMessageEventsPerSpan > 0 {
|
||||||
|
c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan
|
||||||
|
}
|
||||||
|
if cfg.MaxAttributesPerSpan > 0 {
|
||||||
|
c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan
|
||||||
|
}
|
||||||
|
if cfg.MaxLinksPerSpan > 0 {
|
||||||
|
c.MaxLinksPerSpan = cfg.MaxLinksPerSpan
|
||||||
|
}
|
||||||
|
config.Store(&c)
|
||||||
|
}
|
|
@ -0,0 +1,53 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package trace contains support for OpenCensus distributed tracing.
|
||||||
|
|
||||||
|
The following assumes a basic familiarity with OpenCensus concepts.
|
||||||
|
See http://opencensus.io
|
||||||
|
|
||||||
|
|
||||||
|
Exporting Traces
|
||||||
|
|
||||||
|
To export collected tracing data, register at least one exporter. You can use
|
||||||
|
one of the provided exporters or write your own.
|
||||||
|
|
||||||
|
trace.RegisterExporter(exporter)
|
||||||
|
|
||||||
|
By default, traces will be sampled relatively rarely. To change the sampling
|
||||||
|
frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
|
||||||
|
to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
|
||||||
|
|
||||||
|
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||||
|
|
||||||
|
Be careful about using trace.AlwaysSample in a production application with
|
||||||
|
significant traffic: a new trace will be started and exported for every request.
|
||||||
|
|
||||||
|
Adding Spans to a Trace
|
||||||
|
|
||||||
|
A trace consists of a tree of spans. In Go, the current span is carried in a
|
||||||
|
context.Context.
|
||||||
|
|
||||||
|
It is common to want to capture all the activity of a function call in a span. For
|
||||||
|
this to work, the function must take a context.Context as a parameter. Add these two
|
||||||
|
lines to the top of the function:
|
||||||
|
|
||||||
|
ctx, span := trace.StartSpan(ctx, "example.com/Run")
|
||||||
|
defer span.End()
|
||||||
|
|
||||||
|
StartSpan will create a new top-level span if the context
|
||||||
|
doesn't contain another span, otherwise it will create a child span.
|
||||||
|
*/
|
||||||
|
package trace // import "go.opencensus.io/trace"
|
|
@ -0,0 +1,38 @@
|
||||||
|
// Copyright 2019, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package trace
|
||||||
|
|
||||||
|
type evictedQueue struct {
|
||||||
|
queue []interface{}
|
||||||
|
capacity int
|
||||||
|
droppedCount int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEvictedQueue(capacity int) *evictedQueue {
|
||||||
|
eq := &evictedQueue{
|
||||||
|
capacity: capacity,
|
||||||
|
queue: make([]interface{}, 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
return eq
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eq *evictedQueue) add(value interface{}) {
|
||||||
|
if len(eq.queue) == eq.capacity {
|
||||||
|
eq.queue = eq.queue[1:]
|
||||||
|
eq.droppedCount++
|
||||||
|
}
|
||||||
|
eq.queue = append(eq.queue, value)
|
||||||
|
}
|
|
@ -0,0 +1,97 @@
|
||||||
|
// Copyright 2017, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package trace
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Exporter is a type for functions that receive sampled trace spans.
|
||||||
|
//
|
||||||
|
// The ExportSpan method should be safe for concurrent use and should return
|
||||||
|
// quickly; if an Exporter takes a significant amount of time to process a
|
||||||
|
// SpanData, that work should be done on another goroutine.
|
||||||
|
//
|
||||||
|
// The SpanData should not be modified, but a pointer to it can be kept.
|
||||||
|
type Exporter interface {
|
||||||
|
ExportSpan(s *SpanData)
|
||||||
|
}
|
||||||
|
|
||||||
|
type exportersMap map[Exporter]struct{}
|
||||||
|
|
||||||
|
var (
|
||||||
|
exporterMu sync.Mutex
|
||||||
|
exporters atomic.Value
|
||||||
|
)
|
||||||
|
|
||||||
|
// RegisterExporter adds to the list of Exporters that will receive sampled
|
||||||
|
// trace spans.
|
||||||
|
//
|
||||||
|
// Binaries can register exporters, libraries shouldn't register exporters.
|
||||||
|
func RegisterExporter(e Exporter) {
|
||||||
|
exporterMu.Lock()
|
||||||
|
new := make(exportersMap)
|
||||||
|
if old, ok := exporters.Load().(exportersMap); ok {
|
||||||
|
for k, v := range old {
|
||||||
|
new[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
new[e] = struct{}{}
|
||||||
|
exporters.Store(new)
|
||||||
|
exporterMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnregisterExporter removes from the list of Exporters the Exporter that was
|
||||||
|
// registered with the given name.
|
||||||
|
func UnregisterExporter(e Exporter) {
|
||||||
|
exporterMu.Lock()
|
||||||
|
new := make(exportersMap)
|
||||||
|
if old, ok := exporters.Load().(exportersMap); ok {
|
||||||
|
for k, v := range old {
|
||||||
|
new[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
delete(new, e)
|
||||||
|
exporters.Store(new)
|
||||||
|
exporterMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanData contains all the information collected by a Span.
|
||||||
|
type SpanData struct {
|
||||||
|
SpanContext
|
||||||
|
ParentSpanID SpanID
|
||||||
|
SpanKind int
|
||||||
|
Name string
|
||||||
|
StartTime time.Time
|
||||||
|
// The wall clock time of EndTime will be adjusted to always be offset
|
||||||
|
// from StartTime by the duration of the span.
|
||||||
|
EndTime time.Time
|
||||||
|
// The values of Attributes each have type string, bool, or int64.
|
||||||
|
Attributes map[string]interface{}
|
||||||
|
Annotations []Annotation
|
||||||
|
MessageEvents []MessageEvent
|
||||||
|
Status
|
||||||
|
Links []Link
|
||||||
|
HasRemoteParent bool
|
||||||
|
DroppedAttributeCount int
|
||||||
|
DroppedAnnotationCount int
|
||||||
|
DroppedMessageEventCount int
|
||||||
|
DroppedLinkCount int
|
||||||
|
|
||||||
|
// ChildSpanCount holds the number of child span created for this span.
|
||||||
|
ChildSpanCount int
|
||||||
|
}
|
|
@ -0,0 +1,22 @@
|
||||||
|
// Copyright 2018, OpenCensus Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package internal provides trace internals.
|
||||||
|
package internal
|
||||||
|
|
||||||
|
// IDGenerator allows custom generators for TraceId and SpanId.
|
||||||
|
type IDGenerator interface {
|
||||||
|
NewTraceID() [16]byte
|
||||||
|
NewSpanID() [8]byte
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue