Mirror of https://github.com/docker/cli.git

Merge pull request #3248 from thaJeztah/update_grpc_proto

vendor: update protobuf, grpc

Commit 304a2dca5f
vendor.conf:

@@ -26,7 +26,7 @@ github.com/gofrs/flock 6caa7350c26b838538005fae7dbe
 github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2
 github.com/gogo/protobuf b03c65ea87cdc3521ede29f62fe3ce239267c1bc # v1.3.2
 github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
-github.com/golang/protobuf 84668698ea25b64748563aa20726db66a6b8d299 # v1.3.5
+github.com/golang/protobuf ae97035608a719c7a1c1c41bed0ae0744bdb0c6f # v1.5.2
 github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0
 github.com/google/gofuzz 24818f796faf91cd76ec7bddd72458fbced7a6c1
 github.com/google/shlex e7afc7fbc51079733e9468cdfd1efcd7d196cd1d

@@ -78,8 +78,9 @@ golang.org/x/sys d19ff857e887eacb631721f188c7
 golang.org/x/term f5c789dd3221ff39d752ac54467d762de7cfbec6
 golang.org/x/text 23ae387dee1f90d29a23c0e87ee0b46038fbed0e # v0.3.3
 golang.org/x/time 3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
-google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6c7065a3160e8
-google.golang.org/grpc f495f5b15ae7ccda3b38c53a1bfcde4c1a58a2bc # v1.27.1
+google.golang.org/genproto 8816d57aaa9ad8cba31b2a8ecb6199c494bdf8b4 # v0.0.0-20201110150050-8816d57aaa9a
+google.golang.org/grpc 0257c8657362b76f24e7a8cfb61df48d4cb735d3 # v1.38.0
+google.golang.org/protobuf f2d1f6cbe10b90d22296ea09a7217081c2798009 # v1.26.0
 gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
 gopkg.in/yaml.v2 53403b58ad1b561927d19068c655246f2db79d48 # v2.2.8
 gotest.tools/v3 bb0d8a963040ea5048dcef1a14d8f8b58a33d4b3 # v3.0.2
vendor/github.com/golang/protobuf/README.md:

@@ -1,292 +1,122 @@
# Go support for Protocol Buffers - Google's data interchange format
# Go support for Protocol Buffers

[![GoDev](https://img.shields.io/static/v1?label=godev&message=reference&color=00add8)](https://pkg.go.dev/mod/github.com/golang/protobuf)
[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf)

Google's data interchange format.
Copyright 2010 The Go Authors.
https://github.com/golang/protobuf
This module
([`github.com/golang/protobuf`](https://pkg.go.dev/mod/github.com/golang/protobuf))
contains Go bindings for protocol buffers.

This package and the code it generates requires at least Go 1.9.
It has been superseded by the
[`google.golang.org/protobuf`](https://pkg.go.dev/mod/google.golang.org/protobuf)
module, which contains an updated and simplified API,
support for protobuf reflection, and many other improvements.
We recommend that new code use the `google.golang.org/protobuf` module.

This software implements Go bindings for protocol buffers. For
information about protocol buffers themselves, see
https://developers.google.com/protocol-buffers/
Versions v1.4 and later of `github.com/golang/protobuf` are implemented
in terms of `google.golang.org/protobuf`.
Programs which use both modules must use at least version v1.4 of this one.

## Installation ##

See the
[developer guide for protocol buffers in Go](https://developers.google.com/protocol-buffers/docs/gotutorial)
for a general guide for how to get started using protobufs in Go.

To use this software, you must:
- Install the standard C++ implementation of protocol buffers from
  https://developers.google.com/protocol-buffers/
- Of course, install the Go compiler and tools from
  https://golang.org/
  See
  https://golang.org/doc/install
  for details or, if you are using gccgo, follow the instructions at
  https://golang.org/doc/install/gccgo
- Grab the code from the repository and install the `proto` package.
  The simplest way is to run:
  ```
  go get -u github.com/golang/protobuf/protoc-gen-go
  ```
  The compiler plugin, `protoc-gen-go`, will be installed in `$GOPATH/bin`
  unless `$GOBIN` is set. It must be in your `$PATH` for the protocol
  compiler, `protoc`, to find it.
- If you need a particular version of `protoc-gen-go` (e.g., to match your
  `proto` package version), one option is
  ```shell
  GIT_TAG="v1.2.0" # change as needed
  go get -d -u github.com/golang/protobuf/protoc-gen-go
  git -C "$(go env GOPATH)"/src/github.com/golang/protobuf checkout $GIT_TAG
  go install github.com/golang/protobuf/protoc-gen-go
  ```

See
[release note documentation](https://github.com/golang/protobuf/releases)
for more information about individual releases of this project.

This software has two parts: a 'protocol compiler plugin' that
generates Go source files that, once compiled, can access and manage
protocol buffers; and a library that implements run-time support for
encoding (marshaling), decoding (unmarshaling), and accessing protocol
buffers.
See
[documentation for the next major revision](https://pkg.go.dev/mod/google.golang.org/protobuf)
for more information about the purpose, usage, and history of this project.

There is support for gRPC in Go using protocol buffers.
See the note at the bottom of this file for details.

## Package index

There are no insertion points in the plugin.
Summary of the packages provided by this module:

* [`proto`](https://pkg.go.dev/github.com/golang/protobuf/proto): Package
  `proto` provides functions operating on protobuf messages such as cloning,
  merging, and checking equality, as well as binary serialization and text
  serialization.
* [`jsonpb`](https://pkg.go.dev/github.com/golang/protobuf/jsonpb): Package
  `jsonpb` serializes protobuf messages as JSON.
* [`ptypes`](https://pkg.go.dev/github.com/golang/protobuf/ptypes): Package
  `ptypes` provides helper functionality for protobuf well-known types.
* [`ptypes/any`](https://pkg.go.dev/github.com/golang/protobuf/ptypes/any):
  Package `any` is the generated package for `google/protobuf/any.proto`.
* [`ptypes/empty`](https://pkg.go.dev/github.com/golang/protobuf/ptypes/empty):
  Package `empty` is the generated package for `google/protobuf/empty.proto`.
* [`ptypes/timestamp`](https://pkg.go.dev/github.com/golang/protobuf/ptypes/timestamp):
  Package `timestamp` is the generated package for
  `google/protobuf/timestamp.proto`.
* [`ptypes/duration`](https://pkg.go.dev/github.com/golang/protobuf/ptypes/duration):
  Package `duration` is the generated package for
  `google/protobuf/duration.proto`.
* [`ptypes/wrappers`](https://pkg.go.dev/github.com/golang/protobuf/ptypes/wrappers):
  Package `wrappers` is the generated package for
  `google/protobuf/wrappers.proto`.
* [`ptypes/struct`](https://pkg.go.dev/github.com/golang/protobuf/ptypes/struct):
  Package `structpb` is the generated package for
  `google/protobuf/struct.proto`.
* [`protoc-gen-go/descriptor`](https://pkg.go.dev/github.com/golang/protobuf/protoc-gen-go/descriptor):
  Package `descriptor` is the generated package for
  `google/protobuf/descriptor.proto`.
* [`protoc-gen-go/plugin`](https://pkg.go.dev/github.com/golang/protobuf/protoc-gen-go/plugin):
  Package `plugin` is the generated package for
  `google/protobuf/compiler/plugin.proto`.
* [`protoc-gen-go`](https://pkg.go.dev/github.com/golang/protobuf/protoc-gen-go):
  The `protoc-gen-go` binary is a protoc plugin to generate a Go protocol
  buffer package.
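
The short sketch below is an added illustration, not part of the README: assuming a program that only needs the packages listed above, it shows how `proto`, `jsonpb`, and `ptypes` typically combine (the chosen timestamp value is arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
)

func main() {
	// ptypes constructs well-known types such as google.protobuf.Timestamp.
	ts, err := ptypes.TimestampProto(time.Unix(1234567890, 0).UTC())
	if err != nil {
		panic(err)
	}

	// proto handles binary (wire-format) serialization.
	wire, err := proto.Marshal(ts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d wire bytes\n", len(wire))

	// jsonpb serializes the same message as JSON.
	s, err := (&jsonpb.Marshaler{}).MarshalToString(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}
```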

## Using protocol buffers with Go ##
## Reporting issues

Once the software is installed, there are two steps to using it.
First you must compile the protocol buffer definitions and then import
them, with the support library, into your program.
The issue tracker for this project
[is located here](https://github.com/golang/protobuf/issues).

To compile the protocol buffer definition, run protoc with the --go_out
parameter set to the directory you want to output the Go code to.
Please report any issues with a sufficient description of the bug or feature
request. Bug reports should ideally be accompanied by a minimal reproduction of
the issue. Irreproducible bugs are difficult to diagnose and fix (and likely to
be closed after some period of time). Bug reports must specify the version of
the
[Go protocol buffer module](https://github.com/protocolbuffers/protobuf-go/releases)
and also the version of the
[protocol buffer toolchain](https://github.com/protocolbuffers/protobuf/releases)
being used.

	protoc --go_out=. *.proto

## Contributing

The generated files will be suffixed .pb.go. See the Test code below
for an example using such a file.
This project is open-source and accepts contributions. See the
[contribution guide](https://github.com/golang/protobuf/blob/master/CONTRIBUTING.md)
for more information.

## Packages and input paths ##
## Compatibility

The protocol buffer language has a concept of "packages" which does not
correspond well to the Go notion of packages. In generated Go code,
each source `.proto` file is associated with a single Go package. The
name and import path for this package is specified with the `go_package`
proto option:
This module and the generated code are expected to be stable over time. However,
we reserve the right to make breaking changes without notice for the following
reasons:

	option go_package = "github.com/golang/protobuf/ptypes/any";

The protocol buffer compiler will attempt to derive a package name and
import path if a `go_package` option is not present, but it is
best to always specify one explicitly.

There is a one-to-one relationship between source `.proto` files and
generated `.pb.go` files, but any number of `.pb.go` files may be
contained in the same Go package.

The output name of a generated file is produced by replacing the
`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`).
However, the output directory is selected in one of two ways. Let
us say we have `inputs/x.proto` with a `go_package` option of
`github.com/golang/protobuf/p`. The corresponding output file may
be:

- Relative to the import path:

  ```shell
  protoc --go_out=. inputs/x.proto
  # writes ./github.com/golang/protobuf/p/x.pb.go
  ```

  (This can work well with `--go_out=$GOPATH`.)

- Relative to the input file:

  ```shell
  protoc --go_out=paths=source_relative:. inputs/x.proto
  # generates ./inputs/x.pb.go
  ```

## Generated code ##

The package comment for the proto library contains text describing
the interface provided in Go for protocol buffers. Here is an edited
version.

The proto package converts data structures to and from the
wire format of protocol buffers. It works in concert with the
Go source code generated for .proto files by the protocol compiler.

A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:

- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
  them as structure fields.
- There are getters that return a field's value if set,
  and return the field's default value if unset.
  The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
  All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
  That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
  Helpers for getting values are superseded by the
  GetFoo methods and their use is deprecated.
      msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
  have them. They have the form Default_StructName_FieldName.
  Because the getter methods handle defaulted values,
  direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
  Enum values are prefixed with the enum's type name. Enum types have
  a String method, and an Enum method to assist in message construction.
- Nested groups and enums have type names prefixed with the name of
  the surrounding message type.
- Extensions are given descriptor names that start with E_,
  followed by an underscore-delimited list of the nested messages
  that contain it (if any) followed by the CamelCased name of the
  extension field itself. HasExtension, ClearExtension, GetExtension
  and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
  with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.

When the .proto file specifies `syntax="proto3"`, there are some differences:

- Non-repeated fields of non-message type are values instead of pointers.
- Enum types do not get an Enum method.

Consider file test.proto, containing

```proto
syntax = "proto2";
package example;

enum FOO { X = 17; };

message Test {
  required string label = 1;
  optional int32 type = 2 [default=77];
  repeated int64 reps = 3;
}
```

To create and play with a Test object from the example package,

```go
package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	"path/to/example"
)

func main() {
	test := &example.Test{
		Label: proto.String("hello"),
		Type:  proto.Int32(17),
		Reps:  []int64{1, 2, 3},
	}
	data, err := proto.Marshal(test)
	if err != nil {
		log.Fatal("marshaling error: ", err)
	}
	newTest := &example.Test{}
	err = proto.Unmarshal(data, newTest)
	if err != nil {
		log.Fatal("unmarshaling error: ", err)
	}
	// Now test and newTest contain the same data.
	if test.GetLabel() != newTest.GetLabel() {
		log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
	}
	// etc.
}
```
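
A small additional sketch, not from the README itself: assuming the same generated `example` package used in the program above, it illustrates the default-value getters, the generated `Default_Test_Type` constant, and the nil-safe getters described in the property list:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"path/to/example" // the generated package for test.proto, as in the program above
)

func main() {
	t := &example.Test{Label: proto.String("hello")}

	// "type" declares [default=77]: the getter reports the default while the
	// field itself is still unset, and the generated constant holds the same value.
	fmt.Println(t.Type == nil, t.GetType(), example.Default_Test_Type) // true 77 77

	// Getters may be called on a nil message and return the default value.
	var missing *example.Test
	fmt.Println(missing.GetLabel() == "") // true
}
```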

## Parameters ##

To pass extra parameters to the plugin, use a comma-separated
parameter list separated from the output directory by a colon:

	protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto

- `paths=(import | source_relative)` - specifies how the paths of
  generated files are structured. See the "Packages and imports paths"
  section above. The default is `import`.
- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to
  load. The only plugin in this repo is `grpc`.
- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is
  associated with Go package quux/shme. This is subject to the
  import_prefix parameter.

The following parameters are deprecated and should not be used:

- `import_prefix=xxx` - a prefix that is added onto the beginning of
  all imports.
- `import_path=foo/bar` - used as the package if no input files
  declare `go_package`. If it contains slashes, everything up to the
  rightmost slash is ignored.

## gRPC Support ##

If a proto file specifies RPC services, protoc-gen-go can be instructed to
generate code compatible with gRPC (http://www.grpc.io/). To do this, pass
the `plugins` parameter to protoc-gen-go; the usual way is to insert it into
the --go_out argument to protoc:

	protoc --go_out=plugins=grpc:. *.proto

## Compatibility ##

The library and the generated code are expected to be stable over time.
However, we reserve the right to make breaking changes without notice for the
following reasons:

- Security. A security issue in the specification or implementation may come to
  light whose resolution requires breaking compatibility. We reserve the right
  to address such security issues.
- Unspecified behavior. There are some aspects of the Protocol Buffers
  specification that are undefined. Programs that depend on such unspecified
* **Security:** A security issue in the specification or implementation may
  come to light whose resolution requires breaking compatibility. We reserve
  the right to address such issues.
* **Unspecified behavior:** There are some aspects of the protocol buffer
  specification that are undefined. Programs that depend on unspecified
  behavior may break in future releases.
- Specification errors or changes. If it becomes necessary to address an
  inconsistency, incompleteness, or change in the Protocol Buffers
  specification, resolving the issue could affect the meaning or legality of
  existing programs. We reserve the right to address such issues, including
  updating the implementations.
- Bugs. If the library has a bug that violates the specification, a program
  that depends on the buggy behavior may break if the bug is fixed. We reserve
* **Specification changes:** It may become necessary to address an
  inconsistency, incompleteness, or change in the protocol buffer
  specification, which may affect the behavior of existing programs. We
  reserve the right to address such changes.
* **Bugs:** If a package has a bug that violates correctness, a program
  depending on the buggy behavior may break if the bug is fixed. We reserve
  the right to fix such bugs.
- Adding methods or fields to generated structs. These may conflict with field
  names that already exist in a schema, causing applications to break. When the
  code generator encounters a field in the schema that would collide with a
  generated field or method name, the code generator will append an underscore
  to the generated field or method name.
- Adding, removing, or changing methods or fields in generated structs that
  start with `XXX`. These parts of the generated code are exported out of
  necessity, but should not be considered part of the public API.
- Adding, removing, or changing unexported symbols in generated code.
* **Generated additions**: We reserve the right to add new declarations to
  generated Go packages of `.proto` files. This includes declared constants,
  variables, functions, types, fields in structs, and methods on types. This
  may break attempts at injecting additional code on top of what is generated
  by `protoc-gen-go`. Such practice is not supported by this project.
* **Internal changes**: We reserve the right to add, modify, and remove
  internal code, which includes all unexported declarations, the
  [`generator`](https://pkg.go.dev/github.com/golang/protobuf/protoc-gen-go/generator)
  package, and all packages under
  [`internal`](https://pkg.go.dev/github.com/golang/protobuf/internal).

Any breaking changes outside of these will be announced 6 months in advance to
protobuf@googlegroups.com.

You should, whenever possible, use generated code created by the `protoc-gen-go`
tool built at the same commit as the `proto` package. The `proto` package
declares package-level constants in the form `ProtoPackageIsVersionX`.
Application code and generated code may depend on one of these constants to
ensure that compilation will fail if the available version of the proto library
is too old. Whenever we make a change to the generated code that requires newer
library support, in the same commit we will increment the version number of the
generated code and declare a new package-level constant whose name incorporates
the latest version number. Removing a compatibility constant is considered a
breaking change and would be subject to the announcement policy stated above.

The `protoc-gen-go/generator` package exposes a plugin interface,
which is used by the gRPC code generation. This interface is not
supported and is subject to incompatible changes without notice.
[protobuf@googlegroups.com](https://groups.google.com/forum/#!forum/protobuf).

vendor/github.com/golang/protobuf/go.mod:

@@ -1,3 +1,9 @@
+// Deprecated: Use the "google.golang.org/protobuf" module instead.
 module github.com/golang/protobuf

 go 1.9
+
+require (
+	github.com/google/go-cmp v0.5.5
+	google.golang.org/protobuf v1.26.0
+)

vendor/github.com/golang/protobuf/proto/buffer.go:

@@ -0,0 +1,324 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"errors"
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/runtime/protoimpl"
)

const (
	WireVarint     = 0
	WireFixed32    = 5
	WireFixed64    = 1
	WireBytes      = 2
	WireStartGroup = 3
	WireEndGroup   = 4
)

// EncodeVarint returns the varint encoded bytes of v.
func EncodeVarint(v uint64) []byte {
	return protowire.AppendVarint(nil, v)
}

// SizeVarint returns the length of the varint encoded bytes of v.
// This is equal to len(EncodeVarint(v)).
func SizeVarint(v uint64) int {
	return protowire.SizeVarint(v)
}

// DecodeVarint parses a varint encoded integer from b,
// returning the integer value and the length of the varint.
// It returns (0, 0) if there is a parse error.
func DecodeVarint(b []byte) (uint64, int) {
	v, n := protowire.ConsumeVarint(b)
	if n < 0 {
		return 0, 0
	}
	return v, n
}
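
Illustrative sketch, not part of the vendored file: the package-level varint helpers above round-trip a value through its wire encoding (the concrete value 300 is only an example).

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	enc := proto.EncodeVarint(300)  // []byte{0xac, 0x02}
	v, n := proto.DecodeVarint(enc) // 300, 2
	fmt.Println(enc, v, n, proto.SizeVarint(300) == len(enc))
}
```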

// Buffer is a buffer for encoding and decoding the protobuf wire format.
// It may be reused between invocations to reduce memory usage.
type Buffer struct {
	buf           []byte
	idx           int
	deterministic bool
}

// NewBuffer allocates a new Buffer initialized with buf,
// where the contents of buf are considered the unread portion of the buffer.
func NewBuffer(buf []byte) *Buffer {
	return &Buffer{buf: buf}
}

// SetDeterministic specifies whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
//   - Repeated serialization of a message will return the same bytes.
//   - Different processes of the same binary (which may be executing on
//     different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexographical order. This is an implementation detail and
// subject to change.
func (b *Buffer) SetDeterministic(deterministic bool) {
	b.deterministic = deterministic
}

// SetBuf sets buf as the internal buffer,
// where the contents of buf are considered the unread portion of the buffer.
func (b *Buffer) SetBuf(buf []byte) {
	b.buf = buf
	b.idx = 0
}

// Reset clears the internal buffer of all written and unread data.
func (b *Buffer) Reset() {
	b.buf = b.buf[:0]
	b.idx = 0
}

// Bytes returns the internal buffer.
func (b *Buffer) Bytes() []byte {
	return b.buf
}

// Unread returns the unread portion of the buffer.
func (b *Buffer) Unread() []byte {
	return b.buf[b.idx:]
}

// Marshal appends the wire-format encoding of m to the buffer.
func (b *Buffer) Marshal(m Message) error {
	var err error
	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
	return err
}

// Unmarshal parses the wire-format message in the buffer and
// places the decoded results in m.
// It does not reset m before unmarshaling.
func (b *Buffer) Unmarshal(m Message) error {
	err := UnmarshalMerge(b.Unread(), m)
	b.idx = len(b.buf)
	return err
}

type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }

func (m *unknownFields) String() string { panic("not implemented") }
func (m *unknownFields) Reset()         { panic("not implemented") }
func (m *unknownFields) ProtoMessage()  { panic("not implemented") }

// DebugPrint dumps the encoded bytes of b with a header and footer including s
// to stdout. This is only intended for debugging.
func (*Buffer) DebugPrint(s string, b []byte) {
	m := MessageReflect(new(unknownFields))
	m.SetUnknown(b)
	b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
	fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
}

// EncodeVarint appends an unsigned varint encoding to the buffer.
func (b *Buffer) EncodeVarint(v uint64) error {
	b.buf = protowire.AppendVarint(b.buf, v)
	return nil
}

// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag32(v uint64) error {
	return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
}

// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag64(v uint64) error {
	return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
}
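
Illustrative sketch, not part of the vendored file: the zig-zag mapping used by the two methods above sends 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ..., so signed values of small magnitude stay small on the wire, and the matching Decode methods reverse the mapping.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	n := int64(-3) // zig-zag image is 5
	b := proto.NewBuffer(nil)
	if err := b.EncodeZigzag64(uint64(n)); err != nil {
		panic(err)
	}
	u, err := b.DecodeZigzag64()
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(u)) // -3
}
```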

// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed32(v uint64) error {
	b.buf = protowire.AppendFixed32(b.buf, uint32(v))
	return nil
}

// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed64(v uint64) error {
	b.buf = protowire.AppendFixed64(b.buf, uint64(v))
	return nil
}

// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
func (b *Buffer) EncodeRawBytes(v []byte) error {
	b.buf = protowire.AppendBytes(b.buf, v)
	return nil
}

// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
// It does not validate whether v contains valid UTF-8.
func (b *Buffer) EncodeStringBytes(v string) error {
	b.buf = protowire.AppendString(b.buf, v)
	return nil
}

// EncodeMessage appends a length-prefixed encoded message to the buffer.
func (b *Buffer) EncodeMessage(m Message) error {
	var err error
	b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
	return err
}

// DecodeVarint consumes an encoded unsigned varint from the buffer.
func (b *Buffer) DecodeVarint() (uint64, error) {
	v, n := protowire.ConsumeVarint(b.buf[b.idx:])
	if n < 0 {
		return 0, protowire.ParseError(n)
	}
	b.idx += n
	return uint64(v), nil
}

// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
	v, err := b.DecodeVarint()
	if err != nil {
		return 0, err
	}
	return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}

// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
	v, err := b.DecodeVarint()
	if err != nil {
		return 0, err
	}
	return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}

// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed32() (uint64, error) {
	v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
	if n < 0 {
		return 0, protowire.ParseError(n)
	}
	b.idx += n
	return uint64(v), nil
}

// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed64() (uint64, error) {
	v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
	if n < 0 {
		return 0, protowire.ParseError(n)
	}
	b.idx += n
	return uint64(v), nil
}

// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
// If alloc is specified, it returns a copy the raw bytes
// rather than a sub-slice of the buffer.
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
	v, n := protowire.ConsumeBytes(b.buf[b.idx:])
	if n < 0 {
		return nil, protowire.ParseError(n)
	}
	b.idx += n
	if alloc {
		v = append([]byte(nil), v...)
	}
	return v, nil
}
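
Illustrative sketch, not part of the vendored file: a Buffer round trip through the encode and decode methods defined above; the values are arbitrary examples, and decoding consumes the unread portion in the same order the values were appended.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Encode a varint, a fixed-width 32-bit value, and a length-prefixed string.
	b := proto.NewBuffer(nil)
	if err := b.EncodeVarint(300); err != nil {
		panic(err)
	}
	if err := b.EncodeFixed32(7); err != nil {
		panic(err)
	}
	if err := b.EncodeStringBytes("hello"); err != nil {
		panic(err)
	}

	// Consume them back, in the same order, from the unread portion.
	v, _ := b.DecodeVarint()
	f, _ := b.DecodeFixed32()
	s, _ := b.DecodeStringBytes()
	fmt.Println(v, f, s) // 300 7 hello
}
```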

// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
// It does not validate whether the raw bytes contain valid UTF-8.
func (b *Buffer) DecodeStringBytes() (string, error) {
	v, n := protowire.ConsumeString(b.buf[b.idx:])
	if n < 0 {
		return "", protowire.ParseError(n)
	}
	b.idx += n
	return v, nil
}

// DecodeMessage consumes a length-prefixed message from the buffer.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeMessage(m Message) error {
	v, err := b.DecodeRawBytes(false)
	if err != nil {
		return err
	}
	return UnmarshalMerge(v, m)
}

// DecodeGroup consumes a message group from the buffer.
// It assumes that the start group marker has already been consumed and
// consumes all bytes until (and including the end group marker).
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeGroup(m Message) error {
	v, n, err := consumeGroup(b.buf[b.idx:])
	if err != nil {
		return err
	}
	b.idx += n
	return UnmarshalMerge(v, m)
}

// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and the
// the total length of the message (including the end group marker).
func consumeGroup(b []byte) ([]byte, int, error) {
	b0 := b
	depth := 1 // assume this follows a start group marker
	for {
		_, wtyp, tagLen := protowire.ConsumeTag(b)
		if tagLen < 0 {
			return nil, 0, protowire.ParseError(tagLen)
		}
		b = b[tagLen:]

		var valLen int
		switch wtyp {
		case protowire.VarintType:
			_, valLen = protowire.ConsumeVarint(b)
		case protowire.Fixed32Type:
			_, valLen = protowire.ConsumeFixed32(b)
		case protowire.Fixed64Type:
			_, valLen = protowire.ConsumeFixed64(b)
		case protowire.BytesType:
			_, valLen = protowire.ConsumeBytes(b)
		case protowire.StartGroupType:
			depth++
		case protowire.EndGroupType:
			depth--
		default:
			return nil, 0, errors.New("proto: cannot parse reserved wire type")
		}
		if valLen < 0 {
			return nil, 0, protowire.ParseError(valLen)
		}
		b = b[valLen:]

		if depth == 0 {
			return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
		}
	}
}
@ -1,253 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(src Message) Message {
|
||||
in := reflect.ValueOf(src)
|
||||
if in.IsNil() {
|
||||
return src
|
||||
}
|
||||
out := reflect.New(in.Type().Elem())
|
||||
dst := out.Interface().(Message)
|
||||
Merge(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
// Merger is the interface representing objects that can merge messages of the same type.
|
||||
type Merger interface {
|
||||
// Merge merges src into this message.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
//
|
||||
// Merge may panic if called with a different argument type than the receiver.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// generatedMerger is the custom merge method that generated protos will have.
|
||||
// We must add this method since a generate Merge method will conflict with
|
||||
// many existing protos that have a Merge data field already defined.
|
||||
type generatedMerger interface {
|
||||
XXX_Merge(src Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
if m, ok := dst.(Merger); ok {
|
||||
m.Merge(src)
|
||||
return
|
||||
}
|
||||
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
||||
}
|
||||
if in.IsNil() {
|
||||
return // Merge from nil src is a noop
|
||||
}
|
||||
if m, ok := dst.(generatedMerger); ok {
|
||||
m.XXX_Merge(src)
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
}
|
||||
|
||||
func mergeStruct(out, in reflect.Value) {
|
||||
sprop := GetProperties(in.Type())
|
||||
for i := 0; i < in.NumField(); i++ {
|
||||
f := in.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||
}
|
||||
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return
|
||||
}
|
||||
uin := uf.Bytes()
|
||||
if len(uin) > 0 {
|
||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||
}
|
||||
}
|
||||
|
||||
// mergeAny performs a merge between two values of the same type.
|
||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
||||
// prop is set if this is a struct field (it may be nil).
|
||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
||||
if in.Type() == protoMessageType {
|
||||
if !in.IsNil() {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||
} else {
|
||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !viaPtr && isProto3Zero(in) {
|
||||
return
|
||||
}
|
||||
out.Set(in)
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; copy non-nil values.
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
// Allocate destination if it is not set, or set to a different type.
|
||||
// Otherwise we will merge as normal.
|
||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
||||
case reflect.Map:
|
||||
if in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(in.Type()))
|
||||
}
|
||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||
elemKind := in.Type().Elem().Kind()
|
||||
for _, key := range in.MapKeys() {
|
||||
var val reflect.Value
|
||||
switch elemKind {
|
||||
case reflect.Ptr:
|
||||
val = reflect.New(in.Type().Elem().Elem())
|
||||
mergeAny(val, in.MapIndex(key), false, nil)
|
||||
case reflect.Slice:
|
||||
val = in.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
default:
|
||||
val = in.MapIndex(key)
|
||||
}
|
||||
out.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(in.Elem().Type()))
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
||||
case reflect.Slice:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// []byte is a scalar bytes field, not a repeated field.
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value, and should not
|
||||
// be merged.
|
||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Make a deep copy.
|
||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||
// with a nil result.
|
||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||
return
|
||||
}
|
||||
n := in.Len()
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||
}
|
||||
switch in.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(reflect.AppendSlice(out, in))
|
||||
default:
|
||||
for i := 0; i < n; i++ {
|
||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||
mergeAny(x, in.Index(i), false, nil)
|
||||
out.Set(reflect.Append(out, x))
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
mergeStruct(out, in)
|
||||
default:
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to copy %v", in)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeExtension(out, in map[int32]Extension) {
|
||||
for extNum, eIn := range in {
|
||||
eOut := Extension{desc: eIn.desc}
|
||||
if eIn.value != nil {
|
||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
||||
eOut.value = v.Interface()
|
||||
}
|
||||
if eIn.enc != nil {
|
||||
eOut.enc = make([]byte, len(eIn.enc))
|
||||
copy(eOut.enc, eIn.enc)
|
||||
}
|
||||
|
||||
out[extNum] = eOut
|
||||
}
|
||||
}
|
|
@ -1,427 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
// Unmarshal implementations should not clear the receiver.
|
||||
// Any unmarshaled data should be merged into the receiver.
|
||||
// Callers of Unmarshal that do not want to retain existing data
|
||||
// should Reset the receiver before calling Unmarshal.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// newUnmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||
//
|
||||
// This exists to support protoc-gen-go generated messages.
|
||||
// The proto package will stop type-asserting to this interface in the future.
|
||||
//
|
||||
// DO NOT DEPEND ON THIS.
|
||||
type newUnmarshaler interface {
|
||||
XXX_Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
// StartGroup tag is already consumed. This function consumes
|
||||
// EndGroup tag.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
b := p.buf[p.index:]
|
||||
x, y := findEndGroup(b)
|
||||
if x < 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
err := Unmarshal(b[:x], pb)
|
||||
p.index += y
|
||||
return err
|
||||
}

// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
	// If the object can unmarshal itself, let it.
	if u, ok := pb.(newUnmarshaler); ok {
		err := u.XXX_Unmarshal(p.buf[p.index:])
		p.index = len(p.buf)
		return err
	}
	if u, ok := pb.(Unmarshaler); ok {
		// NOTE: The history of proto has unfortunately been inconsistent
		// whether Unmarshaler should or should not implicitly clear itself.
		// Some implementations do, most do not.
		// Thus, calling this here may or may not do what people want.
		//
		// See https://github.com/golang/protobuf/issues/424
		err := u.Unmarshal(p.buf[p.index:])
		p.index = len(p.buf)
		return err
	}

	// Slow workaround for messages that aren't Unmarshalers.
	// This includes some hand-coded .pb.go files and
	// bootstrap protos.
	// TODO: fix all of those and then add Unmarshal to
	// the Message interface. Then:
	// The cast above and code below can be deleted.
	// The old unmarshaler can be deleted.
	// Clients can call Unmarshal directly (can already do that, actually).
	var info InternalMessageInfo
	err := info.Unmarshal(pb, p.buf[p.index:])
	p.index = len(p.buf)
	return err
}

@@ -0,0 +1,63 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"google.golang.org/protobuf/reflect/protoreflect"
)

// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
	if m != nil {
		setDefaults(MessageReflect(m))
	}
}

func setDefaults(m protoreflect.Message) {
	fds := m.Descriptor().Fields()
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		if !m.Has(fd) {
			if fd.HasDefault() && fd.ContainingOneof() == nil {
				v := fd.Default()
				if fd.Kind() == protoreflect.BytesKind {
					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
				}
				m.Set(fd, v)
			}
			continue
		}
	}

	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		// Handle singular message.
		case fd.Cardinality() != protoreflect.Repeated:
			if fd.Message() != nil {
				setDefaults(m.Get(fd).Message())
			}
		// Handle list of messages.
		case fd.IsList():
			if fd.Message() != nil {
				ls := m.Get(fd).List()
				for i := 0; i < ls.Len(); i++ {
					setDefaults(ls.Get(i).Message())
				}
			}
		// Handle map of messages.
		case fd.IsMap():
			if fd.MapValue().Message() != nil {
				ms := m.Get(fd).Map()
				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
					setDefaults(v.Message())
					return true
				})
			}
		}
		return true
	})
}
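// A caller-side sketch of SetDefaults, assuming a proto2 field declared as
// `optional int32 port = 1 [default = 8080];` in a hypothetical generated
// message *examplepb.Config (not part of this package).
func exampleSetDefaults() {
	cfg := &examplepb.Config{}
	proto.SetDefaults(cfg)
	_ = cfg.GetPort() // 8080: the unset scalar field was filled in with its declared default
}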
|
|
@@ -1,63 +1,113 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import "errors"
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
// Deprecated: do not use.
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
// Deprecated: No longer returned.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
|
||||
// Deprecated: No longer returned.
|
||||
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
)
|
||||
|
||||
// Deprecated: Do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
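// A small sketch of the two JSON encodings accepted for an enum; the map
// mirrors a generated Xxx_value map and is invented here for illustration.
func exampleUnmarshalJSONEnum() {
	levels := map[string]int32{"LOW": 0, "HIGH": 1}
	byName, _ := proto.UnmarshalJSONEnum(levels, []byte(`"HIGH"`), "Level") // new style: string name
	byNum, _ := proto.UnmarshalJSONEnum(levels, []byte(`1`), "Level")       // old style: numeric value
	_, _ = byName, byNum                                                    // both are 1
}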
|
||||
|
||||
// Deprecated: Do not use; this type existed for internal-use only.
|
||||
type InternalMessageInfo struct{}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
|
||||
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Size(m Message) int {
|
||||
return protoV2.Size(MessageV2(m))
|
||||
}
|
||||
|
||||
// Deprecated: Do not use; this method existed for internal-use only.
|
||||
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
|
||||
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
|
||||
}
|
||||
|
|
|
@@ -1,48 +1,13 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
type generatedDiscarder interface {
|
||||
XXX_DiscardUnknown()
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
|
@@ -51,300 +16,43 @@ type generatedDiscarder interface {
|
|||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
//
|
||||
// For proto2 messages, the unknown fields of message extensions are only
|
||||
// discarded from messages that have been accessed via GetExtension.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m, ok := m.(generatedDiscarder); ok {
|
||||
m.XXX_DiscardUnknown()
|
||||
return
|
||||
}
|
||||
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
|
||||
// but the master branch has no implementation for InternalMessageInfo,
|
||||
// so it would be more work to replicate that approach.
|
||||
discardLegacy(m)
|
||||
}
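// A caller-side sketch: unknown fields picked up during unmarshaling are kept
// and re-emitted by Marshal unless they are discarded first. msg is assumed to
// be any generated proto.Message.
func exampleDiscardUnknown(data []byte, msg proto.Message) ([]byte, error) {
	if err := proto.Unmarshal(data, msg); err != nil {
		return nil, err
	}
	proto.DiscardUnknown(msg) // drop unrecognized fields from msg and all embedded messages
	return proto.Marshal(msg) // the re-encoded bytes no longer carry the unknown fields
}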
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields.
|
||||
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
di := atomicLoadDiscardInfo(&a.discard)
|
||||
if di == nil {
|
||||
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
||||
atomicStoreDiscardInfo(&a.discard, di)
|
||||
}
|
||||
di.discard(toPointer(&m))
|
||||
}
|
||||
|
||||
type discardInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []discardFieldInfo
|
||||
unrecognized field
|
||||
}
|
||||
|
||||
type discardFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
discard func(src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
discardInfoMap = map[reflect.Type]*discardInfo{}
|
||||
discardInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getDiscardInfo(t reflect.Type) *discardInfo {
|
||||
discardInfoLock.Lock()
|
||||
defer discardInfoLock.Unlock()
|
||||
di := discardInfoMap[t]
|
||||
if di == nil {
|
||||
di = &discardInfo{typ: t}
|
||||
discardInfoMap[t] = di
|
||||
}
|
||||
return di
|
||||
}
|
||||
|
||||
func (di *discardInfo) discard(src pointer) {
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&di.initialized) == 0 {
|
||||
di.computeDiscardInfo()
|
||||
}
|
||||
|
||||
for _, fi := range di.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
fi.discard(sfp)
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
||||
// Ignore lock since DiscardUnknown is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if di.unrecognized.IsValid() {
|
||||
*src.offset(di.unrecognized).toBytes() = nil
|
||||
if m != nil {
|
||||
discardUnknown(MessageReflect(m))
|
||||
}
|
||||
}
|
||||
|
||||
func (di *discardInfo) computeDiscardInfo() {
|
||||
di.lock.Lock()
|
||||
defer di.lock.Unlock()
|
||||
if di.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := di.typ
|
||||
n := t.NumField()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
dfi := discardFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
func discardUnknown(m protoreflect.Message) {
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
// Handle singular message.
|
||||
case fd.Cardinality() != protoreflect.Repeated:
|
||||
if fd.Message() != nil {
|
||||
discardUnknown(m.Get(fd).Message())
|
||||
}
|
||||
// Handle list of messages.
|
||||
case fd.IsList():
|
||||
if fd.Message() != nil {
|
||||
ls := m.Get(fd).List()
|
||||
for i := 0; i < ls.Len(); i++ {
|
||||
discardUnknown(ls.Get(i).Message())
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
// Handle map of messages.
|
||||
case fd.IsMap():
|
||||
if fd.MapValue().Message() != nil {
|
||||
ms := m.Get(fd).Map()
|
||||
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
discardUnknown(v.Message())
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
|
||||
dfi.discard = func(src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
DiscardUnknown(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dfi.discard = func(pointer) {} // Noop
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
dfi.discard = func(src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
DiscardUnknown(sv.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
di.fields = append(di.fields, dfi)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
di.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
di.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&di.initialized, 1)
|
||||
}
|
||||
|
||||
func discardLegacy(m Message) {
|
||||
v := reflect.ValueOf(m)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return
|
||||
}
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
vf := v.Field(i)
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
discardLegacy(vf.Index(j).Interface().(Message))
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
tv := vf.Type().Elem()
|
||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
||||
for _, key := range vf.MapKeys() {
|
||||
val := vf.MapIndex(key)
|
||||
discardLegacy(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
||||
default: // E.g., test_proto.isCommunique_Union interface
|
||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
||||
if !vf.IsNil() {
|
||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
||||
if vf.Kind() == reflect.Ptr {
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(m); err == nil {
|
||||
// Ignore lock since discardLegacy is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
discardLegacy(m)
|
||||
}
|
||||
}
|
||||
// Discard unknown fields.
|
||||
if len(m.GetUnknown()) > 0 {
|
||||
m.SetUnknown(nil)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,203 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
||||
// a struct with a repeated field containing a nil element.
|
||||
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
|
||||
|
||||
// errOneofHasNil is the error returned if Marshal is called with
|
||||
// a struct with a oneof field containing a nil element.
|
||||
errOneofHasNil = errors.New("proto: oneof field has nil value")
|
||||
|
||||
// ErrNil is the error returned if Marshal is called with nil.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// ErrTooLarge is the error returned if Marshal is called with a
|
||||
// message that encodes to >2GB.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
)
|
||||
|
||||
// The fundamental encoders that put bytes on the wire.
// Those that take integer types all accept uint64 and are
// therefore of type valueEncoder.

const maxVarintBytes = 10 // maximum length of a varint

// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
// Not used by the package itself, but helpful to clients
// wishing to use the same encoding.
func EncodeVarint(x uint64) []byte {
	var buf [maxVarintBytes]byte
	var n int
	for n = 0; x > 127; n++ {
		buf[n] = 0x80 | uint8(x&0x7F)
		x >>= 7
	}
	buf[n] = uint8(x)
	n++
	return buf[0:n]
}
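// A worked example: 300 is 0b1_0010_1100, so its varint form is two bytes,
// emitted low seven bits first with the continuation bit set on every byte
// except the last.
func exampleVarint() {
	_ = proto.EncodeVarint(300) // []byte{0xac, 0x02}
}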
|
||||
|
||||
// EncodeVarint writes a varint-encoded integer to the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) EncodeVarint(x uint64) error {
|
||||
for x >= 1<<7 {
|
||||
p.buf = append(p.buf, uint8(x&0x7f|0x80))
|
||||
x >>= 7
|
||||
}
|
||||
p.buf = append(p.buf, uint8(x))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SizeVarint returns the varint encoding size of an integer.
|
||||
func SizeVarint(x uint64) int {
|
||||
switch {
|
||||
case x < 1<<7:
|
||||
return 1
|
||||
case x < 1<<14:
|
||||
return 2
|
||||
case x < 1<<21:
|
||||
return 3
|
||||
case x < 1<<28:
|
||||
return 4
|
||||
case x < 1<<35:
|
||||
return 5
|
||||
case x < 1<<42:
|
||||
return 6
|
||||
case x < 1<<49:
|
||||
return 7
|
||||
case x < 1<<56:
|
||||
return 8
|
||||
case x < 1<<63:
|
||||
return 9
|
||||
}
|
||||
return 10
|
||||
}
|
||||
|
||||
// EncodeFixed64 writes a 64-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed64(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24),
|
||||
uint8(x>>32),
|
||||
uint8(x>>40),
|
||||
uint8(x>>48),
|
||||
uint8(x>>56))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed32 writes a 32-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed32(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24))
|
||||
return nil
|
||||
}
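// A small sketch: fixed-width values ignore magnitude entirely; fixed64 always
// occupies eight little-endian bytes (and fixed32 four).
func exampleFixed64() {
	b := new(proto.Buffer)
	_ = b.EncodeFixed64(1)
	_ = b.Bytes() // []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
}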

// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
	// use signed number to get arithmetic right shift.
	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
	// use signed number to get arithmetic right shift.
	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}
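// A worked example of the zigzag mapping (0->0, -1->1, 1->2, -2->3, 2->4, ...):
// -3 maps to 5, so a sint32 value of -3 is written as the single varint byte 0x05.
func exampleZigzag32() {
	b := new(proto.Buffer)
	_ = b.EncodeZigzag32(uint64(uint32(int32(-3))))
	_ = b.Bytes() // []byte{0x05}
}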
|
||||
|
||||
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) EncodeRawBytes(b []byte) error {
|
||||
p.EncodeVarint(uint64(len(b)))
|
||||
p.buf = append(p.buf, b...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeStringBytes writes an encoded string to the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) EncodeStringBytes(s string) error {
|
||||
p.EncodeVarint(uint64(len(s)))
|
||||
p.buf = append(p.buf, s...)
|
||||
return nil
|
||||
}
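// A small sketch: length-delimited values are a varint byte count followed by
// the raw bytes, so the string "hi" encodes as 0x02 0x68 0x69.
func exampleLengthDelimited() {
	b := new(proto.Buffer)
	_ = b.EncodeStringBytes("hi")
	_ = b.Bytes() // []byte{0x02, 0x68, 0x69}
}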
|
||||
|
||||
// Marshaler is the interface representing objects that can marshal themselves.
|
||||
type Marshaler interface {
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// EncodeMessage writes the protocol buffer to the Buffer,
|
||||
// prefixed by a varint-encoded length.
|
||||
func (p *Buffer) EncodeMessage(pb Message) error {
|
||||
siz := Size(pb)
|
||||
p.EncodeVarint(uint64(siz))
|
||||
return p.Marshal(pb)
|
||||
}
|
||||
|
||||
// All protocol buffer fields are nillable, but be careful.
|
||||
func isNil(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
|
@@ -1,301 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal. Note a "bytes" field,
|
||||
although represented by []byte, is not a repeated field and the
|
||||
rule for the scalar fields described above applies.
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Two map fields are equal iff their lengths are the same,
|
||||
and they contain the same set of elements. Zero-length map
|
||||
fields are equal.
|
||||
- Every other combination of things are not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
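// A caller-side sketch of the NaN caveat documented above. The generated type
// *examplepb.Point with an optional double field X is assumed purely for
// illustration; it is not part of this package.
func exampleEqualNaN() {
	a := &examplepb.Point{X: proto.Float64(math.NaN())}
	b := &examplepb.Point{X: proto.Float64(math.NaN())}
	_ = proto.Equal(a, b) // false under the documented rule: NaN compares unequal to everything
}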
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
return bytes.Equal(u1, u2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1 := extensionAsLegacyType(e1.value)
|
||||
m2 := extensionAsLegacyType(e2.value)
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
if bytes.Equal(e1.enc, e2.enc) {
|
||||
continue
|
||||
}
|
||||
// The bytes are different, but the extensions might still be
|
||||
// equal. We need to decode them to compare.
|
||||
}
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// If both have only encoded form and the bytes are the same,
|
||||
// it is handled above. We get here when the bytes are different.
|
||||
// We don't know how to decode it, so just compare them as byte
|
||||
// slices.
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
return false
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
|
@@ -1,607 +1,356 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
type (
|
||||
// ExtensionDesc represents an extension descriptor and
|
||||
// is used to interact with an extension field in a message.
|
||||
//
|
||||
// Variables of this type are generated in code by protoc-gen-go.
|
||||
ExtensionDesc = protoimpl.ExtensionInfo
|
||||
|
||||
// ExtensionRange represents a range of message extensions.
|
||||
// Used in code generated by protoc-gen-go.
|
||||
ExtensionRange = protoiface.ExtensionRangeV1
|
||||
|
||||
// Deprecated: Do not use; this is an internal type.
|
||||
Extension = protoimpl.ExtensionFieldV1
|
||||
|
||||
// Deprecated: Do not use; this is an internal type.
|
||||
XXX_InternalExtensions = protoimpl.ExtensionFields
|
||||
)
|
||||
|
||||
// ErrMissingExtension reports whether the extension was not present.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, error) {
|
||||
switch p := p.(type) {
|
||||
case extendableProto:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return p, nil
|
||||
case extendableProtoV1:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return extensionAdapter{p}, nil
|
||||
}
|
||||
// Don't allocate a specific error containing %T:
|
||||
// this is the hot path for Clone and MarshalText.
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
func isNilPtr(x interface{}) bool {
|
||||
v := reflect.ValueOf(x)
|
||||
return v.Kind() == reflect.Ptr && v.IsNil()
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
// HasExtension reports whether the extension field is present in m
|
||||
// either as an explicitly populated field or as an unknown field.
|
||||
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
// Check whether any populated known field matches the field number.
|
||||
xtd := xt.TypeDescriptor()
|
||||
if isValidExtension(mr.Descriptor(), xtd) {
|
||||
has = mr.Has(xtd)
|
||||
} else {
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
has = int32(fd.Number()) == xt.Field
|
||||
return !has
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
// Check whether any unknown field matches the field number.
|
||||
for b := mr.GetUnknown(); !has && len(b) > 0; {
|
||||
num, _, n := protowire.ConsumeField(b)
|
||||
has = int32(num) == xt.Field
|
||||
b = b[n:]
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
return has
|
||||
}
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
|
||||
// value is a concrete value for the extension field. Let the type of
|
||||
// desc.ExtensionType be the "API type" and the type of Extension.value
|
||||
// be the "storage type". The API type and storage type are the same except:
|
||||
// * For scalars (except []byte), the API type uses *T,
|
||||
// while the storage type uses T.
|
||||
// * For repeated fields, the API type uses []T, while the storage type
|
||||
// uses *[]T.
|
||||
//
|
||||
// The reason for the divergence is so that the storage type more naturally
|
||||
// matches what is expected of when retrieving the values through the
|
||||
// protobuf reflection APIs.
|
||||
//
|
||||
// The value may only be populated if desc is also populated.
|
||||
value interface{}
|
||||
|
||||
// enc is the raw bytes for the extension field.
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
epb, err := extendable(base)
|
||||
if err != nil {
|
||||
// ClearExtension removes the extension field from m
|
||||
// either as an explicitly populated field or as an unknown field.
|
||||
func ClearExtension(m Message, xt *ExtensionDesc) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
xtd := xt.TypeDescriptor()
|
||||
if isValidExtension(mr.Descriptor(), xtd) {
|
||||
mr.Clear(xtd)
|
||||
} else {
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
if int32(fd.Number()) == xt.Field {
|
||||
mr.Clear(fd)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
return false
|
||||
clearUnknown(mr, fieldNum(xt.Field))
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok := extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
// ClearAllExtensions clears all extensions from m.
|
||||
// This includes populated fields and unknown fields in the extension range.
|
||||
func ClearAllExtensions(m Message) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return
|
||||
}
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, extension.Field)
|
||||
|
||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
mr.Clear(fd)
|
||||
}
|
||||
return true
|
||||
})
|
||||
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
|
||||
}
|
||||
|
||||
// GetExtension retrieves a proto2 extended field from pb.
// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
epb, err := extendable(pb)
if err != nil {
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes for the extension field.
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}

// Retrieve the unknown fields for this extension field.
var bo protoreflect.RawFields
for bi := mr.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if int32(num) == xt.Field {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}

// For type incomplete descriptors, only retrieve the unknown fields.
if xt.ExtensionType == nil {
return []byte(bo), nil
}

// If the extension field only exists as unknown fields, unmarshal it.
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
if !mr.Has(xtd) && len(bo) > 0 {
m2 := mr.New()
if err := (proto.UnmarshalOptions{
Resolver: extensionResolver{xt},
}.Unmarshal(bo, m2.Interface())); err != nil {
return nil, err
}

if extension.ExtendedType != nil {
// can only check type if this is a complete descriptor
if err := checkExtensionTypes(epb, extension); err != nil {
return nil, err
if m2.Has(xtd) {
mr.Set(xtd, m2.Get(xtd))
clearUnknown(mr, fieldNum(xt.Field))
}
}

emap, mu := epb.extensionsRead()
if emap == nil {
return defaultExtensionValue(extension)
}
mu.Lock()
defer mu.Unlock()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}

if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return extensionAsLegacyType(e.value), nil
}

if extension.ExtensionType == nil {
// incomplete descriptor
return e.enc, nil
}

v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}

// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return extensionAsLegacyType(e.value), nil
}

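The legacy accessors in this file (SetExtension, HasExtension, GetExtension, ClearExtension) are easiest to see together. Below is a minimal usage sketch that is not part of the vendored file: the generated package `example.com/project/pb`, its message `Request`, and the extension descriptor `E_Priority` are hypothetical stand-ins for protoc-gen-go output, assumed here to declare an optional int32 extension; the proto helpers themselves come from this package.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/project/pb" // hypothetical generated package
)

func main() {
	msg := &pb.Request{}

	// For a type-complete descriptor the value must match
	// ExtensionDesc.ExtensionType; an int32 extension uses *int32 in the v1 API.
	if err := proto.SetExtension(msg, pb.E_Priority, proto.Int32(7)); err != nil {
		log.Fatal(err)
	}

	if proto.HasExtension(msg, pb.E_Priority) {
		v, err := proto.GetExtension(msg, pb.E_Priority)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(*v.(*int32)) // 7
	}

	// After ClearExtension, GetExtension falls back to the declared default,
	// or reports ErrMissingExtension when the extension has no default.
	proto.ClearExtension(msg, pb.E_Priority)
	if _, err := proto.GetExtension(msg, pb.E_Priority); err == proto.ErrMissingExtension {
		fmt.Println("extension not set and no default")
	}
}
```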
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
if extension.ExtensionType == nil {
// incomplete descriptor, so no default
// Check whether the message has the extension field set or a default.
var pv protoreflect.Value
switch {
case mr.Has(xtd):
pv = mr.Get(xtd)
case xtd.HasDefault():
pv = xtd.Default()
default:
return nil, ErrMissingExtension
}

t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)

sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
v := xt.InterfaceOf(pv)
rv := reflect.ValueOf(v)
if isScalarKind(rv.Kind()) {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
}

if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}

if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}

// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non int32 reflect.value directly
// set it as a int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
return v, nil
}

// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
unmarshal := typeUnmarshaler(t, extension.Tag)
// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }

// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate space to store the pointer/slice.
value := reflect.New(t).Elem()

var err error
for {
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
return r.xt, nil
}
b = b[n:]
wire := int(x) & 7

b, err = unmarshal(b, valToPointer(value.Addr()), wire)
if err != nil {
return nil, err
}

if len(b) == 0 {
break
}
}
return value.Interface(), nil
return protoregistry.GlobalTypes.FindExtensionByName(field)
}

// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
return r.xt, nil
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}

// GetExtensions returns a list of the extensions values present in m,
// corresponding with the provided list of extension descriptors, xts.
// If an extension is missing in m, the corresponding value is nil.
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return nil, errNotExtendable
}

vs := make([]interface{}, len(xts))
for i, xt := range xts {
v, err := GetExtension(m, xt)
if err != nil {
if err == ErrMissingExtension {
err = nil
continue
}
if err != nil {
return
return vs, err
}
vs[i] = v
}
return
return vs, nil
}

// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
// SetExtension sets an extension field in m to the provided value.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return errNotExtendable
}
registeredExtensions := RegisteredExtensions(pb)

emap, mu := epb.extensionsRead()
if emap == nil {
return nil, nil
rv := reflect.ValueOf(v)
if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
}
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
for extid, e := range emap {
desc := e.desc
if desc == nil {
desc = registeredExtensions[extid]
if desc == nil {
desc = &ExtensionDesc{Field: extid}
if rv.Kind() == reflect.Ptr {
if rv.IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
}
if isScalarKind(rv.Elem().Kind()) {
v = rv.Elem().Interface()
}
}

extensions = append(extensions, desc)
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
return extensions, nil
}

// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
epb, err := extendable(pb)
if err != nil {
return err
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}

extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
mr.Set(xtd, xt.ValueOf(v))
clearUnknown(mr, fieldNum(xt.Field))
return nil
}

// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
epb, err := extendable(pb)
if err != nil {
// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
m := epb.extensionsWrite()
for k := range m {
delete(m, k)

// Verify that the raw field is valid.
for b0 := b; len(b0) > 0; {
num, _, n := protowire.ConsumeField(b0)
if int32(num) != fnum {
panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
}
b0 = b0[n:]
}

ClearExtension(m, &ExtensionDesc{Field: fnum})
mr.SetUnknown(append(mr.GetUnknown(), b...))
}

// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.

var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)

// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the later case, an type incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))

// Collect a set of known extension descriptors.
extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
xt := fd.(protoreflect.ExtensionTypeDescriptor)
if xd, ok := xt.Type().(*ExtensionDesc); ok {
extDescs[fd.Number()] = xd
}
m[desc.Field] = desc
}
return true
})

// Collect a set of unknown extension descriptors.
extRanges := mr.Descriptor().ExtensionRanges()
for b := mr.GetUnknown(); len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
if extRanges.Has(num) && extDescs[num] == nil {
extDescs[num] = nil
}
b = b[n:]
}

// Transpose the set of descriptors into a list.
var xts []*ExtensionDesc
for num, xt := range extDescs {
if xt == nil {
xt = &ExtensionDesc{Field: int32(num)}
}
xts = append(xts, xt)
}
return xts, nil
}

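ExtensionDescs pairs naturally with GetExtensions when you want to walk whatever extensions happen to be set on a message. The sketch below is not part of the vendored file and only assumes the two helpers defined above; descriptors that were never registered come back with only the Field number populated, and their values are the raw encoded bytes.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// dumpExtensions prints every extension present on m, including unregistered
// ones whose descriptors carry only the field number.
func dumpExtensions(m proto.Message) error {
	descs, err := proto.ExtensionDescs(m)
	if err != nil {
		return err
	}
	vals, err := proto.GetExtensions(m, descs)
	if err != nil {
		return err
	}
	for i, d := range descs {
		if d.ExtensionType == nil {
			// Type-incomplete descriptor: GetExtension returned raw encoded bytes.
			fmt.Printf("field %d: unregistered extension, %d byte(s)\n", d.Field, len(vals[i].([]byte)))
			continue
		}
		fmt.Printf("field %d (%s): %v\n", d.Field, d.Name, vals[i])
	}
	return nil
}
```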
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
// isValidExtension reports whether xtd is a valid extension descriptor for md.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}

// extensionAsLegacyType converts an value in the storage type as the API type.
// See Extension.value.
func extensionAsLegacyType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
// This function exists for historical reasons since the representation of
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
func isScalarKind(k reflect.Kind) bool {
switch k {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
// Represent primitive types as a pointer to the value.
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Slice:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
return true
default:
return false
}
}
}
return v
}

// extensionAsStorageType converts an value in the API type as the storage type.
// See Extension.value.
func extensionAsStorageType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
Has(protoreflect.FieldNumber) bool
}) {
var bo protoreflect.RawFields
for bi := m.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if !remover.Has(num) {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
case reflect.Slice:
// Represent slice types as a pointer to the value.
if rv.Type().Elem().Kind() != reflect.Uint8 {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
if bi := m.GetUnknown(); len(bi) != len(bo) {
m.SetUnknown(bo)
}
}
return v
}

type fieldNum protoreflect.FieldNumber

func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
return protoreflect.FieldNumber(n1) == n2
}
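These shims delegate to the protoreflect-based implementation in google.golang.org/protobuf, which this file already imports. For comparison, a rough equivalent of the Has/Get/Set/Clear round trip written against that module directly might look like the sketch below; `pb.Request` and `pb.E_Priority` are again hypothetical generated names, and the v2 API passes scalar extension values by value rather than by pointer.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	pb "example.com/project/pb" // hypothetical generated package
)

func main() {
	msg := &pb.Request{}

	// The v2 API takes a protoreflect.ExtensionType and a plain Go scalar.
	proto.SetExtension(msg, pb.E_Priority, int32(7))
	if proto.HasExtension(msg, pb.E_Priority) {
		v := proto.GetExtension(msg, pb.E_Priority).(int32)
		fmt.Println(v) // 7
	}
	proto.ClearExtension(msg, pb.E_Priority)
}
```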
@ -1,965 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and a Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
oneof union {
|
||||
int32 number = 6;
|
||||
string name = 7;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
// Types that are valid to be assigned to Union:
|
||||
// *Test_Number
|
||||
// *Test_Name
|
||||
Union isTest_Union `protobuf_oneof:"union"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
|
||||
type isTest_Union interface {
|
||||
isTest_Union()
|
||||
}
|
||||
|
||||
type Test_Number struct {
|
||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||
}
|
||||
type Test_Name struct {
|
||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||
}
|
||||
|
||||
func (*Test_Number) isTest_Union() {}
|
||||
func (*Test_Name) isTest_Union() {}
|
||||
|
||||
func (m *Test) GetUnion() isTest_Union {
|
||||
if m != nil {
|
||||
return m.Union
|
||||
}
|
||||
return nil
|
||||
}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetNumber() int32 {
|
||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||
return x.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Test) GetName() string {
|
||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
Union: &pb.Test_Name{"fred"},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// Use a type switch to determine which oneof was set.
|
||||
switch u := test.Union.(type) {
|
||||
case *pb.Test_Number: // u.Number contains the number.
|
||||
case *pb.Test_Name: // u.Name contains the string.
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return fmt.Sprintf("proto: required field not set")
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or a InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether it was successful.
|
||||
// Otherwise it returns false for any fatal non-nil errors.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // read point
|
||||
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
// SetDeterministic sets whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexicographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
||||
p.deterministic = deterministic
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||
// with its scalar fields set to their proto-declared non-zero default values.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
|
||||
// sf will be nil if the field can not have a default.
|
||||
// nestedMessage will be true if this is a nested message.
|
||||
// Note that sf.index is not set on return.
|
||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{vs: vs}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
switch vs[0].Kind() {
|
||||
case reflect.Int32, reflect.Int64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
case reflect.Bool:
|
||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
||||
case reflect.String:
|
||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type mapKeySorter struct {
|
||||
vs []reflect.Value
|
||||
less func(a, b reflect.Value) bool
|
||||
}
|
||||
|
||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
||||
func (s mapKeySorter) Less(i, j int) bool {
|
||||
return s.less(s.vs[i], s.vs[j])
|
||||
}
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
// This type is not subject to any compatibility guarantee.
|
||||
type InternalMessageInfo struct {
|
||||
marshal *marshalInfo
|
||||
unmarshal *unmarshalInfo
|
||||
merge *mergeInfo
|
||||
discard *discardInfo
|
||||
}
|
|
@ -1,181 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
return ms.find(pb) != nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,360 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build purego appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const unsafeAllowed = false
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
var zeroField = field([]int{})
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// The pointer type is for the table-driven decoder.
|
||||
// The implementation here uses a reflect.Value of pointer type to
|
||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
||||
// instead of reflect to implement the same (but faster) interface.
|
||||
type pointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
return pointer{v: reflect.ValueOf(*i)}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
if deref {
|
||||
u = u.Elem()
|
||||
}
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{v: v}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// grow updates the slice s in place to make it one element longer.
|
||||
// s must be addressable.
|
||||
// Returns the (addressable) new element.
|
||||
func grow(s reflect.Value) reflect.Value {
|
||||
n, m := s.Len(), s.Cap()
|
||||
if n < m {
|
||||
s.SetLen(n + 1)
|
||||
} else {
|
||||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
|
||||
}
|
||||
return s.Index(n)
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return p.v.Interface().(*int64)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return p.v.Interface().(**int64)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return p.v.Interface().(*[]int64)
|
||||
}
|
||||
|
||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
||||
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
||||
}
|
||||
|
||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return p.v.Interface().(**int32)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return p.v.Interface().(*[]int32)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().(*int32)
|
||||
}
|
||||
// an enum
|
||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
||||
// Then assign it to a **int32 or **enum.
|
||||
// Note: we can convert *int32 to *enum, but we can't convert
|
||||
// **int32 to **enum!
|
||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
||||
}
|
||||
|
||||
// getInt32Slice copies []int32 from p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().([]int32)
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []int32, then assign []enum's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := p.v.Elem()
|
||||
s := make([]int32, slice.Len())
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
s[i] = int32(slice.Index(i).Int())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setInt32Slice copies []int32 into p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
p.v.Elem().Set(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []enum, then assign []int32's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
||||
for i, x := range v {
|
||||
slice.Index(i).SetInt(int64(x))
|
||||
}
|
||||
p.v.Elem().Set(slice)
|
||||
}
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
grow(p.v.Elem()).SetInt(int64(v))
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return p.v.Interface().(*uint64)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return p.v.Interface().(**uint64)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return p.v.Interface().(*[]uint64)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return p.v.Interface().(*uint32)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return p.v.Interface().(**uint32)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return p.v.Interface().(*[]uint32)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return p.v.Interface().(*bool)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return p.v.Interface().(**bool)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return p.v.Interface().(*[]bool)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return p.v.Interface().(*float64)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return p.v.Interface().(**float64)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return p.v.Interface().(*[]float64)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return p.v.Interface().(*float32)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return p.v.Interface().(**float32)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return p.v.Interface().(*[]float32)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return p.v.Interface().(*string)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return p.v.Interface().(**string)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return p.v.Interface().(*[]string)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return p.v.Interface().(*[]byte)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return p.v.Interface().(*[][]byte)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return p.v.Interface().(*XXX_InternalExtensions)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return p.v.Interface().(*map[int32]Extension)
|
||||
}
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
p.v.Elem().Set(q.v)
|
||||
}
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
grow(p.v.Elem()).Set(q.v)
|
||||
}
|
||||
|
||||
// getPointerSlice copies []*T from p as a new []pointer.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
if p.v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
n := p.v.Elem().Len()
|
||||
s := make([]pointer, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setPointerSlice copies []pointer into p as a new []*T.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
if v == nil {
|
||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
||||
return
|
||||
}
|
||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
||||
for _, p := range v {
|
||||
s = reflect.Append(s, p.v)
|
||||
}
|
||||
p.v.Elem().Set(s)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
if p.v.Elem().IsNil() {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||
}
|
||||
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
// TODO: check that p.v.Type().Elem() == t?
|
||||
return p.v
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
|
||||
var atomicLock sync.Mutex
|
|
@ -1,313 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !purego,!appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const unsafeAllowed = true
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
const zeroField = field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != invalidField
|
||||
}
|
||||
|
||||
// The pointer type below is for the new table-driven encoder/decoder.
|
||||
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||
// the same (but slower) interface.
|
||||
type pointer struct {
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// size of pointer
|
||||
var ptrSize = unsafe.Sizeof(uintptr(0))
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
// Saves ~25ns over the equivalent:
|
||||
// return valToPointer(reflect.ValueOf(*i))
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
} else {
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
if deref {
|
||||
p.p = *(*unsafe.Pointer)(p.p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
// For safety, we should panic if !f.IsValid, however calling panic causes
|
||||
// this to no longer be inlineable, which is a serious performance cost.
|
||||
/*
|
||||
if !f.IsValid() {
|
||||
panic("invalid field")
|
||||
}
|
||||
*/
|
||||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return (*int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return (**int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return (*[]int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return (*int32)(p.p)
|
||||
}
|
||||
|
||||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return (**int32)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return (*[]int32)(p.p)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
return *(**int32)(p.p)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
*(**int32)(p.p) = &v
|
||||
}
|
||||
|
||||
// getInt32Slice loads a []int32 from p.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
return *(*[]int32)(p.p)
|
||||
}
|
||||
|
||||
// setInt32Slice stores a []int32 to p.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
*(*[]int32)(p.p) = v
|
||||
}
|
||||
|
||||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
s := (*[]int32)(p.p)
|
||||
*s = append(*s, v)
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return (*uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return (**uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return (*[]uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return (*uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return (**uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return (*[]uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return (*bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return (**bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return (*[]bool)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return (*float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return (**float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return (*[]float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return (*float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return (**float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return (*[]float32)(p.p)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return (*string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return (**string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return (*[]string)(p.p)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return (*[]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return (*[][]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(p.p)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return (*map[int32]Extension)(p.p)
|
||||
}
|
||||
|
||||
// getPointerSlice loads []*T from p as a []pointer.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We load it as []pointer.
|
||||
return *(*[]pointer)(p.p)
|
||||
}
|
||||
|
||||
// setPointerSlice stores []pointer into p as a []*T.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We store it as []pointer.
|
||||
*(*[]pointer)(p.p) = v
|
||||
}
|
||||
|
||||
// getPointer loads the pointer at p and returns it.
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||
}
|
||||
|
||||
// setPointer stores the pointer q at p.
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
*(*unsafe.Pointer)(p.p) = q.p
|
||||
}
|
||||
|
||||
// append q to the slice pointed to by p.
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
s := (*[]unsafe.Pointer)(p.p)
|
||||
*s = append(*s, q.p)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||
}
|
||||
|
||||
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||
// object of type t stored at p.
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(t, p.p)
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
|
@ -1,162 +1,104 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
WireFixed32 = 5
|
||||
)
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
type tagMap struct {
|
||||
fastTags []int
|
||||
slowTags map[int]int
|
||||
}
|
||||
|
||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||
// the tagMap slice rather than its map.
|
||||
const tagMapFastLimit = 1024
|
||||
|
||||
func (p *tagMap) get(t int) (int, bool) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
if t >= len(p.fastTags) {
|
||||
return 0, false
|
||||
}
|
||||
fi := p.fastTags[t]
|
||||
return fi, fi >= 0
|
||||
}
|
||||
fi, ok := p.slowTags[t]
|
||||
return fi, ok
|
||||
}
|
||||
|
||||
func (p *tagMap) put(t int, fi int) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
for len(p.fastTags) < t+1 {
|
||||
p.fastTags = append(p.fastTags, -1)
|
||||
}
|
||||
p.fastTags[t] = fi
|
||||
return
|
||||
}
|
||||
if p.slowTags == nil {
|
||||
p.slowTags = make(map[int]int)
|
||||
}
|
||||
p.slowTags[t] = fi
|
||||
}
|
||||
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
// StructProperties represents protocol buffer type information for a
|
||||
// generated protobuf message in the open-struct API.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
// Prop are the properties for each field.
|
||||
//
|
||||
// Fields belonging to a oneof are stored in OneofTypes instead, with a
|
||||
// single Properties representing the parent oneof held here.
|
||||
//
|
||||
// The order of Prop matches the order of fields in the Go struct.
|
||||
// Struct fields that are not related to protobufs have a "XXX_" prefix
|
||||
// in the Properties.Name and must be ignored by the user.
|
||||
Prop []*Properties
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the original name of a field.
|
||||
// It is keyed by the protobuf field name.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// OneofProperties represents information about a specific field in a oneof.
|
||||
type OneofProperties struct {
|
||||
Type reflect.Type // pointer to generated struct type for this oneof field
|
||||
Field int // struct field number of the containing oneof in the message
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
// Properties represents the type information for a protobuf message field.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
JSONName string // name to use for JSON; determined by protoc
|
||||
// Name is a placeholder name with little meaningful semantic value.
|
||||
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
|
||||
Name string
|
||||
// OrigName is the protobuf field name or oneof name.
|
||||
OrigName string
|
||||
// JSONName is the JSON name for the protobuf field.
|
||||
JSONName string
|
||||
// Enum is a placeholder name for enums.
|
||||
// For historical reasons, this is neither the Go name for the enum,
|
||||
// nor the protobuf name for the enum.
|
||||
Enum string // Deprecated: Do not use.
|
||||
// Weak contains the full name of the weakly referenced message.
|
||||
Weak string
|
||||
// Wire is a string representation of the wire type.
|
||||
Wire string
|
||||
// WireType is the protobuf wire type for the field.
|
||||
WireType int
|
||||
// Tag is the protobuf field number.
|
||||
Tag int
|
||||
// Required reports whether this is a required field.
|
||||
Required bool
|
||||
// Optional reports whether this is a optional field.
|
||||
Optional bool
|
||||
// Repeated reports whether this is a repeated field.
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field
|
||||
oneof bool // whether this is a oneof field
|
||||
// Packed reports whether this is a packed repeated field of scalars.
|
||||
Packed bool
|
||||
// Proto3 reports whether this field operates under the proto3 syntax.
|
||||
Proto3 bool
|
||||
// Oneof reports whether this field belongs within a oneof.
|
||||
Oneof bool
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
// Default is the default value in string form.
|
||||
Default string
|
||||
// HasDefault reports whether the field has a default value.
|
||||
HasDefault bool
|
||||
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
// MapKeyProp is the properties for the key field for a map field.
|
||||
MapKeyProp *Properties
|
||||
// MapValProp is the properties for the value field for a map field.
|
||||
MapValProp *Properties
|
||||
}
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
MapKeyProp *Properties // set for map types only
|
||||
MapValProp *Properties // set for map types only
|
||||
// OneofProperties represents the type information for a protobuf oneof.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type OneofProperties struct {
|
||||
// Type is a pointer to the generated wrapper type for the field value.
|
||||
// This is nil for messages that are not in the open-struct API.
|
||||
Type reflect.Type
|
||||
// Field is the index into StructProperties.Prop for the containing oneof.
|
||||
Field int
|
||||
// Prop is the properties for the field.
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
s += "," + strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
|
@ -170,18 +112,21 @@ func (p *Properties) String() string {
|
|||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != p.OrigName {
|
||||
if p.JSONName != "" {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if len(p.Weak) > 0 {
|
||||
s += ",weak=" + p.Weak
|
||||
}
|
||||
if p.Proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.Oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
|
@ -189,356 +134,173 @@ func (p *Properties) String() string {
|
|||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
log.Printf("proto: tag has too few fields: %q", s)
|
||||
return
|
||||
func (p *Properties) Parse(tag string) {
|
||||
// For example: "bytes,49,opt,name=foo,def=hello!"
|
||||
for len(tag) > 0 {
|
||||
i := strings.IndexByte(tag, ',')
|
||||
if i < 0 {
|
||||
i = len(tag)
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
log.Printf("proto: tag has unknown wire type: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
outer:
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
switch s := tag[:i]; {
|
||||
case strings.HasPrefix(s, "name="):
|
||||
p.OrigName = s[len("name="):]
|
||||
case strings.HasPrefix(s, "json="):
|
||||
p.JSONName = s[len("json="):]
|
||||
case strings.HasPrefix(s, "enum="):
|
||||
p.Enum = s[len("enum="):]
|
||||
case strings.HasPrefix(s, "weak="):
|
||||
p.Weak = s[len("weak="):]
|
||||
case strings.Trim(s, "0123456789") == "":
|
||||
n, _ := strconv.ParseUint(s, 10, 32)
|
||||
p.Tag = int(n)
|
||||
case s == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
case s == "req":
|
||||
p.Required = true
|
||||
case s == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
case s == "varint" || s == "zigzag32" || s == "zigzag64":
|
||||
p.Wire = s
|
||||
p.WireType = WireVarint
|
||||
case s == "fixed32":
|
||||
p.Wire = s
|
||||
p.WireType = WireFixed32
|
||||
case s == "fixed64":
|
||||
p.Wire = s
|
||||
p.WireType = WireFixed64
|
||||
case s == "bytes":
|
||||
p.Wire = s
|
||||
p.WireType = WireBytes
|
||||
case s == "group":
|
||||
p.Wire = s
|
||||
p.WireType = WireStartGroup
|
||||
case s == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "json="):
|
||||
p.JSONName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
p.proto3 = true
|
||||
case f == "oneof":
|
||||
p.oneof = true
|
||||
case strings.HasPrefix(f, "def="):
|
||||
case s == "proto3":
|
||||
p.Proto3 = true
|
||||
case s == "oneof":
|
||||
p.Oneof = true
|
||||
case strings.HasPrefix(s, "def="):
|
||||
// The default tag is special in that everything afterwards is the
|
||||
// default regardless of the presence of commas.
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break outer
|
||||
}
|
||||
p.Default, i = tag[len("def="):], len(tag)
|
||||
}
|
||||
tag = strings.TrimPrefix(tag[i:], ",")
|
||||
}
|
||||
}
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// setFieldProps initializes the field properties for submessages and maps.
|
||||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
switch t1 := typ; t1.Kind() {
|
||||
case reflect.Ptr:
|
||||
if t1.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t1.Elem()
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t2.Elem()
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.mtype = t1
|
||||
p.MapKeyProp = &Properties{}
|
||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.MapValProp = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setFieldProps(typ, f, lockGetProp)
|
||||
|
||||
if typ != nil && typ.Kind() == reflect.Map {
|
||||
p.MapKeyProp = new(Properties)
|
||||
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
|
||||
p.MapValProp = new(Properties)
|
||||
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
propertiesMu sync.RWMutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
|
||||
// t must represent a generated struct type of a protocol message.
|
||||
// GetProperties returns the list of properties for the type represented by t,
|
||||
// which must be a generated protocol buffer message in the open-struct API,
|
||||
// where protobuf message fields are represented by exported Go struct fields.
|
||||
//
|
||||
// Deprecated: Use protobuf reflection instead.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic("proto: type must have kind struct")
|
||||
if p, ok := propertiesCache.Load(t); ok {
|
||||
return p.(*StructProperties)
|
||||
}
|
||||
|
||||
// Most calls to GetProperties in a long-running program will be
|
||||
// retrieving details for types we have seen before.
|
||||
propertiesMu.RLock()
|
||||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
return sprop
|
||||
}
|
||||
|
||||
propertiesMu.Lock()
|
||||
sprop = getPropertiesLocked(t)
|
||||
propertiesMu.Unlock()
|
||||
return sprop
|
||||
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
|
||||
return p.(*StructProperties)
|
||||
}
|
||||
|
||||
type (
|
||||
oneofFuncsIface interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
oneofWrappersIface interface {
|
||||
XXX_OneofWrappers() []interface{}
|
||||
}
|
||||
)
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
return prop
|
||||
func newProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||
}
|
||||
|
||||
var hasOneof bool
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
// Construct a list of properties for each field in the struct.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
f := t.Field(i)
|
||||
tagField := f.Tag.Get("protobuf")
|
||||
p.Init(f.Type, f.Name, tagField, &f)
|
||||
|
||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||
if oneof != "" {
|
||||
// Oneof fields don't use the traditional protobuf tag.
|
||||
p.OrigName = oneof
|
||||
tagOneof := f.Tag.Get("protobuf_oneof")
|
||||
if tagOneof != "" {
|
||||
hasOneof = true
|
||||
p.OrigName = tagOneof
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
|
||||
// Rename unrelated struct fields with the "XXX_" prefix since so much
|
||||
// user code simply checks for this to exclude special fields.
|
||||
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
|
||||
p.Name = "XXX_" + p.Name
|
||||
p.OrigName = "XXX_" + p.OrigName
|
||||
} else if p.Weak != "" {
|
||||
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
|
||||
}
|
||||
print("\n")
|
||||
|
||||
prop.Prop = append(prop.Prop, p)
|
||||
}
|
||||
|
||||
// Construct a mapping of oneof field names to properties.
|
||||
if hasOneof {
|
||||
var oneofWrappers []interface{}
|
||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
|
||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
|
||||
}
|
||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
|
||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
|
||||
}
|
||||
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
|
||||
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
|
||||
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
var oots []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oots = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oots = m.XXX_OneofWrappers()
|
||||
}
|
||||
if len(oots) > 0 {
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
for _, wrapper := range oneofWrappers {
|
||||
p := &OneofProperties{
|
||||
Type: reflect.ValueOf(wrapper).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
}
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
}
|
||||
f := p.Type.Elem().Field(0)
|
||||
p.Prop.Name = f.Name
|
||||
p.Prop.Parse(f.Tag.Get("protobuf"))
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
// Determine the struct field that contains this oneof.
|
||||
// Each wrapper is assignable to exactly one parent field.
|
||||
var foundOneof bool
|
||||
for i := 0; i < t.NumField() && !foundOneof; i++ {
|
||||
if p.Type.AssignableTo(t.Field(i).Type) {
|
||||
p.Field = i
|
||||
foundOneof = true
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
if !foundOneof {
|
||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||
}
|
||||
prop.OneofTypes[p.Prop.OrigName] = p
|
||||
}
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// A global registry of enum types.
|
||||
// The generated code will register the generated maps by calling RegisterEnum.
|
||||
|
||||
var enumValueMaps = make(map[string]map[string]int32)
|
||||
|
||||
// RegisterEnum is called from the generated code to install the enum descriptor
|
||||
// maps into the global table to aid parsing text format protocol buffers.
|
||||
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
|
||||
if _, ok := enumValueMaps[typeName]; ok {
|
||||
panic("proto: duplicate enum registered: " + typeName)
|
||||
}
|
||||
enumValueMaps[typeName] = valueMap
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from names to integers of the
|
||||
// enum type enumType, or a nil if not found.
|
||||
func EnumValueMap(enumType string) map[string]int32 {
|
||||
return enumValueMaps[enumType]
|
||||
}
|
||||
|
||||
// A registry of all linked message types.
|
||||
// The string is a fully-qualified proto name ("pkg.Message").
|
||||
var (
|
||||
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
|
||||
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
|
||||
revProtoTypes = make(map[reflect.Type]string)
|
||||
)
|
||||
|
||||
// RegisterType is called from generated code and maps from the fully qualified
|
||||
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||
func RegisterType(x Message, name string) {
|
||||
if _, ok := protoTypedNils[name]; ok {
|
||||
// TODO: Some day, make this a panic.
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
|
||||
// Generated code always calls RegisterType with nil x.
|
||||
// This check is just for extra safety.
|
||||
protoTypedNils[name] = x
|
||||
} else {
|
||||
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
|
||||
}
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code and maps from the fully qualified
|
||||
// proto name to the native map type of the proto map definition.
|
||||
func RegisterMapType(x interface{}, name string) {
|
||||
if reflect.TypeOf(x).Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
|
||||
}
|
||||
if _, ok := protoMapTypes[name]; ok {
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
protoMapTypes[name] = t
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// MessageName returns the fully-qualified proto name for the given message type.
|
||||
func MessageName(x Message) string {
|
||||
type xname interface {
|
||||
XXX_MessageName() string
|
||||
}
|
||||
if m, ok := x.(xname); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return revProtoTypes[reflect.TypeOf(x)]
|
||||
}
|
||||
|
||||
// MessageType returns the message type (pointer to struct) for a named message.
|
||||
// The type is not guaranteed to implement proto.Message if the name refers to a
|
||||
// map entry.
|
||||
func MessageType(name string) reflect.Type {
|
||||
if t, ok := protoTypedNils[name]; ok {
|
||||
return reflect.TypeOf(t)
|
||||
}
|
||||
return protoMapTypes[name]
|
||||
}
|
||||
|
||||
// A registry of all linked proto files.
|
||||
var (
|
||||
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
||||
)
|
||||
|
||||
// RegisterFile is called from generated code and maps from the
|
||||
// full file name of a .proto file to its compressed FileDescriptorProto.
|
||||
func RegisterFile(filename string, fileDescriptor []byte) {
|
||||
protoFiles[filename] = fileDescriptor
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
||||
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
|
||||
func (sp *StructProperties) Len() int { return len(sp.Prop) }
|
||||
func (sp *StructProperties) Less(i, j int) bool { return false }
|
||||
func (sp *StructProperties) Swap(i, j int) { return }
|
||||
|
|
|
@ -0,0 +1,167 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package proto provides functionality for handling protocol buffer messages.
|
||||
// In particular, it provides marshaling and unmarshaling between a protobuf
|
||||
// message and the binary wire format.
|
||||
//
|
||||
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
|
||||
// more information.
|
||||
//
|
||||
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
|
||||
package proto
|
||||
|
||||
import (
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
ProtoPackageIsVersion1 = true
|
||||
ProtoPackageIsVersion2 = true
|
||||
ProtoPackageIsVersion3 = true
|
||||
ProtoPackageIsVersion4 = true
|
||||
)
|
||||
|
||||
// GeneratedEnum is any enum type generated by protoc-gen-go
|
||||
// which is a named int32 kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedEnum interface{}
|
||||
|
||||
// GeneratedMessage is any message type generated by protoc-gen-go
|
||||
// which is a pointer to a named struct kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedMessage interface{}
|
||||
|
||||
// Message is a protocol buffer message.
|
||||
//
|
||||
// This is the v1 version of the message interface and is marginally better
|
||||
// than an empty interface as it lacks any method to programatically interact
|
||||
// with the contents of the message.
|
||||
//
|
||||
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
|
||||
// exposes protobuf reflection as a first-class feature of the interface.
|
||||
//
|
||||
// To convert a v1 message to a v2 message, use the MessageV2 function.
|
||||
// To convert a v2 message to a v1 message, use the MessageV1 function.
|
||||
type Message = protoiface.MessageV1
|
||||
|
||||
// MessageV1 converts either a v1 or v2 message to a v1 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
|
||||
return protoimpl.X.ProtoMessageV1Of(m)
|
||||
}
|
||||
|
||||
// MessageV2 converts either a v1 or v2 message to a v2 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV2(m GeneratedMessage) protoV2.Message {
|
||||
return protoimpl.X.ProtoMessageV2Of(m)
|
||||
}
|
||||
|
||||
// MessageReflect returns a reflective view for a message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageReflect(m Message) protoreflect.Message {
|
||||
return protoimpl.X.MessageOf(m)
|
||||
}
|
||||
|
||||
// Marshaler is implemented by messages that can marshal themselves.
|
||||
// This interface is used by the following functions: Size, Marshal,
|
||||
// Buffer.Marshal, and Buffer.EncodeMessage.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Marshaler interface {
|
||||
// Marshal formats the encoded bytes of the message.
|
||||
// It should be deterministic and emit valid protobuf wire data.
|
||||
// The caller takes ownership of the returned buffer.
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// Unmarshaler is implemented by messages that can unmarshal themselves.
|
||||
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
|
||||
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Unmarshaler interface {
|
||||
// Unmarshal parses the encoded bytes of the protobuf wire input.
|
||||
// The provided buffer is only valid for during method call.
|
||||
// It should not reset the receiver message.
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Merger is implemented by messages that can merge themselves.
|
||||
// This interface is used by the following functions: Clone and Merge.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Merger interface {
|
||||
// Merge merges the contents of src into the receiver message.
|
||||
// It clones all data structures in src such that it aliases no mutable
|
||||
// memory referenced by src.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// RequiredNotSetError is an error type returned when
|
||||
// marshaling or unmarshaling a message with missing required fields.
|
||||
type RequiredNotSetError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.err != nil {
|
||||
return e.err.Error()
|
||||
}
|
||||
return "proto: required field not set"
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func checkRequiredNotSet(m protoV2.Message) error {
|
||||
if err := protoV2.CheckInitialized(m); err != nil {
|
||||
return &RequiredNotSetError{err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of src.
|
||||
func Clone(src Message) Message {
|
||||
return MessageV1(protoV2.Clone(MessageV2(src)))
|
||||
}
|
||||
|
||||
// Merge merges src into dst, which must be messages of the same type.
|
||||
//
|
||||
// Populated scalar fields in src are copied to dst, while populated
|
||||
// singular messages in src are merged into dst by recursively calling Merge.
|
||||
// The elements of every list field in src is appended to the corresponded
|
||||
// list fields in dst. The entries of every map field in src is copied into
|
||||
// the corresponding map field in dst, possibly replacing existing entries.
|
||||
// The unknown fields of src are appended to the unknown fields of dst.
|
||||
func Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Equal reports whether two messages are equal.
|
||||
// If two messages marshal to the same bytes under deterministic serialization,
|
||||
// then Equal is guaranteed to report true.
|
||||
//
|
||||
// Two messages are equal if they are the same protobuf message type,
|
||||
// have the same set of populated known and extension field values,
|
||||
// and the same set of unknown field values.
|
||||
//
|
||||
// Scalar values are compared with the equivalent of the == operator in Go,
|
||||
// except bytes values which are compared using bytes.Equal and
|
||||
// floating point values which specially treat NaNs as equal.
|
||||
// Message values are compared by recursively calling Equal.
|
||||
// Lists are equal if each element value is also equal.
|
||||
// Maps are equal if they have the same set of keys, where the pair of values
|
||||
// for each key is also equal.
|
||||
func Equal(x, y Message) bool {
|
||||
return protoV2.Equal(MessageV2(x), MessageV2(y))
|
||||
}
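// Illustrative sketch, not part of the vendored file above: Equal compares
// message contents rather than identity, so independently built messages with
// the same populated fields compare equal, and Clone produces a value that is
// equal until it is modified.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	x := &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}
	y := &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}
	fmt.Println(proto.Equal(x, y)) // true: same type, same populated fields

	z := proto.Clone(x).(*descriptorpb.FileDescriptorProto)
	z.Name = proto.String("b.proto")
	fmt.Println(proto.Equal(x, z)) // false: Name differs
}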
|
||||
|
||||
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
||||
ms, ok := md.(interface{ IsMessageSet() bool })
|
||||
return ok && ms.IsMessageSet()
|
||||
}
|
|
@ -0,0 +1,317 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/reflect/protodesc"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// filePath is the path to the proto source file.
|
||||
type filePath = string // e.g., "google/protobuf/descriptor.proto"
|
||||
|
||||
// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
|
||||
type fileDescGZIP = []byte
|
||||
|
||||
var fileCache sync.Map // map[filePath]fileDescGZIP
|
||||
|
||||
// RegisterFile is called from generated code to register the compressed
|
||||
// FileDescriptorProto with the file path for a proto source file.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
|
||||
func RegisterFile(s filePath, d fileDescGZIP) {
|
||||
// Decompress the descriptor.
|
||||
zr, err := gzip.NewReader(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
||||
}
|
||||
b, err := ioutil.ReadAll(zr)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
||||
}
|
||||
|
||||
// Construct a protoreflect.FileDescriptor from the raw descriptor.
|
||||
// Note that DescBuilder.Build automatically registers the constructed
|
||||
// file descriptor with the v2 registry.
|
||||
protoimpl.DescBuilder{RawDescriptor: b}.Build()
|
||||
|
||||
// Locally cache the raw descriptor form for the file.
|
||||
fileCache.Store(s, d)
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto given the file path
|
||||
// for a proto source file. It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
|
||||
func FileDescriptor(s filePath) fileDescGZIP {
|
||||
if v, ok := fileCache.Load(s); ok {
|
||||
return v.(fileDescGZIP)
|
||||
}
|
||||
|
||||
// Find the descriptor in the v2 registry.
|
||||
var b []byte
|
||||
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
|
||||
b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
|
||||
}
|
||||
|
||||
// Locally cache the raw descriptor form for the file.
|
||||
if len(b) > 0 {
|
||||
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
|
||||
return v.(fileDescGZIP)
|
||||
}
|
||||
return nil
|
||||
}
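// Illustrative sketch, not part of the vendored file above: fetching the
// gzip-compressed FileDescriptorProto for a registered file and decoding it.
// Importing descriptorpb registers "google/protobuf/descriptor.proto" with
// the v2 registry, which FileDescriptor falls back to as shown above.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	gz := proto.FileDescriptor("google/protobuf/descriptor.proto")
	if gz == nil {
		fmt.Println("file not registered")
		return
	}
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		panic(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fd := new(descriptorpb.FileDescriptorProto)
	if err := proto.Unmarshal(raw, fd); err != nil {
		panic(err)
	}
	fmt.Println(fd.GetName(), "declares", len(fd.GetMessageType()), "messages")
}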
|
||||
|
||||
// enumName is the name of an enum. For historical reasons, the enum name is
|
||||
// neither the full Go name nor the full protobuf name of the enum.
|
||||
// The name is the dot-separated combination of just the proto package that the
|
||||
// enum is declared within followed by the Go type name of the generated enum.
|
||||
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
|
||||
|
||||
// enumsByName maps enum values by name to their numeric counterpart.
|
||||
type enumsByName = map[string]int32
|
||||
|
||||
// enumsByNumber maps enum values by number to their name counterpart.
|
||||
type enumsByNumber = map[int32]string
|
||||
|
||||
var enumCache sync.Map // map[enumName]enumsByName
|
||||
var numFilesCache sync.Map // map[protoreflect.FullName]int
|
||||
|
||||
// RegisterEnum is called from the generated code to register the mapping of
|
||||
// enum value names to enum numbers for the enum identified by s.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
|
||||
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
|
||||
if _, ok := enumCache.Load(s); ok {
|
||||
panic("proto: duplicate enum registered: " + s)
|
||||
}
|
||||
enumCache.Store(s, m)
|
||||
|
||||
// This does not forward registration to the v2 registry since this API
|
||||
// lacks sufficient information to construct a complete v2 enum descriptor.
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from enum value names to enum numbers for
|
||||
// the enum of the given name. It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
|
||||
func EnumValueMap(s enumName) enumsByName {
|
||||
if v, ok := enumCache.Load(s); ok {
|
||||
return v.(enumsByName)
|
||||
}
|
||||
|
||||
// Check whether the cache is stale. If the number of files in the current
|
||||
// package differs, then it means that some enums may have been recently
|
||||
// registered upstream that we do not know about.
|
||||
var protoPkg protoreflect.FullName
|
||||
if i := strings.LastIndexByte(s, '.'); i >= 0 {
|
||||
protoPkg = protoreflect.FullName(s[:i])
|
||||
}
|
||||
v, _ := numFilesCache.Load(protoPkg)
|
||||
numFiles, _ := v.(int)
|
||||
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
|
||||
return nil // cache is up-to-date; was not found earlier
|
||||
}
|
||||
|
||||
// Update the enum cache for all enums declared in the given proto package.
|
||||
numFiles = 0
|
||||
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
|
||||
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
|
||||
name := protoimpl.X.LegacyEnumName(ed)
|
||||
if _, ok := enumCache.Load(name); !ok {
|
||||
m := make(enumsByName)
|
||||
evs := ed.Values()
|
||||
for i := evs.Len() - 1; i >= 0; i-- {
|
||||
ev := evs.Get(i)
|
||||
m[string(ev.Name())] = int32(ev.Number())
|
||||
}
|
||||
enumCache.LoadOrStore(name, m)
|
||||
}
|
||||
})
|
||||
numFiles++
|
||||
return true
|
||||
})
|
||||
numFilesCache.Store(protoPkg, numFiles)
|
||||
|
||||
// Check cache again for enum map.
|
||||
if v, ok := enumCache.Load(s); ok {
|
||||
return v.(enumsByName)
|
||||
}
|
||||
return nil
|
||||
}
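// Illustrative sketch, not part of the vendored file above: looking up an
// enum's name-to-number map. The key format is assumed (per the enumName
// comment above) to be "<proto package>.<Go enum type name>", e.g.
// "google.protobuf.FieldDescriptorProto_Type" for the field-type enum in
// descriptor.proto; the blank import registers that enum.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	m := proto.EnumValueMap("google.protobuf.FieldDescriptorProto_Type")
	if m == nil {
		fmt.Println("enum not registered")
		return
	}
	fmt.Println(m["TYPE_STRING"]) // 9, if the assumed key format is right
}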
|
||||
|
||||
// walkEnums recursively walks all enums declared in d.
|
||||
func walkEnums(d interface {
|
||||
Enums() protoreflect.EnumDescriptors
|
||||
Messages() protoreflect.MessageDescriptors
|
||||
}, f func(protoreflect.EnumDescriptor)) {
|
||||
eds := d.Enums()
|
||||
for i := eds.Len() - 1; i >= 0; i-- {
|
||||
f(eds.Get(i))
|
||||
}
|
||||
mds := d.Messages()
|
||||
for i := mds.Len() - 1; i >= 0; i-- {
|
||||
walkEnums(mds.Get(i), f)
|
||||
}
|
||||
}
|
||||
|
||||
// messageName is the full name of a protobuf message.
|
||||
type messageName = string
|
||||
|
||||
var messageTypeCache sync.Map // map[messageName]reflect.Type
|
||||
|
||||
// RegisterType is called from generated code to register the message Go type
|
||||
// for a message of the given name.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
|
||||
func RegisterType(m Message, s messageName) {
|
||||
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
|
||||
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
messageTypeCache.Store(s, reflect.TypeOf(m))
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code to register the Go map type
|
||||
// for a protobuf message representing a map entry.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMapType(m interface{}, s messageName) {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("invalid map kind: %v", t))
|
||||
}
|
||||
if _, ok := messageTypeCache.Load(s); ok {
|
||||
panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
|
||||
}
|
||||
messageTypeCache.Store(s, t)
|
||||
}
|
||||
|
||||
// MessageType returns the message type for a named message.
|
||||
// It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
|
||||
func MessageType(s messageName) reflect.Type {
|
||||
if v, ok := messageTypeCache.Load(s); ok {
|
||||
return v.(reflect.Type)
|
||||
}
|
||||
|
||||
// Derive the message type from the v2 registry.
|
||||
var t reflect.Type
|
||||
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
|
||||
t = messageGoType(mt)
|
||||
}
|
||||
|
||||
// If we could not get a concrete type, it is possible that it is a
|
||||
// pseudo-message for a map entry.
|
||||
if t == nil {
|
||||
d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
|
||||
if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
|
||||
kt := goTypeForField(md.Fields().ByNumber(1))
|
||||
vt := goTypeForField(md.Fields().ByNumber(2))
|
||||
t = reflect.MapOf(kt, vt)
|
||||
}
|
||||
}
|
||||
|
||||
// Locally cache the message type for the given name.
|
||||
if t != nil {
|
||||
v, _ := messageTypeCache.LoadOrStore(s, t)
|
||||
return v.(reflect.Type)
|
||||
}
|
||||
return nil
|
||||
}
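// Illustrative sketch, not part of the vendored file above: resolving the Go
// type registered for a full protobuf message name. The blank import
// registers the descriptor.proto messages with the v2 registry that
// MessageType consults as a fallback.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	t := proto.MessageType("google.protobuf.FileDescriptorProto")
	fmt.Println(t) // *descriptorpb.FileDescriptorProto, if the lookup succeeds
}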
|
||||
|
||||
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
|
||||
switch k := fd.Kind(); k {
|
||||
case protoreflect.EnumKind:
|
||||
if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
|
||||
return enumGoType(et)
|
||||
}
|
||||
return reflect.TypeOf(protoreflect.EnumNumber(0))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
|
||||
return messageGoType(mt)
|
||||
}
|
||||
return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
|
||||
default:
|
||||
return reflect.TypeOf(fd.Default().Interface())
|
||||
}
|
||||
}
|
||||
|
||||
func enumGoType(et protoreflect.EnumType) reflect.Type {
|
||||
return reflect.TypeOf(et.New(0))
|
||||
}
|
||||
|
||||
func messageGoType(mt protoreflect.MessageType) reflect.Type {
|
||||
return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
|
||||
}
|
||||
|
||||
// MessageName returns the full protobuf name for the given message type.
|
||||
//
|
||||
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
|
||||
func MessageName(m Message) messageName {
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
|
||||
}
|
||||
|
||||
// RegisterExtension is called from the generated code to register
|
||||
// the extension descriptor.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
|
||||
func RegisterExtension(d *ExtensionDesc) {
|
||||
if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
type extensionsByNumber = map[int32]*ExtensionDesc
|
||||
|
||||
var extensionCache sync.Map // map[messageName]extensionsByNumber
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions for the
|
||||
// provided protobuf message, indexed by the extension field number.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
|
||||
func RegisteredExtensions(m Message) extensionsByNumber {
|
||||
// Check whether the cache is stale. If the number of extensions for
|
||||
// the given message differs, then it means that some extensions were
|
||||
// recently registered upstream that we do not know about.
|
||||
s := MessageName(m)
|
||||
v, _ := extensionCache.Load(s)
|
||||
xs, _ := v.(extensionsByNumber)
|
||||
if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
|
||||
return xs // cache is up-to-date
|
||||
}
|
||||
|
||||
// Cache is stale, re-compute the extensions map.
|
||||
xs = make(extensionsByNumber)
|
||||
protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
|
||||
if xd, ok := xt.(*ExtensionDesc); ok {
|
||||
xs[int32(xt.TypeDescriptor().Number())] = xd
|
||||
} else {
|
||||
// TODO: This implies that the protoreflect.ExtensionType is a
|
||||
// custom type not generated by protoc-gen-go. We could try and
|
||||
// convert the type to an ExtensionDesc.
|
||||
}
|
||||
return true
|
||||
})
|
||||
extensionCache.Store(s, xs)
|
||||
return xs
|
||||
}
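// Illustrative sketch, not part of the vendored file above: listing the
// extension field numbers registered for a message. The genproto annotations
// package is assumed to register the google.api.http extension of
// MethodOptions as a side effect of being imported; without some such import
// the returned map is simply empty.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	for num := range proto.RegisteredExtensions(new(descriptorpb.MethodOptions)) {
		fmt.Println("extension field", num)
	}
}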
|
@ -1,654 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Merge merges the src message into dst.
|
||||
// This assumes that dst and src are of the same type and are non-nil.
|
||||
func (a *InternalMessageInfo) Merge(dst, src Message) {
|
||||
mi := atomicLoadMergeInfo(&a.merge)
|
||||
if mi == nil {
|
||||
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
|
||||
atomicStoreMergeInfo(&a.merge, mi)
|
||||
}
|
||||
mi.merge(toPointer(&dst), toPointer(&src))
|
||||
}
|
||||
|
||||
type mergeInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []mergeFieldInfo
|
||||
unrecognized field // Offset of XXX_unrecognized
|
||||
}
|
||||
|
||||
type mergeFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
|
||||
// isPointer reports whether the value in the field is a pointer.
|
||||
// This is true for the following situations:
|
||||
// * Pointer to struct
|
||||
// * Pointer to basic type (proto2 only)
|
||||
// * Slice (first value in slice header is a pointer)
|
||||
// * String (first value in string header is a pointer)
|
||||
isPointer bool
|
||||
|
||||
// basicWidth reports the width of the field assuming that it is directly
|
||||
// embedded in the struct (as is the case for basic types in proto3).
|
||||
// The possible values are:
|
||||
// 0: invalid
|
||||
// 1: bool
|
||||
// 4: int32, uint32, float32
|
||||
// 8: int64, uint64, float64
|
||||
basicWidth int
|
||||
|
||||
// Where dst and src are pointers to the types being merged.
|
||||
merge func(dst, src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
mergeInfoMap = map[reflect.Type]*mergeInfo{}
|
||||
mergeInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getMergeInfo(t reflect.Type) *mergeInfo {
|
||||
mergeInfoLock.Lock()
|
||||
defer mergeInfoLock.Unlock()
|
||||
mi := mergeInfoMap[t]
|
||||
if mi == nil {
|
||||
mi = &mergeInfo{typ: t}
|
||||
mergeInfoMap[t] = mi
|
||||
}
|
||||
return mi
|
||||
}
|
||||
|
||||
// merge merges src into dst assuming they are both of type *mi.typ.
|
||||
func (mi *mergeInfo) merge(dst, src pointer) {
|
||||
if dst.isNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&mi.initialized) == 0 {
|
||||
mi.computeMergeInfo()
|
||||
}
|
||||
|
||||
for _, fi := range mi.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
|
||||
continue
|
||||
}
|
||||
if fi.basicWidth > 0 {
|
||||
switch {
|
||||
case fi.basicWidth == 1 && !*sfp.toBool():
|
||||
continue
|
||||
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
|
||||
continue
|
||||
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dfp := dst.offset(fi.field)
|
||||
fi.merge(dfp, sfp)
|
||||
}
|
||||
|
||||
// TODO: Make this faster?
|
||||
out := dst.asPointerTo(mi.typ).Elem()
|
||||
in := src.asPointerTo(mi.typ).Elem()
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
if mi.unrecognized.IsValid() {
|
||||
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
|
||||
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mi *mergeInfo) computeMergeInfo() {
|
||||
mi.lock.Lock()
|
||||
defer mi.lock.Unlock()
|
||||
if mi.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := mi.typ
|
||||
n := t.NumField()
|
||||
|
||||
props := GetProperties(t)
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
mfi := mergeFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
switch tf.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.String:
|
||||
// As a special case, we assume slices and strings are pointers
|
||||
// since we know that the first field in the SliceHeader or
|
||||
// StringHeader is a data pointer.
|
||||
mfi.isPointer = true
|
||||
case reflect.Bool:
|
||||
mfi.basicWidth = 1
|
||||
case reflect.Int32, reflect.Uint32, reflect.Float32:
|
||||
mfi.basicWidth = 4
|
||||
case reflect.Int64, reflect.Uint64, reflect.Float64:
|
||||
mfi.basicWidth = 8
|
||||
}
|
||||
}
|
||||
|
||||
// Unwrap tf to get at its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic("both pointer and slice for basic type in " + tf.Name())
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Int32:
|
||||
switch {
|
||||
case isSlice: // E.g., []int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfsp := src.toInt32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfs := src.getInt32Slice()
|
||||
if sfs != nil {
|
||||
dfs := dst.getInt32Slice()
|
||||
dfs = append(dfs, sfs...)
|
||||
if dfs == nil {
|
||||
dfs = []int32{}
|
||||
}
|
||||
dst.setInt32Slice(dfs)
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfpp := src.toInt32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfp := src.getInt32Ptr()
|
||||
if sfp != nil {
|
||||
dfp := dst.getInt32Ptr()
|
||||
if dfp == nil {
|
||||
dst.setInt32Ptr(*sfp)
|
||||
} else {
|
||||
*dfp = *sfp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt32(); v != 0 {
|
||||
*dst.toInt32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Int64:
|
||||
switch {
|
||||
case isSlice: // E.g., []int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toInt64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toInt64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt64(); v != 0 {
|
||||
*dst.toInt64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint32:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint32(); v != 0 {
|
||||
*dst.toUint32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint64:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint64(); v != 0 {
|
||||
*dst.toUint64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float32:
|
||||
switch {
|
||||
case isSlice: // E.g., []float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat32(); v != 0 {
|
||||
*dst.toFloat32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float64:
|
||||
switch {
|
||||
case isSlice: // E.g., []float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat64(); v != 0 {
|
||||
*dst.toFloat64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch {
|
||||
case isSlice: // E.g., []bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toBoolSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toBoolSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []bool{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toBoolPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toBoolPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Bool(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toBool(); v {
|
||||
*dst.toBool() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
switch {
|
||||
case isSlice: // E.g., []string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toStringSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toStringSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []string{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toStringPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toStringPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = String(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toString(); v != "" {
|
||||
*dst.toString() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
isProto3 := props.Prop[i].proto3
|
||||
switch {
|
||||
case isPointer:
|
||||
panic("bad pointer in byte slice case in " + tf.Name())
|
||||
case tf.Elem().Kind() != reflect.Uint8:
|
||||
panic("bad element kind in byte slice case in " + tf.Name())
|
||||
case isSlice: // E.g., [][]byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbsp := src.toBytesSlice()
|
||||
if *sbsp != nil {
|
||||
dbsp := dst.toBytesSlice()
|
||||
for _, sb := range *sbsp {
|
||||
if sb == nil {
|
||||
*dbsp = append(*dbsp, nil)
|
||||
} else {
|
||||
*dbsp = append(*dbsp, append([]byte{}, sb...))
|
||||
}
|
||||
}
|
||||
if *dbsp == nil {
|
||||
*dbsp = [][]byte{}
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., []byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbp := src.toBytes()
|
||||
if *sbp != nil {
|
||||
dbp := dst.toBytes()
|
||||
if !isProto3 || len(*sbp) > 0 {
|
||||
*dbp = append([]byte{}, *sbp...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("message field %s without pointer", tf))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
if sps != nil {
|
||||
dps := dst.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
var dp pointer
|
||||
if !sp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
dps = append(dps, dp)
|
||||
}
|
||||
if dps == nil {
|
||||
dps = []pointer{}
|
||||
}
|
||||
dst.setPointerSlice(dps)
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
dp := dst.getPointer()
|
||||
if dp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
dst.setPointer(dp)
|
||||
}
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in map case in " + tf.Name())
|
||||
default: // E.g., map[K]V
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
dm := dst.asPointerTo(tf).Elem()
|
||||
if dm.IsNil() {
|
||||
dm.Set(reflect.MakeMap(tf))
|
||||
}
|
||||
|
||||
switch tf.Elem().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(Clone(val.Interface().(Message)))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
default: // Basic type (e.g., string)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in interface case in " + tf.Name())
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
du := dst.asPointerTo(tf).Elem()
|
||||
typ := su.Elem().Type()
|
||||
if du.IsNil() || du.Elem().Type() != typ {
|
||||
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
|
||||
}
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
dv := du.Elem().Elem().Field(0)
|
||||
if dv.Kind() == reflect.Ptr && dv.IsNil() {
|
||||
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
Merge(dv.Interface().(Message), sv.Interface().(Message))
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
|
||||
default: // Basic type (e.g., string)
|
||||
dv.Set(sv)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("merger not found for type:%s", tf))
|
||||
}
|
||||
mi.fields = append(mi.fields, mfi)
|
||||
}
|
||||
|
||||
mi.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
mi.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&mi.initialized, 1)
|
||||
}
|
@ -1,845 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for writing the text protocol buffer format.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
backslashT = []byte{'\\', 't'}
|
||||
backslashDQ = []byte{'\\', '"'}
|
||||
backslashBS = []byte{'\\', '\\'}
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
WriteByte(byte) error
|
||||
}
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
ind int
|
||||
complete bool // if the current position is a complete line
|
||||
compact bool // whether to write out as a one-liner
|
||||
w writer
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
return io.WriteString(w.w, s)
|
||||
}
|
||||
// WriteString is typically called without newlines, so this
|
||||
// codepath and its copy are rare. We copy to avoid
|
||||
// duplicating all of Write's logic here.
|
||||
return w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
n, err = w.w.Write(p)
|
||||
w.complete = false
|
||||
return n, err
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
if err := w.w.WriteByte(' '); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if i+1 < len(frags) {
|
||||
if err := w.w.WriteByte('\n'); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
err := w.w.WriteByte(c)
|
||||
w.complete = c == '\n'
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *textWriter) indent() { w.ind++ }
|
||||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Print("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
}
|
||||
|
||||
func writeName(w *textWriter, props *Properties) error {
|
||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Wire != "group" {
|
||||
return w.WriteByte(':')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isAny reports whether sv is a google.protobuf.Any message
|
||||
func isAny(sv reflect.Value) bool {
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
t, ok := sv.Addr().Interface().(wkt)
|
||||
return ok && t.XXX_WellKnownType() == "Any"
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||
turl := sv.FieldByName("TypeUrl")
|
||||
val := sv.FieldByName("Value")
|
||||
if !turl.IsValid() || !val.IsValid() {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
b, ok := val.Interface().([]byte)
|
||||
if !ok {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
parts := strings.Split(turl.String(), "/")
|
||||
mt := MessageType(parts[len(parts)-1])
|
||||
if mt == nil {
|
||||
return false, nil
|
||||
}
|
||||
m := reflect.New(mt.Elem())
|
||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
u := turl.String()
|
||||
if requiresQuotes(u) {
|
||||
writeString(w, u)
|
||||
} else {
|
||||
w.Write([]byte(u))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.ind++
|
||||
}
|
||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.ind--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if tm.ExpandAny && isAny(sv) {
|
||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
fv := sv.Field(i)
|
||||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if name == "XXX_NoUnkeyedLiteral" {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
// XXX_extensions map[int32]proto.Extension
|
||||
// The first is handled here;
|
||||
// the second is handled at the bottom of this function.
|
||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Field not filled in. This could be an optional field or
|
||||
// a required field that wasn't filled in. Either way, there
|
||||
// isn't anything we can show for it.
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||
// Repeated field that is empty, or a bytes field that is unused.
|
||||
continue
|
||||
}
|
||||
|
||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||
// Repeated field.
|
||||
for j := 0; j < fv.Len(); j++ {
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v := fv.Index(j)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
// A nil message in a repeated field is not valid,
|
||||
// but we can handle that more gracefully than panicking.
|
||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Map {
|
||||
// Map fields are rendered as a repeated struct with key/value fields.
|
||||
keys := fv.MapKeys()
|
||||
sort.Sort(mapKeys(keys))
|
||||
for _, key := range keys {
|
||||
val := fv.MapIndex(key)
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// open struct
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
// key
|
||||
if _, err := w.WriteString("key:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// nil values aren't legal, but we can avoid panicking because of them.
|
||||
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
||||
// value
|
||||
if _, err := w.WriteString("value:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// close struct
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
||||
// empty bytes field
|
||||
continue
|
||||
}
|
||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
||||
// proto3 non-repeated scalar field; skip if zero value
|
||||
if isProto3Zero(fv) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if fv.Kind() == reflect.Interface {
|
||||
// Check if it is a oneof.
|
||||
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
||||
// fv is nil, or holds a pointer to generated struct.
|
||||
// That generated struct has exactly one field,
|
||||
// which has a protobuf struct tag.
|
||||
if fv.IsNil() {
|
||||
continue
|
||||
}
|
||||
inner := fv.Elem().Elem() // interface -> *T -> T
|
||||
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
||||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
||||
props.Parse(tag)
|
||||
// Write the value in the oneof, not the oneof itself.
|
||||
fv = inner.Field(0)
|
||||
|
||||
// Special case to cope with malformed messages gracefully:
|
||||
// If the value in the oneof is a nil pointer, don't panic
|
||||
// in writeAny.
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Use errors.New so writeAny won't render quotes.
|
||||
msg := errors.New("/* nil */")
|
||||
fv = reflect.ValueOf(&msg).Elem()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := tm.writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if _, err := extendable(pv.Interface()); err == nil {
|
||||
if err := tm.writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||
x := v.Float()
|
||||
var b []byte
|
||||
switch {
|
||||
case math.IsInf(x, 1):
|
||||
b = posInf
|
||||
case math.IsInf(x, -1):
|
||||
b = negInf
|
||||
case math.IsNaN(x):
|
||||
b = nan
|
||||
}
|
||||
if b != nil {
|
||||
_, err := w.Write(b)
|
||||
return err
|
||||
}
|
||||
// Other values are handled below.
|
||||
}
|
||||
|
||||
// We don't attempt to serialise every possible value type; only those
|
||||
// that can occur in protocol buffers.
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Bytes())); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
if err := writeString(w, v.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.Struct:
|
||||
// Required/optional group/message.
|
||||
var bra, ket byte = '<', '>'
|
||||
if props != nil && props.Wire == "group" {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
if err := w.WriteByte(bra); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if v.CanAddr() {
|
||||
// Calling v.Interface on a struct causes the reflect package to
|
||||
// copy the entire struct. This is racy with the new Marshaler
|
||||
// since we atomically update the XXX_sizecache.
|
||||
//
|
||||
// Thus, we retrieve a pointer to the struct if possible to avoid
|
||||
// a race since v.Interface on the pointer doesn't copy the struct.
|
||||
//
|
||||
// If v is not addressable, then we are not worried about a race
|
||||
// since it implies that the binary Marshaler cannot possibly be
|
||||
// mutating this value.
|
||||
v = v.Addr()
|
||||
}
|
||||
if v.Type().Implements(textMarshalerType) {
|
||||
text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
if err := tm.writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := fmt.Fprint(w, v.Interface())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equivalent to C's isprint.
|
||||
func isprint(c byte) bool {
|
||||
return c >= 0x20 && c < 0x7f
|
||||
}
|
||||
|
||||
// writeString writes a string in the protocol buffer text format.
|
||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
||||
// we treat the string as a byte sequence, and we use octal escapes.
|
||||
// These differences are to maintain interoperability with the other
|
||||
// languages' implementations of the text format.
|
||||
func writeString(w *textWriter, s string) error {
|
||||
// use WriteByte here to get any needed indent
|
||||
if err := w.WriteByte('"'); err != nil {
|
||||
return err
|
||||
}
|
||||
// Loop over the bytes, not the runes.
|
||||
for i := 0; i < len(s); i++ {
|
||||
var err error
|
||||
// Divergence from C++: we don't escape apostrophes.
|
||||
// There's no need to escape them, and the C++ parser
|
||||
// copes with a naked apostrophe.
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
_, err = w.w.Write(backslashN)
|
||||
case '\r':
|
||||
_, err = w.w.Write(backslashR)
|
||||
case '\t':
|
||||
_, err = w.w.Write(backslashT)
|
||||
case '"':
|
||||
_, err = w.w.Write(backslashDQ)
|
||||
case '\\':
|
||||
_, err = w.w.Write(backslashBS)
|
||||
default:
|
||||
if isprint(c) {
|
||||
err = w.w.WriteByte(c)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.WriteByte('"')
|
||||
}
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b := NewBuffer(data)
|
||||
for b.index < len(b.buf) {
|
||||
x, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||
return err
|
||||
}
|
||||
wire, tag := x&7, x>>3
|
||||
if wire == WireEndGroup {
|
||||
w.unindent()
|
||||
if _, err := w.Write(endBraceNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
if wire != WireStartGroup {
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !w.compact || wire == WireStartGroup {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch wire {
|
||||
case WireBytes:
|
||||
buf, e := b.DecodeRawBytes(false)
|
||||
if e == nil {
|
||||
_, err = fmt.Fprintf(w, "%q", buf)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||
}
|
||||
case WireFixed32:
|
||||
x, err = b.DecodeFixed32()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireFixed64:
|
||||
x, err = b.DecodeFixed64()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireStartGroup:
|
||||
err = w.WriteByte('{')
|
||||
w.indent()
|
||||
case WireVarint:
|
||||
x, err = b.DecodeVarint()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
default:
|
||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
||||
if err == nil {
|
||||
_, err = fmt.Fprint(w, x)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type int32Slice []int32
|
||||
|
||||
func (s int32Slice) Len() int { return len(s) }
|
||||
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep, _ := extendable(pv.Interface())
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m, mu := ep.extensionsRead()
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
mu.Lock()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
mu.Unlock()
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
var desc *ExtensionDesc
|
||||
if emap != nil {
|
||||
desc = emap[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// Unknown extension.
|
||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed getting extension: %v", err)
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
remain := w.ind * 2
|
||||
for remain > 0 {
|
||||
n := remain
|
||||
if n > len(spaces) {
|
||||
n = len(spaces)
|
||||
}
|
||||
w.w.Write(spaces[:n])
|
||||
remain -= n
|
||||
}
|
||||
w.complete = false
|
||||
}
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line).
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
return nil
|
||||
}
|
||||
var bw *bufio.Writer
|
||||
ww, ok := w.(writer)
|
||||
if !ok {
|
||||
bw = bufio.NewWriter(w)
|
||||
ww = bw
|
||||
}
|
||||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: tm.Compact,
|
||||
}
|
||||
|
||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = aw.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := tm.writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Text is the same as Marshal, but returns the string directly.
|
||||
func (tm *TextMarshaler) Text(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
tm.Marshal(&buf, pb)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
var (
	defaultTextMarshaler = TextMarshaler{}
	compactTextMarshaler = TextMarshaler{Compact: true}
)

// TODO: consider removing some of the Marshal functions below.

// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }

// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }

// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }

// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
|
|
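// roundTripText is an illustrative sketch added by the editor, not part of the
// vendored file. It shows how the wrappers above compose: any message can be
// written in the text format and parsed back into a fresh instance of the same
// concrete type. dst is assumed to be a newly allocated message of src's type.
func roundTripText(src, dst Message) error {
	s := MarshalTextString(src) // e.g. `name: "world"` for a message with one string field
	return UnmarshalText(s, dst)
}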
@ -0,0 +1,801 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"encoding"
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"unicode/utf8"

	"google.golang.org/protobuf/encoding/prototext"
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

const wrapTextUnmarshalV2 = false

// ParseError is returned by UnmarshalText.
type ParseError struct {
	Message string

	// Deprecated: Do not use.
	Line, Offset int
}

func (e *ParseError) Error() string {
	if wrapTextUnmarshalV2 {
		return e.Message
	}
	if e.Line == 1 {
		return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
	}
	return fmt.Sprintf("line %d: %v", e.Line, e.Message)
}

// UnmarshalText parses a proto text formatted string into m.
func UnmarshalText(s string, m Message) error {
	if u, ok := m.(encoding.TextUnmarshaler); ok {
		return u.UnmarshalText([]byte(s))
	}

	m.Reset()
	mi := MessageV2(m)

	if wrapTextUnmarshalV2 {
		err := prototext.UnmarshalOptions{
			AllowPartial: true,
		}.Unmarshal([]byte(s), mi)
		if err != nil {
			return &ParseError{Message: err.Error()}
		}
		return checkRequiredNotSet(mi)
	} else {
		if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
			return err
		}
		return checkRequiredNotSet(mi)
	}
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
seen := make(map[protoreflect.FieldNumber]bool)
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := protoreflect.Name(tok.value)
|
||||
fd := fds.ByName(name)
|
||||
switch {
|
||||
case fd == nil:
|
||||
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
||||
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
||||
fd = gd
|
||||
}
|
||||
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
||||
fd = nil
|
||||
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
||||
fd = nil
|
||||
}
|
||||
if fd == nil {
|
||||
typeName := string(md.FullName())
|
||||
if m, ok := m.Interface().(Message); ok {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() == reflect.Ptr {
|
||||
typeName = t.Elem().String()
|
||||
}
|
||||
}
|
||||
return p.errorf("unknown field name %q in %v", name, typeName)
|
||||
}
|
||||
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
||||
}
|
||||
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
||||
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
||||
}
|
||||
seen[fd.Number()] = true
|
||||
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
if v, err = p.unmarshalValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
||||
name, err := p.consumeExtensionOrAnyName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
||||
if err != nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
||||
}
|
||||
m2 := mt.New()
|
||||
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := protoV2.Marshal(m2.Interface())
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
||||
}
|
||||
|
||||
urlFD := m.Descriptor().Fields().ByName("type_url")
|
||||
valFD := m.Descriptor().Fields().ByName("value")
|
||||
if seen[urlFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
||||
}
|
||||
if seen[valFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
||||
}
|
||||
m.Set(urlFD, protoreflect.ValueOfString(name))
|
||||
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
||||
seen[urlFD.Number()] = true
|
||||
seen[valFD.Number()] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
xname := protoreflect.FullName(name)
|
||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||
if xt == nil && isMessageSet(m.Descriptor()) {
|
||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||
}
|
||||
if xt == nil {
|
||||
return p.errorf("unrecognized extension %q", name)
|
||||
}
|
||||
fd := xt.TypeDescriptor()
|
||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
||||
}
|
||||
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
v, err = p.unmarshalValue(v, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
return p.consumeOptionalSeparator()
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := v.List()
|
||||
var err error
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
return v, nil
|
||||
case fd.IsMap():
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order.
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
keyFD := fd.MapKey()
|
||||
valFD := fd.MapValue()
|
||||
|
||||
mv := v.Map()
|
||||
kv := keyFD.Default()
|
||||
vv := mv.NewValue()
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
var err error
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
mv.Set(kv.MapKey(), vv)
|
||||
return v, nil
|
||||
default:
|
||||
p.back()
|
||||
return p.unmarshalSingularValue(v, fd)
|
||||
}
|
||||
}
|
||||
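// Editor's illustrative note, not part of the vendored file: for a field
// declared as `map<string, int32> counts = 1;` (a hypothetical message), the
// map branch above accepts any of the equivalent spellings below, since the
// key and value entries may appear in either order and with either bracket
// style:
//
//	counts: <key: "a" value: 1>
//	counts: {key: "a" value: 1}
//	counts: {value: 1 key: "a"}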
|
||||
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
return protoreflect.ValueOfBool(true), nil
|
||||
case "false", "0", "f", "False":
|
||||
return protoreflect.ValueOfBool(false), nil
|
||||
}
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(x)), nil
|
||||
}
|
||||
|
||||
		// The C++ parser accepts large positive hex numbers that use
		// two's complement arithmetic to represent negative numbers.
		// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(x)), nil
|
||||
}
|
||||
|
||||
		// The C++ parser accepts large positive hex numbers that use
		// two's complement arithmetic to represent negative numbers.
		// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfUint32(uint32(x)), nil
|
||||
}
|
||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfUint64(uint64(x)), nil
|
||||
}
|
||||
case protoreflect.FloatKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
||||
return protoreflect.ValueOfFloat32(float32(x)), nil
|
||||
}
|
||||
case protoreflect.DoubleKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
||||
return protoreflect.ValueOfFloat64(float64(x)), nil
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfString(tok.unquoted), nil
|
||||
}
|
||||
case protoreflect.BytesKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
||||
}
|
||||
case protoreflect.EnumKind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
||||
}
|
||||
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
||||
if vd != nil {
|
||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||
}
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
err := p.unmarshalMessage(v.Message(), terminator)
|
||||
return v, err
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||
}
|
||||
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
||||
}
|
||||
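// twosComplementHexExample is an illustrative helper added by the editor, not
// part of the vendored file. It reproduces the fallback used above for 32-bit
// integer kinds: a hex literal too large for ParseInt is reinterpreted through
// two's complement, so "0xffffffff" decodes to int32(-1), matching the C++
// text parser.
func twosComplementHexExample(s string) (int32, bool) {
	if x, err := strconv.ParseInt(s, 0, 32); err == nil {
		return int32(x), true
	}
	if strings.HasPrefix(s, "0x") {
		if x, err := strconv.ParseUint(s, 0, 32); err == nil {
			return int32(-(int64(^x) + 1)), true
		}
	}
	return 0, false
}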
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
if fd.Message() == nil {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
||||
// the following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
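// consumeNameExample is an illustrative helper added by the editor, not part of
// the vendored file. It shows what consumeExtensionOrAnyName sees after the
// opening '[' has already been read: an Any type URL is tokenized into pieces
// around the '/' and rejoined, here yielding
// "type.googleapis.com/my.pkg.SomeMessage" (a hypothetical type URL).
func consumeNameExample() (string, error) {
	p := newTextParser(`type.googleapis.com/my.pkg.SomeMessage]`)
	return p.consumeExtensionOrAnyName()
}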
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in unmarshalMessage to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(rune(i)), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
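// unquoteExample is an illustrative helper added by the editor, not part of the
// vendored file. It exercises the escape handling implemented by unquoteC and
// unescape above: octal, hex, and Unicode escapes all decode to the expected
// bytes or runes.
func unquoteExample() []string {
	a, _ := unquoteC(`\101`, '"')   // "A" (octal escape)
	b, _ := unquoteC(`\x41`, '"')   // "A" (hex escape)
	c, _ := unquoteC(`\u00e9`, '"') // "é" (Unicode code point)
	return []string{a, b, c}
}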
|
||||
func isIdentOrNumberChar(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
		return true
	case '0' <= c && c <= '9':
		return true
	}
	switch c {
	case '-', '+', '.', '_':
		return true
	}
	return false
}

func isWhitespace(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}

func isQuote(c byte) bool {
	switch c {
	case '"', '\'':
		return true
	}
	return false
}

@ -0,0 +1,560 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"bytes"
	"encoding"
	"fmt"
	"io"
	"math"
	"sort"
	"strings"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
)

const wrapTextMarshalV2 = false

// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
	Compact   bool // use compact text format (one line)
	ExpandAny bool // expand google.protobuf.Any messages of known types
}
|
||||
|
||||
// Marshal writes the proto text format of m to w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
||||
b, err := tm.marshal(m)
|
||||
if len(b) > 0 {
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Text returns a proto text formatted string of m.
|
||||
func (tm *TextMarshaler) Text(m Message) string {
|
||||
b, _ := tm.marshal(m)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return []byte("<nil>"), nil
|
||||
}
|
||||
|
||||
if wrapTextMarshalV2 {
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
return m.MarshalText()
|
||||
}
|
||||
|
||||
opts := prototext.MarshalOptions{
|
||||
AllowPartial: true,
|
||||
EmitUnknown: true,
|
||||
}
|
||||
if !tm.Compact {
|
||||
opts.Indent = " "
|
||||
}
|
||||
if !tm.ExpandAny {
|
||||
opts.Resolver = (*protoregistry.Types)(nil)
|
||||
}
|
||||
return opts.Marshal(mr.Interface())
|
||||
} else {
|
||||
w := &textWriter{
|
||||
compact: tm.Compact,
|
||||
expandAny: tm.ExpandAny,
|
||||
complete: true,
|
||||
}
|
||||
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
b, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.Write(b)
|
||||
return w.buf, nil
|
||||
}
|
||||
|
||||
err := w.writeMessage(mr)
|
||||
return w.buf, err
|
||||
}
|
||||
}
|
||||
|
||||
var (
	defaultTextMarshaler = TextMarshaler{}
	compactTextMarshaler = TextMarshaler{Compact: true}
)

// MarshalText writes the proto text format of m to w.
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }

// MarshalTextString returns a proto text formatted string of m.
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }

// CompactText writes the compact proto text format of m to w.
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }

// CompactTextString returns a compact proto text formatted string of m.
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }

var (
	newline         = []byte("\n")
	endBraceNewline = []byte("}\n")
	posInf          = []byte("inf")
	negInf          = []byte("-inf")
	nan             = []byte("nan")
)
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
compact bool // same as TextMarshaler.Compact
|
||||
expandAny bool // same as TextMarshaler.ExpandAny
|
||||
complete bool // whether the current position is a complete line
|
||||
indent int // indentation level; never negative
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, _ error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, p...)
|
||||
w.complete = false
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
w.buf = append(w.buf, ' ')
|
||||
n++
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
if i+1 < len(frags) {
|
||||
w.buf = append(w.buf, '\n')
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, c)
|
||||
w.complete = c == '\n'
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
|
||||
if fd.Kind() != protoreflect.GroupKind {
|
||||
w.buf = append(w.buf, fd.Name()...)
|
||||
w.WriteByte(':')
|
||||
} else {
|
||||
// Use message type name for group field name.
|
||||
w.buf = append(w.buf, fd.Message().Name()...)
|
||||
}
|
||||
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
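// requiresQuotesExample is an illustrative helper added by the editor, not part
// of the vendored file. A typical Any type URL is written between the brackets
// as-is, while a URL containing any character outside the allowed set is
// emitted as a quoted string (both URLs below are hypothetical).
func requiresQuotesExample() (bool, bool) {
	return requiresQuotes("type.googleapis.com/google.protobuf.Duration"), // false: letters, digits, '.', '/' only
		requiresQuotes("type.googleapis.com/google.protobuf.Duration?x=1") // true: '?' and '=' force quoting
}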
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if the value can't be unmarshaled (e.g. because
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
|
||||
md := m.Descriptor()
|
||||
fdURL := md.Fields().ByName("type_url")
|
||||
fdVal := md.Fields().ByName("value")
|
||||
|
||||
url := m.Get(fdURL).String()
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
b := m.Get(fdVal).Bytes()
|
||||
m2 := mt.New()
|
||||
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
if requiresQuotes(url) {
|
||||
w.writeQuotedString(url)
|
||||
} else {
|
||||
w.Write([]byte(url))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.indent++
|
||||
}
|
||||
if err := w.writeMessage(m2); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.indent--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeMessage(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if w.expandAny && md.FullName() == "google.protobuf.Any" {
|
||||
if canExpand, err := w.writeProto3Any(m); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fds := md.Fields()
|
||||
for i := 0; i < fds.Len(); {
|
||||
fd := fds.Get(i)
|
||||
if od := fd.ContainingOneof(); od != nil {
|
||||
fd = m.WhichOneof(od)
|
||||
i += od.Fields().Len()
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
if fd == nil || !m.Has(fd) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := m.Get(fd).List()
|
||||
for j := 0; j < lv.Len(); j++ {
|
||||
w.writeName(fd)
|
||||
v := lv.Get(j)
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
case fd.IsMap():
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := m.Get(fd).Map()
|
||||
|
||||
type entry struct{ key, val protoreflect.Value }
|
||||
var entries []entry
|
||||
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
entries = append(entries, entry{k.Value(), v})
|
||||
return true
|
||||
})
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
switch kfd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return !entries[i].key.Bool() && entries[j].key.Bool()
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return entries[i].key.Int() < entries[j].key.Int()
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return entries[i].key.Uint() < entries[j].key.Uint()
|
||||
case protoreflect.StringKind:
|
||||
return entries[i].key.String() < entries[j].key.String()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
})
|
||||
for _, entry := range entries {
|
||||
w.writeName(fd)
|
||||
w.WriteByte('<')
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
w.writeName(kfd)
|
||||
if err := w.writeSingularValue(entry.key, kfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.writeName(vfd)
|
||||
if err := w.writeSingularValue(entry.val, vfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.indent--
|
||||
w.WriteByte('>')
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
default:
|
||||
w.writeName(fd)
|
||||
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
if b := m.GetUnknown(); len(b) > 0 {
|
||||
w.writeUnknownFields(b)
|
||||
}
|
||||
return w.writeExtensions(m)
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
switch fd.Kind() {
|
||||
case protoreflect.FloatKind, protoreflect.DoubleKind:
|
||||
switch vf := v.Float(); {
|
||||
case math.IsInf(vf, +1):
|
||||
w.Write(posInf)
|
||||
case math.IsInf(vf, -1):
|
||||
w.Write(negInf)
|
||||
case math.IsNaN(vf):
|
||||
w.Write(nan)
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
// NOTE: This does not validate UTF-8 for historical reasons.
|
||||
w.writeQuotedString(string(v.String()))
|
||||
case protoreflect.BytesKind:
|
||||
w.writeQuotedString(string(v.Bytes()))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var bra, ket byte = '<', '>'
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
w.WriteByte(bra)
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
m := v.Message()
|
||||
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
|
||||
b, err := m2.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.Write(b)
|
||||
} else {
|
||||
w.writeMessage(m)
|
||||
}
|
||||
w.indent--
|
||||
w.WriteByte(ket)
|
||||
case protoreflect.EnumKind:
|
||||
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
|
||||
fmt.Fprint(w, ev.Name())
|
||||
} else {
|
||||
fmt.Fprint(w, v.Enum())
|
||||
}
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeQuotedString writes a quoted string in the protocol buffer text format.
|
||||
func (w *textWriter) writeQuotedString(s string) {
|
||||
w.WriteByte('"')
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
w.buf = append(w.buf, `\n`...)
|
||||
case '\r':
|
||||
w.buf = append(w.buf, `\r`...)
|
||||
case '\t':
|
||||
w.buf = append(w.buf, `\t`...)
|
||||
case '"':
|
||||
w.buf = append(w.buf, `\"`...)
|
||||
case '\\':
|
||||
w.buf = append(w.buf, `\\`...)
|
||||
default:
|
||||
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
|
||||
w.buf = append(w.buf, c)
|
||||
} else {
|
||||
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.WriteByte('"')
|
||||
}
|
||||
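// quotedStringExample is an illustrative helper added by the editor, not part
// of the vendored file. It shows how writeQuotedString escapes its input:
// printable ASCII passes through, a few characters get symbolic escapes, and
// every other byte becomes a three-digit octal escape.
func quotedStringExample() string {
	w := &textWriter{compact: true, complete: true}
	w.writeQuotedString("a\tb\x01\"")
	return string(w.buf) // `"a\tb\001\""`
}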
|
||||
func (w *textWriter) writeUnknownFields(b []byte) {
|
||||
if !w.compact {
|
||||
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
|
||||
}
|
||||
|
||||
for len(b) > 0 {
|
||||
num, wtyp, n := protowire.ConsumeTag(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
|
||||
if wtyp == protowire.EndGroupType {
|
||||
w.indent--
|
||||
w.Write(endBraceNewline)
|
||||
continue
|
||||
}
|
||||
fmt.Fprint(w, num)
|
||||
if wtyp != protowire.StartGroupType {
|
||||
w.WriteByte(':')
|
||||
}
|
||||
if !w.compact || wtyp == protowire.StartGroupType {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed32Type:
|
||||
v, n := protowire.ConsumeFixed32(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed64Type:
|
||||
v, n := protowire.ConsumeFixed64(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.BytesType:
|
||||
v, n := protowire.ConsumeBytes(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprintf(w, "%q", v)
|
||||
case protowire.StartGroupType:
|
||||
w.WriteByte('{')
|
||||
w.indent++
|
||||
default:
|
||||
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
// writeExtensions writes all the extensions in m.
|
||||
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if md.ExtensionRanges().Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ext struct {
|
||||
desc protoreflect.FieldDescriptor
|
||||
val protoreflect.Value
|
||||
}
|
||||
var exts []ext
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
exts = append(exts, ext{fd, v})
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(exts, func(i, j int) bool {
|
||||
return exts[i].desc.Number() < exts[j].desc.Number()
|
||||
})
|
||||
|
||||
for _, ext := range exts {
|
||||
// For message set, use the name of the message as the extension name.
|
||||
name := string(ext.desc.FullName())
|
||||
if isMessageSet(ext.desc.ContainingMessage()) {
|
||||
name = strings.TrimSuffix(name, ".message_set_extension")
|
||||
}
|
||||
|
||||
if !ext.desc.IsList() {
|
||||
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
lv := ext.val.List()
|
||||
for i := 0; i < lv.Len(); i++ {
|
||||
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
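// Editor's illustrative note, not part of the vendored file: extensions are
// sorted by field number above and rendered through writeSingularExtension
// below, so a scalar extension appears in the output as, for example,
//
//	[my.pkg.my_extension]: 42
//
// and a repeated extension is written once per element under the same
// bracketed name (the extension name shown is hypothetical).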
|
||||
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	fmt.Fprintf(w, "[%s]:", name)
	if !w.compact {
		w.WriteByte(' ')
	}
	if err := w.writeSingularValue(v, fd); err != nil {
		return err
	}
	w.WriteByte('\n')
	return nil
}

func (w *textWriter) writeIndent() {
	if !w.complete {
		return
	}
	for i := 0; i < w.indent*2; i++ {
		w.buf = append(w.buf, ' ')
	}
	w.complete = false
}
|
|
@ -1,880 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Error string emitted when deserializing Any and fields are already set
|
||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
Offset int // 0-based byte offset from start of input
|
||||
}
|
||||
|
||||
func (p *ParseError) Error() string {
|
||||
if p.Line == 1 {
|
||||
// show offset only for first line
|
||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func (t *token) String() string {
|
||||
if t.err == nil {
|
||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||
}
|
||||
return fmt.Sprintf("parse error: %v", t.err)
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(i), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != ":" {
		// Colon is optional when the field is a group or message.
		needColon := true
		switch props.Wire {
		case "group":
			needColon = false
		case "bytes":
			// A "bytes" field is either a message, a string, or a repeated field;
			// those three become *T, *string and []T respectively, so we can check for
			// this field being a pointer to a non-string.
			if typ.Kind() == reflect.Ptr {
				// *T or *string
				if typ.Elem().Kind() == reflect.String {
					break
				}
			} else if typ.Kind() == reflect.Slice {
				// []T or []*T
				if typ.Elem().Kind() != reflect.Ptr {
					break
				}
			} else if typ.Kind() == reflect.String {
				// The proto3 exception is for a string field,
				// which requires a colon.
				break
			}
			needColon = false
		}
		if needColon {
			return p.errorf("expected ':', found %q", tok.value)
		}
		p.back()
	}
	return nil
}

func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
	st := sv.Type()
	sprops := GetProperties(st)
	reqCount := sprops.reqCount
	var reqFieldErr error
	fieldSet := make(map[string]bool)
	// A struct is a sequence of "name: value", terminated by one of
	// '>' or '}', or the end of the input. A name may also be
	// "[extension]" or "[type/url]".
	//
	// The whole struct can also be an expanded Any message, like:
	// [type/url] < ... struct contents ... >
	for {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		if tok.value == terminator {
			break
		}
		if tok.value == "[" {
			// Looks like an extension or an Any.
			//
			// TODO: Check whether we need to handle
			// namespace rooted names (e.g. ".something.Foo").
			extName, err := p.consumeExtName()
			if err != nil {
				return err
			}

			if s := strings.LastIndex(extName, "/"); s >= 0 {
				// If it contains a slash, it's an Any type URL.
				messageName := extName[s+1:]
				mt := MessageType(messageName)
				if mt == nil {
					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
				}
				tok = p.next()
				if tok.err != nil {
					return tok.err
				}
				// consume an optional colon
				if tok.value == ":" {
					tok = p.next()
					if tok.err != nil {
						return tok.err
					}
				}
				var terminator string
				switch tok.value {
				case "<":
					terminator = ">"
				case "{":
					terminator = "}"
				default:
					return p.errorf("expected '{' or '<', found %q", tok.value)
				}
				v := reflect.New(mt.Elem())
				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
					return pe
				}
				b, err := Marshal(v.Interface().(Message))
				if err != nil {
					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
				}
				if fieldSet["type_url"] {
					return p.errorf(anyRepeatedlyUnpacked, "type_url")
				}
				if fieldSet["value"] {
					return p.errorf(anyRepeatedlyUnpacked, "value")
				}
				sv.FieldByName("TypeUrl").SetString(extName)
				sv.FieldByName("Value").SetBytes(b)
				fieldSet["type_url"] = true
				fieldSet["value"] = true
				continue
			}

			var desc *ExtensionDesc
			// This could be faster, but it's functional.
			// TODO: Do something smarter than a linear scan.
			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
				if d.Name == extName {
					desc = d
					break
				}
			}
			if desc == nil {
				return p.errorf("unrecognized extension %q", extName)
			}

			props := &Properties{}
			props.Parse(desc.Tag)

			typ := reflect.TypeOf(desc.ExtensionType)
			if err := p.checkForColon(props, typ); err != nil {
				return err
			}

			rep := desc.repeated()

			// Read the extension structure, and set it in
			// the value we're constructing.
			var ext reflect.Value
			if !rep {
				ext = reflect.New(typ).Elem()
			} else {
				ext = reflect.New(typ.Elem()).Elem()
			}
			if err := p.readAny(ext, props); err != nil {
				if _, ok := err.(*RequiredNotSetError); !ok {
					return err
				}
				reqFieldErr = err
			}
			ep := sv.Addr().Interface().(Message)
			if !rep {
				SetExtension(ep, desc, ext.Interface())
			} else {
				old, err := GetExtension(ep, desc)
				var sl reflect.Value
				if err == nil {
					sl = reflect.ValueOf(old) // existing slice
				} else {
					sl = reflect.MakeSlice(typ, 0, 1)
				}
				sl = reflect.Append(sl, ext)
				SetExtension(ep, desc, sl.Interface())
			}
			if err := p.consumeOptionalSeparator(); err != nil {
				return err
			}
			continue
		}

		// This is a normal, non-extension field.
		name := tok.value
		var dst reflect.Value
		fi, props, ok := structFieldByName(sprops, name)
		if ok {
			dst = sv.Field(fi)
		} else if oop, ok := sprops.OneofTypes[name]; ok {
			// It is a oneof.
			props = oop.Prop
			nv := reflect.New(oop.Type.Elem())
			dst = nv.Elem().Field(0)
			field := sv.Field(oop.Field)
			if !field.IsNil() {
				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
			}
			field.Set(nv)
		}
		if !dst.IsValid() {
			return p.errorf("unknown field name %q in %v", name, st)
		}

		if dst.Kind() == reflect.Map {
			// Consume any colon.
			if err := p.checkForColon(props, dst.Type()); err != nil {
				return err
			}

			// Construct the map if it doesn't already exist.
			if dst.IsNil() {
				dst.Set(reflect.MakeMap(dst.Type()))
			}
			key := reflect.New(dst.Type().Key()).Elem()
			val := reflect.New(dst.Type().Elem()).Elem()

			// The map entry should be this sequence of tokens:
			// < key : KEY value : VALUE >
			// However, implementations may omit key or value, and technically
			// we should support them in any order. See b/28924776 for a time
			// this went wrong.

			tok := p.next()
			var terminator string
			switch tok.value {
			case "<":
				terminator = ">"
			case "{":
				terminator = "}"
			default:
				return p.errorf("expected '{' or '<', found %q", tok.value)
			}
			for {
				tok := p.next()
				if tok.err != nil {
					return tok.err
				}
				if tok.value == terminator {
					break
				}
				switch tok.value {
				case "key":
					if err := p.consumeToken(":"); err != nil {
						return err
					}
					if err := p.readAny(key, props.MapKeyProp); err != nil {
						return err
					}
					if err := p.consumeOptionalSeparator(); err != nil {
						return err
					}
				case "value":
					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
						return err
					}
					if err := p.readAny(val, props.MapValProp); err != nil {
						return err
					}
					if err := p.consumeOptionalSeparator(); err != nil {
						return err
					}
				default:
					p.back()
					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
				}
			}

			dst.SetMapIndex(key, val)
			continue
		}

		// Check that it's not already set if it's not a repeated field.
		if !props.Repeated && fieldSet[name] {
			return p.errorf("non-repeated field %q was repeated", name)
		}

		if err := p.checkForColon(props, dst.Type()); err != nil {
			return err
		}

		// Parse into the field.
		fieldSet[name] = true
		if err := p.readAny(dst, props); err != nil {
			if _, ok := err.(*RequiredNotSetError); !ok {
				return err
			}
			reqFieldErr = err
		}
		if props.Required {
			reqCount--
		}

		if err := p.consumeOptionalSeparator(); err != nil {
			return err
		}

	}

	if reqCount > 0 {
		return p.missingRequiredFieldError(sv)
	}
	return reqFieldErr
}

// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
	tok := p.next()
	if tok.err != nil {
		return "", tok.err
	}

	// If extension name or type url is quoted, it's a single token.
	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
		if err != nil {
			return "", err
		}
		return name, p.consumeToken("]")
	}

	// Consume everything up to "]"
	var parts []string
	for tok.value != "]" {
		parts = append(parts, tok.value)
		tok = p.next()
		if tok.err != nil {
			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
		}
		if p.done && tok.value != "]" {
			return "", p.errorf("unclosed type_url or extension name")
		}
	}
	return strings.Join(parts, ""), nil
}

// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != ";" && tok.value != "," {
		p.back()
	}
	return nil
}

func (p *textParser) readAny(v reflect.Value, props *Properties) error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value == "" {
		return p.errorf("unexpected EOF")
	}

	switch fv := v; fv.Kind() {
	case reflect.Slice:
		at := v.Type()
		if at.Elem().Kind() == reflect.Uint8 {
			// Special case for []byte
			if tok.value[0] != '"' && tok.value[0] != '\'' {
				// Deliberately written out here, as the error after
				// this switch statement would write "invalid []byte: ...",
				// which is not as user-friendly.
				return p.errorf("invalid string: %v", tok.value)
			}
			bytes := []byte(tok.unquoted)
			fv.Set(reflect.ValueOf(bytes))
			return nil
		}
		// Repeated field.
		if tok.value == "[" {
			// Repeated field with list notation, like [1,2,3].
			for {
				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
				err := p.readAny(fv.Index(fv.Len()-1), props)
				if err != nil {
					return err
				}
				tok := p.next()
				if tok.err != nil {
					return tok.err
				}
				if tok.value == "]" {
					break
				}
				if tok.value != "," {
					return p.errorf("Expected ']' or ',' found %q", tok.value)
				}
			}
			return nil
		}
		// One value of the repeated field.
		p.back()
		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
		return p.readAny(fv.Index(fv.Len()-1), props)
	case reflect.Bool:
		// true/1/t/True or false/f/0/False.
		switch tok.value {
		case "true", "1", "t", "True":
			fv.SetBool(true)
			return nil
		case "false", "0", "f", "False":
			fv.SetBool(false)
			return nil
		}
	case reflect.Float32, reflect.Float64:
		v := tok.value
		// Ignore 'f' for compatibility with output generated by C++, but don't
		// remove 'f' when the value is "-inf" or "inf".
		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
			v = v[:len(v)-1]
		}
		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
			fv.SetFloat(f)
			return nil
		}
	case reflect.Int32:
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			fv.SetInt(x)
			return nil
		}

		if len(props.Enum) == 0 {
			break
		}
		m, ok := enumValueMaps[props.Enum]
		if !ok {
			break
		}
		x, ok := m[tok.value]
		if !ok {
			break
		}
		fv.SetInt(int64(x))
		return nil
	case reflect.Int64:
		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
			fv.SetInt(x)
			return nil
		}

	case reflect.Ptr:
		// A basic field (indirected through pointer), or a repeated message/group
		p.back()
		fv.Set(reflect.New(fv.Type().Elem()))
		return p.readAny(fv.Elem(), props)
	case reflect.String:
		if tok.value[0] == '"' || tok.value[0] == '\'' {
			fv.SetString(tok.unquoted)
			return nil
		}
	case reflect.Struct:
		var terminator string
		switch tok.value {
		case "{":
			terminator = "}"
		case "<":
			terminator = ">"
		default:
			return p.errorf("expected '{' or '<', found %q", tok.value)
		}
		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
		return p.readStruct(fv, terminator)
	case reflect.Uint32:
		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
			fv.SetUint(uint64(x))
			return nil
		}
	case reflect.Uint64:
		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
			fv.SetUint(x)
			return nil
		}
	}
	return p.errorf("invalid %v: %v", v.Type(), tok.value)
}

// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
	if um, ok := pb.(encoding.TextUnmarshaler); ok {
		return um.UnmarshalText([]byte(s))
	}
	pb.Reset()
	v := reflect.ValueOf(pb)
	return newTextParser(s).readStruct(v.Elem(), "")
}
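For orientation only, not part of the vendored files: a minimal sketch of how the text-format entry point above is typically driven, using the vendored duration message as an arbitrary example type. UnmarshalText resets the destination, then readStruct fills it field by field.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Parse a text-format message into a fresh Duration.
	dst := &durpb.Duration{}
	if err := proto.UnmarshalText("seconds: 3 nanos: 500", dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.Seconds, dst.Nanos) // 3 500
}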
@ -0,0 +1,78 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/runtime/protoiface"
)

// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
	if m == nil {
		return 0
	}
	mi := MessageV2(m)
	return protoV2.Size(mi)
}

// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
	b, err := marshalAppend(nil, m, false)
	if b == nil {
		b = zeroBytes
	}
	return b, err
}

var zeroBytes = make([]byte, 0, 0)

func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
	if m == nil {
		return nil, ErrNil
	}
	mi := MessageV2(m)
	nbuf, err := protoV2.MarshalOptions{
		Deterministic: deterministic,
		AllowPartial:  true,
	}.MarshalAppend(buf, mi)
	if err != nil {
		return buf, err
	}
	if len(buf) == len(nbuf) {
		if !mi.ProtoReflect().IsValid() {
			return buf, ErrNil
		}
	}
	return nbuf, checkRequiredNotSet(mi)
}

// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
	m.Reset()
	return UnmarshalMerge(b, m)
}

// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
	mi := MessageV2(m)
	out, err := protoV2.UnmarshalOptions{
		AllowPartial: true,
		Merge:        true,
	}.UnmarshalState(protoiface.UnmarshalInput{
		Buf:     b,
		Message: mi.ProtoReflect(),
	})
	if err != nil {
		return err
	}
	if out.Flags&protoiface.UnmarshalInitialized > 0 {
		return nil
	}
	return checkRequiredNotSet(mi)
}
@ -0,0 +1,34 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }

// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }

// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }

// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }

// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }

// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }

// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }

// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }

// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
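Illustrative usage sketch, not part of the vendored files: the helpers return pointers, which proto2-style generated structs use to distinguish an unset field from its zero value.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// A *string and *int32 can represent "set to the zero value",
	// which a plain string or int32 field cannot.
	name := proto.String("example")
	count := proto.Int32(0)
	fmt.Println(*name, *count)
}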
@ -1,141 +1,179 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements functions to marshal proto.Message to/from
|
||||
// google.protobuf.Any message.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
|
||||
anypb "github.com/golang/protobuf/ptypes/any"
|
||||
)
|
||||
|
||||
const googleApis = "type.googleapis.com/"
|
||||
const urlPrefix = "type.googleapis.com/"
|
||||
|
||||
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
|
||||
// AnyMessageName returns the message name contained in an anypb.Any message.
|
||||
// Most type assertions should use the Is function instead.
|
||||
//
|
||||
// Note that regular type assertions should be done using the Is
|
||||
// function. AnyMessageName is provided for less common use cases like filtering a
|
||||
// sequence of Any messages based on a set of allowed message type names.
|
||||
func AnyMessageName(any *any.Any) (string, error) {
|
||||
// Deprecated: Call the any.MessageName method instead.
|
||||
func AnyMessageName(any *anypb.Any) (string, error) {
|
||||
name, err := anyMessageName(any)
|
||||
return string(name), err
|
||||
}
|
||||
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
|
||||
if any == nil {
|
||||
return "", fmt.Errorf("message is nil")
|
||||
}
|
||||
slash := strings.LastIndex(any.TypeUrl, "/")
|
||||
if slash < 0 {
|
||||
name := protoreflect.FullName(any.TypeUrl)
|
||||
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
|
||||
name = name[i+len("/"):]
|
||||
}
|
||||
if !name.IsValid() {
|
||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||
}
|
||||
return any.TypeUrl[slash+1:], nil
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
|
||||
func MarshalAny(pb proto.Message) (*any.Any, error) {
|
||||
value, err := proto.Marshal(pb)
|
||||
// MarshalAny marshals the given message m into an anypb.Any message.
|
||||
//
|
||||
// Deprecated: Call the anypb.New function instead.
|
||||
func MarshalAny(m proto.Message) (*anypb.Any, error) {
|
||||
switch dm := m.(type) {
|
||||
case DynamicAny:
|
||||
m = dm.Message
|
||||
case *DynamicAny:
|
||||
if dm == nil {
|
||||
return nil, proto.ErrNil
|
||||
}
|
||||
m = dm.Message
|
||||
}
|
||||
b, err := proto.Marshal(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
|
||||
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
|
||||
}
|
||||
|
||||
// Empty returns a new message of the type specified in an anypb.Any message.
|
||||
// It returns protoregistry.NotFound if the corresponding message type could not
|
||||
// be resolved in the global registry.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
|
||||
// to resolve the message name and create a new instance of it.
|
||||
func Empty(any *anypb.Any) (proto.Message, error) {
|
||||
name, err := anyMessageName(any)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.MessageV1(mt.New().Interface()), nil
|
||||
}
|
||||
|
||||
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
|
||||
// into the provided message m. It returns an error if the target message
|
||||
// does not match the type in the Any message or if an unmarshal error occurs.
|
||||
//
|
||||
// The target message m may be a *DynamicAny message. If the underlying message
|
||||
// type could not be resolved, then this returns protoregistry.NotFound.
|
||||
//
|
||||
// Deprecated: Call the any.UnmarshalTo method instead.
|
||||
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
|
||||
if dm, ok := m.(*DynamicAny); ok {
|
||||
if dm.Message == nil {
|
||||
var err error
|
||||
dm.Message, err = Empty(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
m = dm.Message
|
||||
}
|
||||
|
||||
anyName, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msgName := proto.MessageName(m)
|
||||
if anyName != msgName {
|
||||
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
|
||||
}
|
||||
return proto.Unmarshal(any.Value, m)
|
||||
}
|
||||
|
||||
// Is reports whether the Any message contains a message of the specified type.
|
||||
//
|
||||
// Deprecated: Call the any.MessageIs method instead.
|
||||
func Is(any *anypb.Any, m proto.Message) bool {
|
||||
if any == nil || m == nil {
|
||||
return false
|
||||
}
|
||||
name := proto.MessageName(m)
|
||||
if !strings.HasSuffix(any.TypeUrl, name) {
|
||||
return false
|
||||
}
|
||||
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
|
||||
}
|
||||
|
||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
||||
// message. The allocated message is stored in the embedded proto.Message.
|
||||
// allocate a proto.Message for the type specified in an anypb.Any message.
|
||||
// The allocated message is stored in the embedded proto.Message.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var x ptypes.DynamicAny
|
||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||
type DynamicAny struct {
|
||||
proto.Message
|
||||
}
|
||||
|
||||
// Empty returns a new proto.Message of the type specified in a
|
||||
// google.protobuf.Any message. It returns an error if corresponding message
|
||||
// type isn't linked in.
|
||||
func Empty(any *any.Any) (proto.Message, error) {
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t := proto.MessageType(aname)
|
||||
if t == nil {
|
||||
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
|
||||
}
|
||||
return reflect.New(t.Elem()).Interface().(proto.Message), nil
|
||||
}
|
||||
|
||||
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
|
||||
// message and places the decoded result in pb. It returns an error if type of
|
||||
// contents of Any message does not match type of pb message.
|
||||
//
|
||||
// pb can be a proto.Message, or a *DynamicAny.
|
||||
func UnmarshalAny(any *any.Any, pb proto.Message) error {
|
||||
if d, ok := pb.(*DynamicAny); ok {
|
||||
if d.Message == nil {
|
||||
var err error
|
||||
d.Message, err = Empty(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return UnmarshalAny(any, d.Message)
|
||||
}
|
||||
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
|
||||
// the any message contents into a new instance of the underlying message.
|
||||
type DynamicAny struct{ proto.Message }
|
||||
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return err
|
||||
func (m DynamicAny) String() string {
|
||||
if m.Message == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
|
||||
mname := proto.MessageName(pb)
|
||||
if aname != mname {
|
||||
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
|
||||
return m.Message.String()
|
||||
}
|
||||
func (m DynamicAny) Reset() {
|
||||
if m.Message == nil {
|
||||
return
|
||||
}
|
||||
return proto.Unmarshal(any.Value, pb)
|
||||
m.Message.Reset()
|
||||
}
|
||||
func (m DynamicAny) ProtoMessage() {
|
||||
return
|
||||
}
|
||||
func (m DynamicAny) ProtoReflect() protoreflect.Message {
|
||||
if m.Message == nil {
|
||||
return nil
|
||||
}
|
||||
return dynamicAny{proto.MessageReflect(m.Message)}
|
||||
}
|
||||
|
||||
// Is returns true if any value contains a given message type.
|
||||
func Is(any *any.Any, pb proto.Message) bool {
|
||||
// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
|
||||
// but it avoids scanning TypeUrl for the slash.
|
||||
if any == nil {
|
||||
return false
|
||||
}
|
||||
name := proto.MessageName(pb)
|
||||
prefix := len(any.TypeUrl) - len(name)
|
||||
return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
|
||||
type dynamicAny struct{ protoreflect.Message }
|
||||
|
||||
func (m dynamicAny) Type() protoreflect.MessageType {
|
||||
return dynamicAnyType{m.Message.Type()}
|
||||
}
|
||||
func (m dynamicAny) New() protoreflect.Message {
|
||||
return dynamicAnyType{m.Message.Type()}.New()
|
||||
}
|
||||
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
|
||||
return DynamicAny{proto.MessageV1(m.Message.Interface())}
|
||||
}
|
||||
|
||||
type dynamicAnyType struct{ protoreflect.MessageType }
|
||||
|
||||
func (t dynamicAnyType) New() protoreflect.Message {
|
||||
return dynamicAny{t.MessageType.New()}
|
||||
}
|
||||
func (t dynamicAnyType) Zero() protoreflect.Message {
|
||||
return dynamicAny{t.MessageType.Zero()}
|
||||
}
|
||||
|
|
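Illustrative usage sketch of the deprecated ptypes Any helpers in this hunk, not part of the vendored files; the duration message is an arbitrary example type.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a message into a google.protobuf.Any, then unpack it again.
	src := &durpb.Duration{Seconds: 60}
	a, err := ptypes.MarshalAny(src)
	if err != nil {
		panic(err)
	}
	dst := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(a, dst); err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl, dst.Seconds)
}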
|
@ -1,203 +1,62 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/any.proto
|
||||
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
||||
|
||||
package any
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
// Symbols defined in public import of google/protobuf/any.proto.
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
type Any = anypb.Any
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// Example 4: Pack and unpack a message in Go
|
||||
//
|
||||
// foo := &pb.Foo{...}
|
||||
// any, err := ptypes.MarshalAny(foo)
|
||||
// ...
|
||||
// foo := &pb.Foo{}
|
||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
type Any struct {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. This string must contain at least
|
||||
// one "/" character. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
|
||||
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
|
||||
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
|
||||
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
func (m *Any) Reset() { *m = Any{} }
|
||||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
||||
func (*Any) ProtoMessage() {}
|
||||
func (*Any) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b53526c13ae22eb4, []int{0}
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||
|
||||
func (m *Any) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Any.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Any) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Any.Merge(m, src)
|
||||
}
|
||||
func (m *Any) XXX_Size() int {
|
||||
return xxx_messageInfo_Any.Size(m)
|
||||
}
|
||||
func (m *Any) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Any.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Any proto.InternalMessageInfo
|
||||
|
||||
func (m *Any) GetTypeUrl() string {
|
||||
if m != nil {
|
||||
return m.TypeUrl
|
||||
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
|
||||
return
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Any) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4)
|
||||
}
|
||||
|
||||
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
||||
// 185 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
||||
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
|
||||
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
|
||||
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
|
||||
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
|
||||
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
|
||||
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
|
||||
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
|
||||
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
|
||||
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
|
||||
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
|
||||
}
|
||||
|
|
|
@ -1,155 +0,0 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/any";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "AnyProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// Example 4: Pack and unpack a message in Go
|
||||
//
|
||||
// foo := &pb.Foo{...}
|
||||
// any, err := ptypes.MarshalAny(foo)
|
||||
// ...
|
||||
// foo := &pb.Foo{}
|
||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
message Any {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. This string must contain at least
|
||||
// one "/" character. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
string type_url = 1;
|
||||
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
bytes value = 2;
|
||||
}
|
|
@ -1,35 +1,10 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package ptypes contains code for interacting with well-known types.
|
||||
*/
|
||||
// Package ptypes provides functionality for interacting with well-known types.
|
||||
//
|
||||
// Deprecated: Well-known types have specialized functionality directly
|
||||
// injected into the generated packages for each message type.
|
||||
// See the deprecation notice for each function for the suggested alternative.
|
||||
package ptypes
|
||||
|
|
|
@ -1,102 +1,76 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements conversions between google.protobuf.Duration
|
||||
// and time.Duration.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
durpb "github.com/golang/protobuf/ptypes/duration"
|
||||
durationpb "github.com/golang/protobuf/ptypes/duration"
|
||||
)
|
||||
|
||||
// Range of google.protobuf.Duration as specified in duration.proto.
|
||||
// This is about 10,000 years in seconds.
|
||||
const (
|
||||
// Range of a durpb.Duration in seconds, as specified in
|
||||
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// validateDuration determines whether the durpb.Duration is valid according to the
|
||||
// definition in google/protobuf/duration.proto. A valid durpb.Duration
|
||||
// may still be too large to fit into a time.Duration (the range of durpb.Duration
|
||||
// is about 10,000 years, and the range of time.Duration is about 290).
|
||||
func validateDuration(d *durpb.Duration) error {
|
||||
if d == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %v: seconds out of range", d)
|
||||
}
|
||||
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %v: nanos out of range", d)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Duration converts a durpb.Duration to a time.Duration. Duration
|
||||
// returns an error if the durpb.Duration is invalid or is too large to be
|
||||
// represented in a time.Duration.
|
||||
func Duration(p *durpb.Duration) (time.Duration, error) {
|
||||
if err := validateDuration(p); err != nil {
|
||||
// Duration converts a durationpb.Duration to a time.Duration.
|
||||
// Duration returns an error if dur is invalid or overflows a time.Duration.
|
||||
//
|
||||
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
|
||||
func Duration(dur *durationpb.Duration) (time.Duration, error) {
|
||||
if err := validateDuration(dur); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(p.Seconds) * time.Second
|
||||
if int64(d/time.Second) != p.Seconds {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
d := time.Duration(dur.Seconds) * time.Second
|
||||
if int64(d/time.Second) != dur.Seconds {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||
}
|
||||
if p.Nanos != 0 {
|
||||
d += time.Duration(p.Nanos) * time.Nanosecond
|
||||
if (d < 0) != (p.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
if dur.Nanos != 0 {
|
||||
d += time.Duration(dur.Nanos) * time.Nanosecond
|
||||
if (d < 0) != (dur.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a durpb.Duration.
|
||||
func DurationProto(d time.Duration) *durpb.Duration {
|
||||
// DurationProto converts a time.Duration to a durationpb.Duration.
|
||||
//
|
||||
// Deprecated: Call the durationpb.New function instead.
|
||||
func DurationProto(d time.Duration) *durationpb.Duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &durpb.Duration{
|
||||
Seconds: secs,
|
||||
return &durationpb.Duration{
|
||||
Seconds: int64(secs),
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
|
||||
|
||||
// validateDuration determines whether the durationpb.Duration is valid
|
||||
// according to the definition in google/protobuf/duration.proto.
|
||||
// A valid durpb.Duration may still be too large to fit into a time.Duration
|
||||
// Note that the range of durationpb.Duration is about 10,000 years,
|
||||
// while the range of time.Duration is about 290 years.
|
||||
func validateDuration(dur *durationpb.Duration) error {
|
||||
if dur == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %v: seconds out of range", dur)
|
||||
}
|
||||
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %v: nanos out of range", dur)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
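Illustrative usage sketch of the Duration conversions in this hunk, not part of the vendored files: DurationProto and Duration round-trip a time.Duration through its protobuf representation.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> durationpb.Duration -> time.Duration.
	pb := ptypes.DurationProto(90 * time.Second)
	d, err := ptypes.Duration(pb)
	if err != nil {
		panic(err)
	}
	fmt.Println(pb.Seconds, pb.Nanos, d) // 90 0 1m30s
}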
|
@ -1,163 +1,63 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/duration.proto
|
||||
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||
|
||||
package duration
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
// Symbols defined in public import of google/protobuf/duration.proto.
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
type Duration = durationpb.Duration
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||
//
|
||||
// td = datetime.timedelta(days=3, minutes=10)
|
||||
// duration = Duration()
|
||||
// duration.FromTimedelta(td)
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Duration type is encoded as a string rather than an
|
||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||
// microsecond should be expressed in JSON format as "3.000001s".
|
||||
//
|
||||
//
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
|
||||
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||
|
||||
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Duration.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Duration) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Duration.Merge(m, src)
|
||||
}
|
||||
func (m *Duration) XXX_Size() int {
|
||||
return xxx_messageInfo_Duration.Size(m)
|
||||
}
|
||||
func (m *Duration) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Duration.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
||||
|
||||
func (m *Duration) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
|
||||
return
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Duration) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5)
|
||||
}
|
||||
|
||||
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
|
||||
// 190 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
||||
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
|
||||
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
|
||||
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
|
||||
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
|
||||
0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
|
||||
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
|
||||
0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
|
||||
0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
|
||||
0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
|
||||
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
|
||||
}
|
||||
|
|
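The Duration comments above give the Timestamp arithmetic in pseudo code. A hedged Go sketch of the same computations (illustrative only, with invented example values), using the `timestamppb` and `durationpb` helpers rather than manipulating the `seconds`/`nanos` fields directly:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Example 1 from the comment: duration = end - start.
	start := timestamppb.New(time.Date(2021, 6, 1, 12, 0, 0, 0, time.UTC))
	end := timestamppb.New(time.Date(2021, 6, 1, 12, 0, 1, 500000000, time.UTC))

	d := durationpb.New(end.AsTime().Sub(start.AsTime()))
	fmt.Println(d.Seconds, d.Nanos) // 1 500000000

	// Example 2 from the comment: end = start + duration.
	end2 := timestamppb.New(start.AsTime().Add(d.AsDuration()))
	fmt.Println(end2.AsTime().Equal(end.AsTime())) // true
}
```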
|
@ -1,116 +0,0 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/duration";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DurationProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||
//
|
||||
// td = datetime.timedelta(days=3, minutes=10)
|
||||
// duration = Duration()
|
||||
// duration.FromTimedelta(td)
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Duration type is encoded as a string rather than an
|
||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||
// microsecond should be expressed in JSON format as "3.000001s".
|
||||
//
|
||||
//
|
||||
message Duration {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
|
@ -1,46 +1,18 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ptypes

// This file implements operations on google.protobuf.Timestamp.

import (
	"errors"
	"fmt"
	"time"

	tspb "github.com/golang/protobuf/ptypes/timestamp"
	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)

// Range of google.protobuf.Duration as specified in timestamp.proto.
const (
	// Seconds field of the earliest valid Timestamp.
	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
@ -50,17 +22,80 @@ const (
	maxValidSeconds = 253402300800
)

// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return
// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
//
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because corresponds to a valid
	// timestamp. Instead return whatever time.Unix gives us.
	var t time.Time
	if ts == nil {
		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
	} else {
		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
	}
	return t, validateTimestamp(ts)
}

// TimestampNow returns a google.protobuf.Timestamp for the current time.
//
// Deprecated: Call the timestamppb.Now function instead.
func TimestampNow() *timestamppb.Timestamp {
	ts, err := TimestampProto(time.Now())
	if err != nil {
		panic("ptypes: time.Now() out of Timestamp range")
	}
	return ts
}

// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
//
// Deprecated: Call the timestamppb.New function instead.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
	ts := &timestamppb.Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()),
	}
	if err := validateTimestamp(ts); err != nil {
		return nil, err
	}
	return ts, nil
}

// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
//
// Deprecated: Call the ts.AsTime method instead,
// followed by a call to the Format method on the time.Time value.
func TimestampString(ts *timestamppb.Timestamp) string {
	t, err := Timestamp(ts)
	if err != nil {
		return fmt.Sprintf("(%v)", err)
	}
	return t.Format(time.RFC3339Nano)
}

// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
// and has a Nanos field in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
// Every valid Timestamp can be represented by a time.Time,
// but the converse is not true.
func validateTimestamp(ts *timestamppb.Timestamp) error {
	if ts == nil {
		return errors.New("timestamp: nil Timestamp")
	}
@ -75,58 +110,3 @@ func validateTimestamp(ts *tspb.Timestamp) error {
	}
	return nil
}

// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because corresponds to a valid
	// timestamp. Instead return whatever time.Unix gives us.
	var t time.Time
	if ts == nil {
		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
	} else {
		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
	}
	return t, validateTimestamp(ts)
}

// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *tspb.Timestamp {
	ts, err := TimestampProto(time.Now())
	if err != nil {
		panic("ptypes: time.Now() out of Timestamp range")
	}
	return ts
}

// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
	ts := &tspb.Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()),
	}
	if err := validateTimestamp(ts); err != nil {
		return nil, err
	}
	return ts, nil
}

// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
	t, err := Timestamp(ts)
	if err != nil {
		return fmt.Sprintf("(%v)", err)
	}
	return t.Format(time.RFC3339Nano)
}
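Every function in this file now carries a Deprecated note pointing at `timestamppb`. A small migration sketch (illustrative only, not part of the diff) of what each legacy call maps to:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// ptypes.TimestampNow()    -> timestamppb.Now()
	now := timestamppb.Now()

	// ptypes.TimestampProto(t) -> timestamppb.New(t)
	ts := timestamppb.New(time.Unix(1257894000, 0))

	// ptypes.Timestamp(ts)     -> ts.CheckValid() followed by ts.AsTime()
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}
	fmt.Println(ts.AsTime().UTC()) // 2009-11-10 23:00:00 +0000 UTC

	// ptypes.TimestampString(ts) -> ts.AsTime().Format(time.RFC3339Nano)
	fmt.Println(now.AsTime().Format(time.RFC3339Nano))
}
```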
@ -1,185 +1,64 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/timestamp.proto
|
||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
|
||||
package timestamp
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
type Timestamp = timestamppb.Timestamp
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
//
|
||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||
// second table is needed for interpretation, using a [24-hour linear
|
||||
// smear](https://developers.google.com/time/smear).
|
||||
//
|
||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// timestamp = Timestamp()
|
||||
// timestamp.GetCurrentTime()
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Timestamp type is encoded as a string in the
|
||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
||||
// where {year} is always expressed using four digits while {month}, {day},
|
||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard
|
||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using
|
||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
|
||||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
|
||||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_292007bbfe81227e, []int{0}
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||
|
||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Timestamp) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Timestamp.Merge(m, src)
|
||||
}
|
||||
func (m *Timestamp) XXX_Size() int {
|
||||
return xxx_messageInfo_Timestamp.Size(m)
|
||||
}
|
||||
func (m *Timestamp) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Timestamp.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
||||
|
||||
func (m *Timestamp) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
|
||||
return
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Timestamp) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e)
|
||||
}
|
||||
|
||||
var fileDescriptor_292007bbfe81227e = []byte{
|
||||
// 191 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
|
||||
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
|
||||
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
|
||||
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
|
||||
0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
|
||||
0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
|
||||
0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
|
||||
0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
|
||||
0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
|
||||
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
|
||||
}
|
||||
|
|
|
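The JSON mapping described above (an RFC 3339 string rather than a `{seconds, nanos}` object) can be observed with `protojson`, which special-cases the well-known types. A hedged sketch, assuming the `google.golang.org/protobuf` module from this vendoring:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.New(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))

	// Timestamp serializes as a quoted RFC 3339 string, not as an object.
	b, err := protojson.Marshal(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. "2017-01-15T01:30:15.010Z" (fractional digits may vary)
}
```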
@ -1,138 +0,0 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
//
|
||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||
// second table is needed for interpretation, using a [24-hour linear
|
||||
// smear](https://developers.google.com/time/smear).
|
||||
//
|
||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// timestamp = Timestamp()
|
||||
// timestamp.GetCurrentTime()
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Timestamp type is encoded as a string in the
|
||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
||||
// where {year} is always expressed using four digits while {month}, {day},
|
||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard
|
||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using
|
||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
message Timestamp {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
|
@ -16,16 +16,19 @@ This repository contains the generated Go packages for common protocol buffer
types, and the generated [gRPC][1] code necessary for interacting with Google's gRPC
APIs.

There are two sources for the proto files used in this repository:
The sources for the proto files used in this repository:

1. [google/protobuf][2]: the code in the `protobuf` and `ptypes` subdirectories
   is derived from this repo. The messages in `protobuf` are used to describe
   protocol buffer messages themselves. The messages under `ptypes` define the
   common well-known types.
2. [googleapis/googleapis][3]: the code in the `googleapis` is derived from this
* [googleapis/googleapis][2]: the code in the `googleapis` is derived from this
  repo. The packages here contain types specifically for interacting with Google
  APIs.

Historically, the packages in the `protobuf` directory used to contain
generated code for certain well-known types hosted by [google/protobuf][3].
These types are now hosted by the [`google.golang.org/protobuf`][4] module
and type aliases are used to forward declarations in this module over to
declarations in the `protobuf` module.

[1]: http://grpc.io
[2]: https://github.com/google/protobuf/
[3]: https://github.com/googleapis/googleapis/
[2]: https://github.com/googleapis/googleapis/
[3]: https://github.com/google/protobuf/
[4]: https://pkg.go.dev/mod/google.golang.org/protobuf
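For context (not part of this diff), the most common consumer of the `googleapis` packages in this vendor tree is the gRPC `status` package, which builds its errors on the `google.rpc.Status` message; a hedged sketch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// status.New wraps a code and message in a gRPC status error.
	st := status.New(codes.NotFound, "object not found")

	// Proto exposes the underlying message generated in
	// google.golang.org/genproto/googleapis/rpc/status.
	pb := st.Proto()
	fmt.Println(pb.GetCode(), pb.GetMessage()) // 5 object not found
}
```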
@ -3,9 +3,9 @@ module google.golang.org/genproto
go 1.11

require (
	github.com/golang/protobuf v1.3.3
	github.com/golang/protobuf v1.4.1
	golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
	golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
	google.golang.org/grpc v1.27.0
	honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
	google.golang.org/protobuf v1.24.0
)
@ -1,26 +1,45 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.13.0
|
||||
// source: google/rpc/status.proto
|
||||
|
||||
package status
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
math "math"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
any "github.com/golang/protobuf/ptypes/any"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
// The `Status` type defines a logical error model that is suitable for
|
||||
// different programming environments, including REST APIs and RPC APIs. It is
|
||||
|
@ -30,6 +49,10 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|||
// You can find out more about this error model and how to work with it in the
|
||||
// [API Design Guide](https://cloud.google.com/apis/design/errors).
|
||||
type Status struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
// A developer-facing error message, which should be in English. Any
|
||||
|
@ -38,80 +61,146 @@ type Status struct {
|
|||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
// A list of messages that carry the error details. There is a common set of
|
||||
// message types for APIs to use.
|
||||
Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Status) Reset() {
|
||||
*x = Status{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_rpc_status_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Status) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (m *Status) Reset() { *m = Status{} }
|
||||
func (m *Status) String() string { return proto.CompactTextString(m) }
|
||||
func (*Status) ProtoMessage() {}
|
||||
|
||||
func (x *Status) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_rpc_status_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Status.ProtoReflect.Descriptor instead.
|
||||
func (*Status) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_24d244abaf643bfe, []int{0}
|
||||
return file_google_rpc_status_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *Status) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Status.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Status.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Status) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Status.Merge(m, src)
|
||||
}
|
||||
func (m *Status) XXX_Size() int {
|
||||
return xxx_messageInfo_Status.Size(m)
|
||||
}
|
||||
func (m *Status) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Status.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Status proto.InternalMessageInfo
|
||||
|
||||
func (m *Status) GetCode() int32 {
|
||||
if m != nil {
|
||||
return m.Code
|
||||
func (x *Status) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Status) GetMessage() string {
|
||||
if m != nil {
|
||||
return m.Message
|
||||
func (x *Status) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Status) GetDetails() []*any.Any {
|
||||
if m != nil {
|
||||
return m.Details
|
||||
func (x *Status) GetDetails() []*anypb.Any {
|
||||
if x != nil {
|
||||
return x.Details
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Status)(nil), "google.rpc.Status")
|
||||
var File_google_rpc_status_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_rpc_status_proto_rawDesc = []byte{
|
||||
0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
|
||||
0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
|
||||
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61,
|
||||
0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
|
||||
0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74,
|
||||
0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
|
||||
0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74,
|
||||
0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe)
|
||||
var (
|
||||
file_google_rpc_status_proto_rawDescOnce sync.Once
|
||||
file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_rpc_status_proto_rawDescGZIP() []byte {
|
||||
file_google_rpc_status_proto_rawDescOnce.Do(func() {
|
||||
file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData)
|
||||
})
|
||||
return file_google_rpc_status_proto_rawDescData
|
||||
}
|
||||
|
||||
var fileDescriptor_24d244abaf643bfe = []byte{
|
||||
// 212 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
|
||||
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
|
||||
0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
|
||||
0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
|
||||
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
|
||||
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
|
||||
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
|
||||
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x44, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
|
||||
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
|
||||
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
|
||||
0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x7e, 0x30, 0x32, 0x2e, 0x62, 0x62, 0x0e, 0x0a,
|
||||
0x70, 0x4e, 0x62, 0x03, 0x2b, 0x36, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x28, 0x45, 0xb1,
|
||||
0x13, 0x01, 0x00, 0x00,
|
||||
var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_rpc_status_proto_goTypes = []interface{}{
|
||||
(*Status)(nil), // 0: google.rpc.Status
|
||||
(*anypb.Any)(nil), // 1: google.protobuf.Any
|
||||
}
|
||||
var file_google_rpc_status_proto_depIdxs = []int32{
|
||||
1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_rpc_status_proto_init() }
|
||||
func file_google_rpc_status_proto_init() {
|
||||
if File_google_rpc_status_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Status); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_rpc_status_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_rpc_status_proto_goTypes,
|
||||
DependencyIndexes: file_google_rpc_status_proto_depIdxs,
|
||||
MessageInfos: file_google_rpc_status_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_rpc_status_proto = out.File
|
||||
file_google_rpc_status_proto_rawDesc = nil
|
||||
file_google_rpc_status_proto_goTypes = nil
|
||||
file_google_rpc_status_proto_depIdxs = nil
|
||||
}
|
||||
|
|
|
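The regenerated code above switches the Details field from the legacy `ptypes/any` type to `anypb.Any`. A small sketch (illustrative only) of populating and reading that field; the wrapped Duration simply stands in for a real error-detail message:

```go
package main

import (
	"fmt"
	"time"

	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// anypb.New packs any proto message, recording its type URL.
	detail, err := anypb.New(durationpb.New(2 * time.Second))
	if err != nil {
		panic(err)
	}

	st := &spb.Status{
		Code:    5, // google.rpc.Code value for NOT_FOUND
		Message: "object not found",
		Details: []*anypb.Any{detail},
	}

	for _, d := range st.GetDetails() {
		fmt.Println(d.GetTypeUrl()) // type.googleapis.com/google.protobuf.Duration
	}
}
```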
@ -1,64 +1,53 @@
# gRPC-Go

[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)

The Go implementation of [gRPC](https://grpc.io/): A high performance, open
source, general RPC framework that puts mobile and HTTP/2 first. For more
information see the [gRPC Quick Start:
Go](https://grpc.io/docs/quickstart/go.html) guide.
The [Go][] implementation of [gRPC][]: A high performance, open source, general
RPC framework that puts mobile and HTTP/2 first. For more information see the
[Go gRPC docs][], or jump directly into the [quick start][].

Installation
------------
## Prerequisites

To install this package, you need to install Go and setup your Go workspace on
your computer. The simplest way to install the library is to run:
- **[Go][]**: any one of the **three latest major** [releases][go-releases].

## Installation

With [Go module][] support (Go 1.11+), simply add the following import

```go
import "google.golang.org/grpc"
```

to your code, and then `go [build|run|test]` will automatically fetch the
necessary dependencies.

Otherwise, to install the `grpc-go` package, run the following command:

```console
$ go get -u google.golang.org/grpc
```

With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
your source code and `go [build|run|test]` will automatically download the
necessary dependencies ([Go modules
ref](https://github.com/golang/go/wiki/Modules)).
> **Note:** If you are trying to access `grpc-go` from **China**, see the
> [FAQ](#FAQ) below.

If you are trying to access grpc-go from within China, please see the
[FAQ](#FAQ) below.
## Learn more

Prerequisites
-------------
gRPC-Go requires Go 1.9 or later.
- [Go gRPC docs][], which include a [quick start][] and [API
  reference][API] among other resources
- [Low-level technical docs](Documentation) from this repository
- [Performance benchmark][]
- [Examples](examples)

Documentation
-------------
- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
  descriptions.
- Documentation on specific topics can be found in the [Documentation
  directory](Documentation/).
- Examples can be found in the [examples directory](examples/).
## FAQ

Performance
-----------
Performance benchmark data for grpc-go and other languages is maintained in
[this
dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).

Status
------
General Availability [Google Cloud Platform Launch
Stages](https://cloud.google.com/terms/launch-stages).

FAQ
---

#### I/O Timeout Errors
### I/O Timeout Errors

The `golang.org` domain may be blocked from some countries. `go get` usually
produces an error like the following when this happens:

```
```console
$ go get -u google.golang.org/grpc
package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
```
@ -69,7 +58,7 @@ To build Go code, there are several options:
|
|||
|
||||
- Without Go module support: `git clone` the repo manually:
|
||||
|
||||
```
|
||||
```sh
|
||||
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
|
||||
```
|
||||
|
||||
|
@ -79,7 +68,7 @@ To build Go code, there are several options:
|
|||
- With Go module support: it is possible to use the `replace` feature of `go
|
||||
mod` to create aliases for golang.org packages. In your project's directory:
|
||||
|
||||
```
|
||||
```sh
|
||||
go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
|
||||
go mod tidy
|
||||
go mod vendor
|
||||
|
@ -87,35 +76,66 @@ To build Go code, there are several options:
|
|||
```
|
||||
|
||||
Again, this will need to be done for all transitive dependencies hosted on
|
||||
golang.org as well. Please refer to [this
|
||||
issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
|
||||
this concern.
|
||||
golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652).
|
||||
|
||||
#### Compiling error, undefined: grpc.SupportPackageIsVersion
|
||||
### Compiling error, undefined: grpc.SupportPackageIsVersion
|
||||
|
||||
Please update proto package, gRPC package and rebuild the proto files:
|
||||
- `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
|
||||
- `go get -u google.golang.org/grpc`
|
||||
- `protoc --go_out=plugins=grpc:. *.proto`
|
||||
#### If you are using Go modules:
|
||||
|
||||
#### How to turn on logging
|
||||
Ensure your gRPC-Go version is `require`d at the appropriate version in
|
||||
the same module containing the generated `.pb.go` files. For example,
|
||||
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
|
||||
|
||||
The default logger is controlled by the environment variables. Turn everything
|
||||
on by setting:
|
||||
```go
|
||||
module <your module name>
|
||||
|
||||
```
|
||||
GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
|
||||
require (
|
||||
google.golang.org/grpc v1.27.0
|
||||
)
|
||||
```
|
||||
|
||||
#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
|
||||
#### If you are *not* using Go modules:
|
||||
|
||||
Update the `proto` package, gRPC package, and rebuild the `.proto` files:
|
||||
|
||||
```sh
|
||||
go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
|
||||
go get -u google.golang.org/grpc
|
||||
protoc --go_out=plugins=grpc:. *.proto
|
||||
```
|
||||
|
||||
### How to turn on logging
|
||||
|
||||
The default logger is controlled by environment variables. Turn everything on
|
||||
like this:
|
||||
|
||||
```console
|
||||
$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
|
||||
$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
|
||||
```
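
The same thing can be done from code instead of the environment. The sketch below is only illustrative and assumes the `NewLoggerV2WithVerbosity` and `SetLoggerV2` helpers in `google.golang.org/grpc/grpclog`:

```go
package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Roughly equivalent to exporting GRPC_GO_LOG_SEVERITY_LEVEL=info and
	// GRPC_GO_LOG_VERBOSITY_LEVEL=99, but configured from code. Install the
	// logger before any other gRPC activity.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 99))

	// ... set up your ClientConn or Server as usual ...
}
```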
|
||||
|
||||
### The RPC failed with error `"code = Unavailable desc = transport is closing"`
|
||||
|
||||
This error means the connection the RPC is using was closed, and there are many
|
||||
possible reasons, including:
|
||||
1. mis-configured transport credentials; the connection failed during the handshake
|
||||
1. bytes disrupted, possibly by a proxy in between
|
||||
1. server shutdown
|
||||
1. Keepalive parameters caused connection shutdown, for example if you have configured
|
||||
your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
|
||||
If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
|
||||
to allow longer RPC calls to finish (see the sketch below).
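
If keepalive-driven connection recycling turns out to be the cause, the relevant
server options are set through `keepalive.ServerParameters`. A minimal,
illustrative sketch (the durations are placeholders, not recommendations):

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		// Recycle connections periodically (for example, to pick up DNS changes)...
		MaxConnectionAge: 5 * time.Minute,
		// ...but give in-flight RPCs time to finish before the connection is
		// forcibly closed. Increase this if long RPCs are being cut off.
		MaxConnectionAgeGrace: 1 * time.Minute,
	}))
	_ = srv // register services and call srv.Serve(lis) as usual
}
```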
|
||||
|
||||
It can be tricky to debug this because the error happens on the client side but
|
||||
the root cause of the connection being closed is on the server side. Turn on
|
||||
logging on __both client and server__, and see if there are any transport
|
||||
errors.
|
||||
|
||||
[API]: https://pkg.go.dev/google.golang.org/grpc
|
||||
[Go]: https://golang.org
|
||||
[Go module]: https://github.com/golang/go/wiki/Modules
|
||||
[gRPC]: https://grpc.io
|
||||
[Go gRPC docs]: https://grpc.io/docs/languages/go
|
||||
[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696
|
||||
[quick start]: https://grpc.io/docs/languages/go/quickstart
|
||||
[go-releases]: https://golang.org/doc/devel/release.html
|
||||
|
|
|
@ -19,7 +19,10 @@
|
|||
// Package attributes defines a generic key/value store used in various gRPC
|
||||
// components.
|
||||
//
|
||||
// All APIs in this package are EXPERIMENTAL.
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
package attributes
|
||||
|
||||
import "fmt"
|
||||
|
@ -50,6 +53,9 @@ func New(kvs ...interface{}) *Attributes {
|
|||
// times, the last value overwrites all previous values for that key. To
|
||||
// remove an existing key, use a nil value.
|
||||
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
|
||||
if a == nil {
|
||||
return New(kvs...)
|
||||
}
|
||||
if len(kvs)%2 != 0 {
|
||||
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
|
||||
}
|
||||
|
@ -66,5 +72,8 @@ func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
|
|||
// Value returns the value associated with these attributes for key, or nil if
|
||||
// no value is associated with key.
|
||||
func (a *Attributes) Value(key interface{}) interface{} {
|
||||
if a == nil {
|
||||
return nil
|
||||
}
|
||||
return a.m[key]
|
||||
}
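
For context only (not part of the vendored change), a small usage sketch based on the `New`, `WithValues`, and `Value` signatures shown above:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Use an unexported key type so other packages cannot collide with this key.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east1")
	// WithValues returns a new value; the original is never mutated.
	b := a.WithValues(regionKey{}, "europe-west4")

	fmt.Println(a.Value(regionKey{})) // us-east1
	fmt.Println(b.Value(regionKey{})) // europe-west4
}
```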
|
||||
|
|
|
@ -48,7 +48,10 @@ type BackoffConfig struct {
|
|||
// here for more details:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type ConnectParams struct {
|
||||
// Backoff specifies the configuration options for connection backoff.
|
||||
Backoff backoff.Config
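
For context only (not part of the vendored change), a dial-side sketch of how `ConnectParams` is typically supplied; it assumes `grpc.WithConnectParams` and the `backoff.DefaultConfig` value from `google.golang.org/grpc/backoff`:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the library defaults and stretch the ceiling on retry delay.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 2 * time.Minute

	conn, err := grpc.Dial("example.com:443",
		grpc.WithInsecure(), // illustrative only; use real credentials in practice
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 20 * time.Second,
		}),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```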
|
||||
|
|
|
@ -1,391 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/naming"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Address represents a server the client connects to.
|
||||
//
|
||||
// Deprecated: please use package balancer.
|
||||
type Address struct {
|
||||
// Addr is the server address on which a connection will be established.
|
||||
Addr string
|
||||
// Metadata is the information associated with Addr, which may be used
|
||||
// to make load balancing decision.
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// BalancerConfig specifies the configurations for Balancer.
|
||||
//
|
||||
// Deprecated: please use package balancer. May be removed in a future 1.x release.
|
||||
type BalancerConfig struct {
|
||||
// DialCreds is the transport credential the Balancer implementation can
|
||||
// use to dial to a remote load balancer server. The Balancer implementations
|
||||
// can ignore this if it does not need to talk to another party securely.
|
||||
DialCreds credentials.TransportCredentials
|
||||
// Dialer is the custom dialer the Balancer implementation can use to dial
|
||||
// to a remote load balancer server. The Balancer implementations
|
||||
// can ignore this if it doesn't need to talk to remote balancer.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
}
|
||||
|
||||
// BalancerGetOptions configures a Get call.
|
||||
//
|
||||
// Deprecated: please use package balancer. May be removed in a future 1.x release.
|
||||
type BalancerGetOptions struct {
|
||||
// BlockingWait specifies whether Get should block when there is no
|
||||
// connected address.
|
||||
BlockingWait bool
|
||||
}
|
||||
|
||||
// Balancer chooses network addresses for RPCs.
|
||||
//
|
||||
// Deprecated: please use package balancer. May be removed in a future 1.x release.
|
||||
type Balancer interface {
|
||||
// Start does the initialization work to bootstrap a Balancer. For example,
|
||||
// this function may start the name resolution and watch the updates. It will
|
||||
// be called when dialing.
|
||||
Start(target string, config BalancerConfig) error
|
||||
// Up informs the Balancer that gRPC has a connection to the server at
|
||||
// addr. It returns down which is called once the connection to addr gets
|
||||
// lost or closed.
|
||||
// TODO: It is not clear how to construct and take advantage of the meaningful error
|
||||
// parameter for down. Need realistic demands to guide.
|
||||
Up(addr Address) (down func(error))
|
||||
// Get gets the address of a server for the RPC corresponding to ctx.
|
||||
// i) If it returns a connected address, gRPC internals issues the RPC on the
|
||||
// connection to this address;
|
||||
// ii) If it returns an address on which the connection is under construction
|
||||
// (initiated by Notify(...)) but not connected, gRPC internals
|
||||
// * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
|
||||
// Shutdown state;
|
||||
// or
|
||||
// * issues RPC on the connection otherwise.
|
||||
// iii) If it returns an address on which the connection does not exist, gRPC
|
||||
// internals treats it as an error and will fail the corresponding RPC.
|
||||
//
|
||||
// Therefore, the following is the recommended rule when writing a custom Balancer.
|
||||
// If opts.BlockingWait is true, it should return a connected address or
|
||||
// block if there is no connected address. It should respect the timeout or
|
||||
// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
|
||||
// RPCs), it should return an address it has notified via Notify(...) immediately
|
||||
// instead of blocking.
|
||||
//
|
||||
// The function returns put which is called once the rpc has completed or failed.
|
||||
// put can collect and report RPC stats to a remote load balancer.
|
||||
//
|
||||
// This function should only return the errors Balancer cannot recover by itself.
|
||||
// gRPC internals will fail the RPC if an error is returned.
|
||||
Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
|
||||
// Notify returns a channel that is used by gRPC internals to watch the addresses
|
||||
// gRPC needs to connect. The addresses might be from a name resolver or remote
|
||||
// load balancer. gRPC internals will compare it with the existing connected
|
||||
// addresses. If the address Balancer notified is not in the existing connected
|
||||
// addresses, gRPC starts to connect the address. If an address in the existing
|
||||
// connected addresses is not in the notification list, the corresponding connection
|
||||
// is shutdown gracefully. Otherwise, there are no operations to take. Note that
|
||||
// the Address slice must be the full list of the Addresses which should be connected.
|
||||
// It is NOT delta.
|
||||
Notify() <-chan []Address
|
||||
// Close shuts down the balancer.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
|
||||
// the name resolution updates and updates the addresses available correspondingly.
|
||||
//
|
||||
// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
|
||||
func RoundRobin(r naming.Resolver) Balancer {
|
||||
return &roundRobin{r: r}
|
||||
}
|
||||
|
||||
type addrInfo struct {
|
||||
addr Address
|
||||
connected bool
|
||||
}
|
||||
|
||||
type roundRobin struct {
|
||||
r naming.Resolver
|
||||
w naming.Watcher
|
||||
addrs []*addrInfo // all the addresses the client should potentially connect
|
||||
mu sync.Mutex
|
||||
addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
|
||||
next int // index of the next address to return for Get()
|
||||
waitCh chan struct{} // the channel to block when there is no connected address available
|
||||
done bool // The Balancer is closed.
|
||||
}
|
||||
|
||||
func (rr *roundRobin) watchAddrUpdates() error {
|
||||
updates, err := rr.w.Next()
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
|
||||
return err
|
||||
}
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
for _, update := range updates {
|
||||
addr := Address{
|
||||
Addr: update.Addr,
|
||||
Metadata: update.Metadata,
|
||||
}
|
||||
switch update.Op {
|
||||
case naming.Add:
|
||||
var exist bool
|
||||
for _, v := range rr.addrs {
|
||||
if addr == v.addr {
|
||||
exist = true
|
||||
grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
|
||||
break
|
||||
}
|
||||
}
|
||||
if exist {
|
||||
continue
|
||||
}
|
||||
rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
|
||||
case naming.Delete:
|
||||
for i, v := range rr.addrs {
|
||||
if addr == v.addr {
|
||||
copy(rr.addrs[i:], rr.addrs[i+1:])
|
||||
rr.addrs = rr.addrs[:len(rr.addrs)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
grpclog.Errorln("Unknown update.Op ", update.Op)
|
||||
}
|
||||
}
|
||||
// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
|
||||
open := make([]Address, len(rr.addrs))
|
||||
for i, v := range rr.addrs {
|
||||
open[i] = v.addr
|
||||
}
|
||||
if rr.done {
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
select {
|
||||
case <-rr.addrCh:
|
||||
default:
|
||||
}
|
||||
rr.addrCh <- open
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rr *roundRobin) Start(target string, config BalancerConfig) error {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
if rr.done {
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
if rr.r == nil {
|
||||
// If there is no name resolver installed, it is not needed to
|
||||
// do name resolution. In this case, target is added into rr.addrs
|
||||
// as the only address available and rr.addrCh stays nil.
|
||||
rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
|
||||
return nil
|
||||
}
|
||||
w, err := rr.r.Resolve(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rr.w = w
|
||||
rr.addrCh = make(chan []Address, 1)
|
||||
go func() {
|
||||
for {
|
||||
if err := rr.watchAddrUpdates(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Up sets the connected state of addr and sends notification if there are pending
|
||||
// Get() calls.
|
||||
func (rr *roundRobin) Up(addr Address) func(error) {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
var cnt int
|
||||
for _, a := range rr.addrs {
|
||||
if a.addr == addr {
|
||||
if a.connected {
|
||||
return nil
|
||||
}
|
||||
a.connected = true
|
||||
}
|
||||
if a.connected {
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
// addr is only one which is connected. Notify the Get() callers who are blocking.
|
||||
if cnt == 1 && rr.waitCh != nil {
|
||||
close(rr.waitCh)
|
||||
rr.waitCh = nil
|
||||
}
|
||||
return func(err error) {
|
||||
rr.down(addr, err)
|
||||
}
|
||||
}
|
||||
|
||||
// down unsets the connected state of addr.
|
||||
func (rr *roundRobin) down(addr Address, err error) {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
for _, a := range rr.addrs {
|
||||
if addr == a.addr {
|
||||
a.connected = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the next addr in the rotation.
|
||||
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
|
||||
var ch chan struct{}
|
||||
rr.mu.Lock()
|
||||
if rr.done {
|
||||
rr.mu.Unlock()
|
||||
err = ErrClientConnClosing
|
||||
return
|
||||
}
|
||||
|
||||
if len(rr.addrs) > 0 {
|
||||
if rr.next >= len(rr.addrs) {
|
||||
rr.next = 0
|
||||
}
|
||||
next := rr.next
|
||||
for {
|
||||
a := rr.addrs[next]
|
||||
next = (next + 1) % len(rr.addrs)
|
||||
if a.connected {
|
||||
addr = a.addr
|
||||
rr.next = next
|
||||
rr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if next == rr.next {
|
||||
// Has iterated all the possible address but none is connected.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !opts.BlockingWait {
|
||||
if len(rr.addrs) == 0 {
|
||||
rr.mu.Unlock()
|
||||
err = status.Errorf(codes.Unavailable, "there is no address available")
|
||||
return
|
||||
}
|
||||
// Returns the next addr on rr.addrs for failfast RPCs.
|
||||
addr = rr.addrs[rr.next].addr
|
||||
rr.next++
|
||||
rr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// Wait on rr.waitCh for non-failfast RPCs.
|
||||
if rr.waitCh == nil {
|
||||
ch = make(chan struct{})
|
||||
rr.waitCh = ch
|
||||
} else {
|
||||
ch = rr.waitCh
|
||||
}
|
||||
rr.mu.Unlock()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
case <-ch:
|
||||
rr.mu.Lock()
|
||||
if rr.done {
|
||||
rr.mu.Unlock()
|
||||
err = ErrClientConnClosing
|
||||
return
|
||||
}
|
||||
|
||||
if len(rr.addrs) > 0 {
|
||||
if rr.next >= len(rr.addrs) {
|
||||
rr.next = 0
|
||||
}
|
||||
next := rr.next
|
||||
for {
|
||||
a := rr.addrs[next]
|
||||
next = (next + 1) % len(rr.addrs)
|
||||
if a.connected {
|
||||
addr = a.addr
|
||||
rr.next = next
|
||||
rr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if next == rr.next {
|
||||
// Has iterated all the possible address but none is connected.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// The newly added addr got removed by Down() again.
|
||||
if rr.waitCh == nil {
|
||||
ch = make(chan struct{})
|
||||
rr.waitCh = ch
|
||||
} else {
|
||||
ch = rr.waitCh
|
||||
}
|
||||
rr.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rr *roundRobin) Notify() <-chan []Address {
|
||||
return rr.addrCh
|
||||
}
|
||||
|
||||
func (rr *roundRobin) Close() error {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
if rr.done {
|
||||
return errBalancerClosed
|
||||
}
|
||||
rr.done = true
|
||||
if rr.w != nil {
|
||||
rr.w.Close()
|
||||
}
|
||||
if rr.waitCh != nil {
|
||||
close(rr.waitCh)
|
||||
rr.waitCh = nil
|
||||
}
|
||||
if rr.addrCh != nil {
|
||||
close(rr.addrCh)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
|
||||
// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
|
||||
// returns the only address Up by resetTransport().
|
||||
type pickFirst struct {
|
||||
*roundRobin
|
||||
}
|
|
@ -101,6 +101,9 @@ type SubConn interface {
|
|||
// a new connection will be created.
|
||||
//
|
||||
// This will trigger a state transition for the SubConn.
|
||||
//
|
||||
// Deprecated: This method is now part of the ClientConn interface and will
|
||||
// eventually be removed from here.
|
||||
UpdateAddresses([]resolver.Address)
|
||||
// Connect starts the connecting for this SubConn.
|
||||
Connect()
|
||||
|
@ -111,6 +114,9 @@ type NewSubConnOptions struct {
|
|||
// CredsBundle is the credentials bundle that will be used in the created
|
||||
// SubConn. If it's nil, the original creds from grpc DialOptions will be
|
||||
// used.
|
||||
//
|
||||
// Deprecated: Use the Attributes field in resolver.Address to pass
|
||||
// arbitrary data to the credential handshaker.
|
||||
CredsBundle credentials.Bundle
|
||||
// HealthCheckEnabled indicates whether health check service should be
|
||||
// enabled on this SubConn
|
||||
|
@ -123,7 +129,7 @@ type State struct {
|
|||
// determine the state of the ClientConn.
|
||||
ConnectivityState connectivity.State
|
||||
// Picker is used to choose connections (SubConns) for RPCs.
|
||||
Picker V2Picker
|
||||
Picker Picker
|
||||
}
|
||||
|
||||
// ClientConn represents a gRPC ClientConn.
|
||||
|
@ -140,21 +146,19 @@ type ClientConn interface {
|
|||
// RemoveSubConn removes the SubConn from ClientConn.
|
||||
// The SubConn will be shutdown.
|
||||
RemoveSubConn(SubConn)
|
||||
|
||||
// UpdateBalancerState is called by balancer to notify gRPC that some internal
|
||||
// state in balancer has changed.
|
||||
// UpdateAddresses updates the addresses used in the passed in SubConn.
|
||||
// gRPC checks if the currently connected address is still in the new list.
|
||||
// If so, the connection will be kept. Else, the connection will be
|
||||
// gracefully closed, and a new connection will be created.
|
||||
//
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
||||
// on the new picker to pick new SubConn.
|
||||
//
|
||||
// Deprecated: use UpdateState instead
|
||||
UpdateBalancerState(s connectivity.State, p Picker)
|
||||
// This will trigger a state transition for the SubConn.
|
||||
UpdateAddresses(SubConn, []resolver.Address)
|
||||
|
||||
// UpdateState notifies gRPC that the balancer's internal state has
|
||||
// changed.
|
||||
//
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
||||
// on the new picker to pick new SubConns.
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call
|
||||
// Pick on the new Picker to pick new SubConns.
|
||||
UpdateState(State)
|
||||
|
||||
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
|
||||
|
@ -180,6 +184,10 @@ type BuildOptions struct {
|
|||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
// ChannelzParentID is the entity parent's channelz unique identification number.
|
||||
ChannelzParentID int64
|
||||
// CustomUserAgent is the custom user agent set on the parent ClientConn.
|
||||
// The balancer should set the same custom user agent if it creates a
|
||||
// ClientConn.
|
||||
CustomUserAgent string
|
||||
// Target contains the parsed address info of the dial target. It is the same resolver.Target as
|
||||
// passed to the resolver.
|
||||
// See the documentation for the resolver.Target type for details about what it contains.
|
||||
|
@ -232,56 +240,17 @@ type DoneInfo struct {
|
|||
|
||||
var (
|
||||
// ErrNoSubConnAvailable indicates no SubConn is available for pick().
|
||||
// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
|
||||
// gRPC will block the RPC until a new picker is available via UpdateState().
|
||||
ErrNoSubConnAvailable = errors.New("no SubConn is available")
|
||||
// ErrTransientFailure indicates all SubConns are in TransientFailure.
|
||||
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
|
||||
ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
|
||||
//
|
||||
// Deprecated: return an appropriate error based on the last resolution or
|
||||
// connection attempt instead. The behavior is the same for any non-gRPC
|
||||
// status error.
|
||||
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
|
||||
)
|
||||
|
||||
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
// Balancer is expected to generate a new picker from its snapshot every time its
|
||||
// internal state has changed.
|
||||
//
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
||||
//
|
||||
// Deprecated: use V2Picker instead
|
||||
type Picker interface {
|
||||
// Pick returns the SubConn to be used to send the RPC.
|
||||
// The returned SubConn must be one returned by NewSubConn().
|
||||
//
|
||||
// This functions is expected to return:
|
||||
// - a SubConn that is known to be READY;
|
||||
// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
|
||||
// made (for example, some SubConn is in CONNECTING mode);
|
||||
// - other errors if no active connecting is happening (for example, all SubConn
|
||||
// are in TRANSIENT_FAILURE mode).
|
||||
//
|
||||
// If a SubConn is returned:
|
||||
// - If it is READY, gRPC will send the RPC on it;
|
||||
// - If it is not ready, or becomes not ready after it's returned, gRPC will
|
||||
// block until UpdateBalancerState() is called and will call pick on the
|
||||
// new picker. The done function returned from Pick(), if not nil, will be
|
||||
// called with nil error, no bytes sent and no bytes received.
|
||||
//
|
||||
// If the returned error is not nil:
|
||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
|
||||
// - If the error is ErrTransientFailure or implements IsTransientFailure()
|
||||
// bool, returning true:
|
||||
// - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
|
||||
// is called to pick again;
|
||||
// - Otherwise, RPC will fail with unavailable error.
|
||||
// - Else (error is other non-nil error):
|
||||
// - The RPC will fail with the error's status code, or Unknown if it is
|
||||
// not a status error.
|
||||
//
|
||||
// The returned done() function will be called once the rpc has finished,
|
||||
// with the final status of that RPC. If the SubConn returned is not a
|
||||
// valid SubConn type, done may not be called. done may be nil if balancer
|
||||
// doesn't care about the RPC status.
|
||||
Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
|
||||
}
|
||||
|
||||
// PickResult contains information related to a connection chosen for an RPC.
|
||||
type PickResult struct {
|
||||
// SubConn is the connection to use for this pick, if its state is Ready.
|
||||
|
@ -297,24 +266,19 @@ type PickResult struct {
|
|||
Done func(DoneInfo)
|
||||
}
|
||||
|
||||
type transientFailureError struct {
|
||||
error
|
||||
}
|
||||
// TransientFailureError returns e. It exists for backward compatibility and
|
||||
// will be deleted soon.
|
||||
//
|
||||
// Deprecated: no longer necessary, picker errors are treated this way by
|
||||
// default.
|
||||
func TransientFailureError(e error) error { return e }
|
||||
|
||||
func (e *transientFailureError) IsTransientFailure() bool { return true }
|
||||
|
||||
// TransientFailureError wraps err in an error implementing
|
||||
// IsTransientFailure() bool, returning true.
|
||||
func TransientFailureError(err error) error {
|
||||
return &transientFailureError{error: err}
|
||||
}
|
||||
|
||||
// V2Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
// Balancer is expected to generate a new picker from its snapshot every time its
|
||||
// internal state has changed.
|
||||
//
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
||||
type V2Picker interface {
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateState().
|
||||
type Picker interface {
|
||||
// Pick returns the connection to use for this RPC and related information.
|
||||
//
|
||||
// Pick should not block. If the balancer needs to do I/O or any blocking
|
||||
|
@ -327,14 +291,13 @@ type V2Picker interface {
|
|||
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
|
||||
// Picker is provided by the balancer (using ClientConn.UpdateState).
|
||||
//
|
||||
// - If the error implements IsTransientFailure() bool, returning true,
|
||||
// wait for ready RPCs will wait, but non-wait for ready RPCs will be
|
||||
// terminated with this error's Error() string and status code
|
||||
// Unavailable.
|
||||
// - If the error is a status error (implemented by the grpc/status
|
||||
// package), gRPC will terminate the RPC with the code and message
|
||||
// provided.
|
||||
//
|
||||
// - Any other errors terminate all RPCs with the code and message
|
||||
// provided. If the error is not a status error, it will be converted by
|
||||
// gRPC to a status error with code Unknown.
|
||||
// - For all other errors, wait for ready RPCs will wait, but non-wait for
|
||||
// ready RPCs will be terminated with this error's Error() string and
|
||||
// status code Unavailable.
|
||||
Pick(info PickInfo) (PickResult, error)
|
||||
}
|
||||
|
||||
|
@ -343,29 +306,21 @@ type V2Picker interface {
|
|||
//
|
||||
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
|
||||
//
|
||||
// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
|
||||
// to be called synchronously from the same goroutine.
|
||||
// There's no guarantee on picker.Pick, it may be called anytime.
|
||||
// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
|
||||
// guaranteed to be called synchronously from the same goroutine. There's no
|
||||
// guarantee on picker.Pick, it may be called anytime.
|
||||
type Balancer interface {
|
||||
// HandleSubConnStateChange is called by gRPC when the connectivity state
|
||||
// of sc has changed.
|
||||
// Balancer is expected to aggregate all the state of SubConn and report
|
||||
// that back to gRPC.
|
||||
// Balancer should also generate and update Pickers when its internal state has
|
||||
// been changed by the new state.
|
||||
//
|
||||
// Deprecated: if V2Balancer is implemented by the Balancer,
|
||||
// UpdateSubConnState will be called instead.
|
||||
HandleSubConnStateChange(sc SubConn, state connectivity.State)
|
||||
// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
|
||||
// balancers.
|
||||
// Balancer can create new SubConn or remove SubConn with the addresses.
|
||||
// An empty address slice and a non-nil error will be passed if the resolver returns
|
||||
// non-nil error to gRPC.
|
||||
//
|
||||
// Deprecated: if V2Balancer is implemented by the Balancer,
|
||||
// UpdateClientConnState will be called instead.
|
||||
HandleResolvedAddrs([]resolver.Address, error)
|
||||
// UpdateClientConnState is called by gRPC when the state of the ClientConn
|
||||
// changes. If the error returned is ErrBadResolverState, the ClientConn
|
||||
// will begin calling ResolveNow on the active name resolver with
|
||||
// exponential backoff until a subsequent call to UpdateClientConnState
|
||||
// returns a nil error. Any other errors are currently ignored.
|
||||
UpdateClientConnState(ClientConnState) error
|
||||
// ResolverError is called by gRPC when the name resolver reports an error.
|
||||
ResolverError(error)
|
||||
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
||||
// changes.
|
||||
UpdateSubConnState(SubConn, SubConnState)
|
||||
// Close closes the balancer. The balancer is not required to call
|
||||
// ClientConn.RemoveSubConn for its existing SubConns.
|
||||
Close()
|
||||
|
@ -393,27 +348,6 @@ type ClientConnState struct {
|
|||
// problem with the provided name resolver data.
|
||||
var ErrBadResolverState = errors.New("bad resolver state")
|
||||
|
||||
// V2Balancer is defined for documentation purposes. If a Balancer also
|
||||
// implements V2Balancer, its UpdateClientConnState method will be called
|
||||
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
|
||||
// instead of HandleSubConnStateChange.
|
||||
type V2Balancer interface {
|
||||
// UpdateClientConnState is called by gRPC when the state of the ClientConn
|
||||
// changes. If the error returned is ErrBadResolverState, the ClientConn
|
||||
// will begin calling ResolveNow on the active name resolver with
|
||||
// exponential backoff until a subsequent call to UpdateClientConnState
|
||||
// returns a nil error. Any other errors are currently ignored.
|
||||
UpdateClientConnState(ClientConnState) error
|
||||
// ResolverError is called by gRPC when the name resolver reports an error.
|
||||
ResolverError(error)
|
||||
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
||||
// changes.
|
||||
UpdateSubConnState(SubConn, SubConnState)
|
||||
// Close closes the balancer. The balancer is not required to call
|
||||
// ClientConn.RemoveSubConn for its existing SubConns.
|
||||
Close()
|
||||
}
|
||||
|
||||
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
|
||||
// and returns one aggregated connectivity state.
|
||||
//
|
||||
|
|
|
@ -19,20 +19,21 @@
|
|||
package base
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("balancer")
|
||||
|
||||
type baseBuilder struct {
|
||||
name string
|
||||
pickerBuilder PickerBuilder
|
||||
v2PickerBuilder V2PickerBuilder
|
||||
config Config
|
||||
}
|
||||
|
||||
|
@ -40,9 +41,8 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
|
|||
bal := &baseBalancer{
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
v2PickerBuilder: bb.v2PickerBuilder,
|
||||
|
||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
||||
subConns: make(map[resolver.Address]subConnInfo),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
config: bb.config,
|
||||
|
@ -50,11 +50,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
|
|||
// Initialize picker to a picker that always returns
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
// may call UpdateState with this picker.
|
||||
if bb.pickerBuilder != nil {
|
||||
bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||
} else {
|
||||
bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
|
||||
}
|
||||
return bal
|
||||
}
|
||||
|
||||
|
@ -62,88 +58,111 @@ func (bb *baseBuilder) Name() string {
|
|||
return bb.name
|
||||
}
|
||||
|
||||
var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
|
||||
type subConnInfo struct {
|
||||
subConn balancer.SubConn
|
||||
attrs *attributes.Attributes
|
||||
}
|
||||
|
||||
type baseBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
v2PickerBuilder V2PickerBuilder
|
||||
|
||||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
|
||||
subConns map[resolver.Address]balancer.SubConn
|
||||
subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses)
|
||||
scStates map[balancer.SubConn]connectivity.State
|
||||
picker balancer.Picker
|
||||
v2Picker balancer.V2Picker
|
||||
config Config
|
||||
|
||||
resolverErr error // the last error reported by the resolver; cleared on successful resolution
|
||||
connErr error // the last connection error; cleared upon leaving TransientFailure
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (b *baseBalancer) ResolverError(err error) {
|
||||
b.resolverErr = err
|
||||
if len(b.subConns) == 0 {
|
||||
b.state = connectivity.TransientFailure
|
||||
}
|
||||
|
||||
if b.state != connectivity.TransientFailure {
|
||||
// The picker will not change since the balancer does not currently
|
||||
// report an error.
|
||||
return
|
||||
}
|
||||
b.regeneratePicker()
|
||||
if b.picker != nil {
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
} else {
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: b.state,
|
||||
Picker: b.v2Picker,
|
||||
Picker: b.picker,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
|
||||
// TODO: handle s.ResolverState.ServiceConfig?
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
|
||||
}
|
||||
if len(s.ResolverState.Addresses) == 0 {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
if logger.V(2) {
|
||||
logger.Info("base.baseBalancer: got new ClientConn state: ", s)
|
||||
}
|
||||
// Successful resolution; clear resolver error and ensure we return nil.
|
||||
b.resolverErr = nil
|
||||
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
|
||||
addrsSet := make(map[resolver.Address]struct{})
|
||||
for _, a := range s.ResolverState.Addresses {
|
||||
addrsSet[a] = struct{}{}
|
||||
if _, ok := b.subConns[a]; !ok {
|
||||
// Strip attributes from addresses before using them as map keys. So
|
||||
// that when two addresses only differ in attributes pointers (but with
|
||||
// the same attribute content), they are considered the same address.
|
||||
//
|
||||
// Note that this doesn't handle the case where the attribute content is
|
||||
// different. So if users want to set different attributes to create
|
||||
// duplicate connections to the same backend, it doesn't work. This is
|
||||
// fine for now, because duplicate is done by setting Metadata today.
|
||||
//
|
||||
// TODO: read attributes to handle duplicate connections.
|
||||
aNoAttrs := a
|
||||
aNoAttrs.Attributes = nil
|
||||
addrsSet[aNoAttrs] = struct{}{}
|
||||
if scInfo, ok := b.subConns[aNoAttrs]; !ok {
|
||||
// a is a new address (not existing in b.subConns).
|
||||
//
|
||||
// When creating SubConn, the original address with attributes is
|
||||
// passed through. So that connection configurations in attributes
|
||||
// (like creds) will be used.
|
||||
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
|
||||
if err != nil {
|
||||
grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
||||
logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
||||
continue
|
||||
}
|
||||
b.subConns[a] = sc
|
||||
b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes}
|
||||
b.scStates[sc] = connectivity.Idle
|
||||
sc.Connect()
|
||||
} else {
|
||||
// Always update the subconn's address in case the attributes
|
||||
// changed.
|
||||
//
|
||||
// The SubConn does a reflect.DeepEqual of the new and old
|
||||
// addresses. So this is a noop if the current address is the same
|
||||
// as the old one (including attributes).
|
||||
scInfo.attrs = a.Attributes
|
||||
b.subConns[aNoAttrs] = scInfo
|
||||
b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a})
|
||||
}
|
||||
}
|
||||
for a, sc := range b.subConns {
|
||||
for a, scInfo := range b.subConns {
|
||||
// a was removed by resolver.
|
||||
if _, ok := addrsSet[a]; !ok {
|
||||
b.cc.RemoveSubConn(sc)
|
||||
b.cc.RemoveSubConn(scInfo.subConn)
|
||||
delete(b.subConns, a)
|
||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||
// The entry will be deleted in HandleSubConnStateChange.
|
||||
// The entry will be deleted in UpdateSubConnState.
|
||||
}
|
||||
}
|
||||
// If resolver state contains no addresses, return an error so ClientConn
|
||||
// will trigger re-resolve. Also records this as an resolver error, so when
|
||||
// the overall state turns transient failure, the error message will have
|
||||
// the zero address information.
|
||||
if len(s.ResolverState.Addresses) == 0 {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -167,52 +186,39 @@ func (b *baseBalancer) mergeErrors() error {
|
|||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
func (b *baseBalancer) regeneratePicker() {
|
||||
if b.state == connectivity.TransientFailure {
|
||||
if b.pickerBuilder != nil {
|
||||
b.picker = NewErrPicker(balancer.ErrTransientFailure)
|
||||
} else {
|
||||
b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
|
||||
}
|
||||
b.picker = NewErrPicker(b.mergeErrors())
|
||||
return
|
||||
}
|
||||
if b.pickerBuilder != nil {
|
||||
readySCs := make(map[resolver.Address]balancer.SubConn)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[addr] = sc
|
||||
}
|
||||
}
|
||||
b.picker = b.pickerBuilder.Build(readySCs)
|
||||
} else {
|
||||
readySCs := make(map[balancer.SubConn]SubConnInfo)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[sc] = SubConnInfo{Address: addr}
|
||||
for addr, scInfo := range b.subConns {
|
||||
if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready {
|
||||
addr.Attributes = scInfo.attrs
|
||||
readySCs[scInfo.subConn] = SubConnInfo{Address: addr}
|
||||
}
|
||||
}
|
||||
b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
panic("not implemented")
|
||||
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||
}
|
||||
|
||||
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||
s := state.ConnectivityState
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
|
||||
if logger.V(2) {
|
||||
logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
|
||||
}
|
||||
oldS, ok := b.scStates[sc]
|
||||
if !ok {
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
|
||||
if logger.V(2) {
|
||||
logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
|
||||
}
|
||||
return
|
||||
}
|
||||
if oldS == connectivity.TransientFailure && s == connectivity.Connecting {
|
||||
// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent
|
||||
// CONNECTING transitions to prevent the aggregated state from being
|
||||
// always CONNECTING when many backends exist but are all down.
|
||||
return
|
||||
}
|
||||
b.scStates[sc] = s
|
||||
switch s {
|
||||
case connectivity.Idle:
|
||||
|
@ -221,29 +227,23 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
|||
// When an address was removed by resolver, b called RemoveSubConn but
|
||||
// kept the sc's state in scStates. Remove state for this sc here.
|
||||
delete(b.scStates, sc)
|
||||
case connectivity.TransientFailure:
|
||||
// Save error to be reported via picker.
|
||||
b.connErr = state.ConnectionError
|
||||
}
|
||||
|
||||
oldAggrState := b.state
|
||||
b.state = b.csEvltr.RecordTransition(oldS, s)
|
||||
|
||||
// Set or clear the last connection error accordingly.
|
||||
b.connErr = state.ConnectionError
|
||||
|
||||
// Regenerate picker when one of the following happens:
|
||||
// - this sc became ready from not-ready
|
||||
// - this sc became not-ready from ready
|
||||
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
|
||||
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
|
||||
// - this sc entered or left ready
|
||||
// - the aggregated state of balancer is TransientFailure
|
||||
// (may need to update error message)
|
||||
if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
|
||||
(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
|
||||
b.state == connectivity.TransientFailure {
|
||||
b.regeneratePicker()
|
||||
}
|
||||
|
||||
if b.picker != nil {
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
} else {
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
|
||||
}
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
|
||||
}
|
||||
|
||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||
|
@ -251,28 +251,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
|||
func (b *baseBalancer) Close() {
|
||||
}
|
||||
|
||||
// NewErrPicker returns a picker that always returns err on Pick().
|
||||
// NewErrPicker returns a Picker that always returns err on Pick().
|
||||
func NewErrPicker(err error) balancer.Picker {
|
||||
return &errPicker{err: err}
|
||||
}
|
||||
|
||||
// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
|
||||
//
|
||||
// Deprecated: use NewErrPicker instead.
|
||||
var NewErrPickerV2 = NewErrPicker
|
||||
|
||||
type errPicker struct {
|
||||
err error // Pick() always returns this err.
|
||||
}
|
||||
|
||||
func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
return nil, nil, p.err
|
||||
}
|
||||
|
||||
// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
|
||||
func NewErrPickerV2(err error) balancer.V2Picker {
|
||||
return &errPickerV2{err: err}
|
||||
}
|
||||
|
||||
type errPickerV2 struct {
|
||||
err error // Pick() always returns this err.
|
||||
}
|
||||
|
||||
func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
return balancer.PickResult{}, p.err
|
||||
}
|
||||
|
|
|
@ -37,15 +37,8 @@ import (
|
|||
|
||||
// PickerBuilder creates balancer.Picker.
|
||||
type PickerBuilder interface {
|
||||
// Build takes a slice of ready SubConns, and returns a picker that will be
|
||||
// used by gRPC to pick a SubConn.
|
||||
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
|
||||
}
|
||||
|
||||
// V2PickerBuilder creates balancer.V2Picker.
|
||||
type V2PickerBuilder interface {
|
||||
// Build returns a picker that will be used by gRPC to pick a SubConn.
|
||||
Build(info PickerBuildInfo) balancer.V2Picker
|
||||
Build(info PickerBuildInfo) balancer.Picker
|
||||
}
|
||||
|
||||
// PickerBuildInfo contains information needed by the picker builder to
|
||||
|
@ -62,32 +55,17 @@ type SubConnInfo struct {
|
|||
Address resolver.Address // the address used to create this SubConn
|
||||
}
|
||||
|
||||
// NewBalancerBuilder returns a balancer builder. The balancers
|
||||
// built by this builder will use the picker builder to build pickers.
|
||||
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
|
||||
return NewBalancerBuilderWithConfig(name, pb, Config{})
|
||||
}
|
||||
|
||||
// Config contains the config info about the base balancer builder.
|
||||
type Config struct {
|
||||
// HealthCheck indicates whether health checking should be enabled for this specific balancer.
|
||||
HealthCheck bool
|
||||
}
|
||||
|
||||
// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
|
||||
func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
|
||||
// NewBalancerBuilder returns a base balancer builder configured by the provided config.
|
||||
func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
|
||||
return &baseBuilder{
|
||||
name: name,
|
||||
pickerBuilder: pb,
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
|
||||
func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
|
||||
return &baseBuilder{
|
||||
name: name,
|
||||
v2PickerBuilder: pb,
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package state declares grpclb types to be set by resolvers wishing to pass
|
||||
// information to grpclb via resolver.State Attributes.
|
||||
package state
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// keyType is the key to use for storing State in Attributes.
|
||||
type keyType string
|
||||
|
||||
const key = keyType("grpc.grpclb.state")
|
||||
|
||||
// State contains gRPCLB-relevant data passed from the name resolver.
|
||||
type State struct {
|
||||
// BalancerAddresses contains the remote load balancer address(es). If
|
||||
// set, overrides any resolver-provided addresses with Type of GRPCLB.
|
||||
BalancerAddresses []resolver.Address
|
||||
}
|
||||
|
||||
// Set returns a copy of the provided state with attributes containing s. s's
|
||||
// data should not be mutated after calling Set.
|
||||
func Set(state resolver.State, s *State) resolver.State {
|
||||
state.Attributes = state.Attributes.WithValues(key, s)
|
||||
return state
|
||||
}
|
||||
|
||||
// Get returns the grpclb State in the resolver.State, or nil if not present.
|
||||
// The returned data should not be mutated.
|
||||
func Get(state resolver.State) *State {
|
||||
s, _ := state.Attributes.Value(key).(*State)
|
||||
return s
|
||||
}
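
For context only (not part of the vendored change), a rough sketch of how a custom resolver might use `Set` and `Get`; the import path and addresses are illustrative:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

func main() {
	// A resolver wishing to hand balancer addresses to grpclb attaches them
	// to the resolver.State it reports.
	s := resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}},
	}
	s = state.Set(s, &state.State{
		BalancerAddresses: []resolver.Address{{Addr: "lb.example.com:443"}},
	})

	// grpclb later reads them back from the same State.
	if lb := state.Get(s); lb != nil {
		fmt.Println(lb.BalancerAddresses[0].Addr) // lb.example.com:443
	}
}
```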
|
|
@ -33,9 +33,11 @@ import (
|
|||
// Name is the name of round_robin balancer.
|
||||
const Name = "round_robin"
|
||||
|
||||
var logger = grpclog.Component("roundrobin")
|
||||
|
||||
// newBuilder creates a new roundrobin balancer builder.
|
||||
func newBuilder() balancer.Builder {
|
||||
return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
|
||||
return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -44,10 +46,10 @@ func init() {
|
|||
|
||||
type rrPickerBuilder struct{}
|
||||
|
||||
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
|
||||
grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
|
||||
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
|
||||
logger.Infof("roundrobinPicker: newPicker called with info: %v", info)
|
||||
if len(info.ReadySCs) == 0 {
|
||||
return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
|
||||
return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||
}
|
||||
var scs []balancer.SubConn
|
||||
for sc := range info.ReadySCs {
|
||||
|
|
|
@ -24,8 +24,8 @@ import (
|
|||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
@ -74,11 +74,7 @@ func (ccb *ccBalancerWrapper) watcher() {
|
|||
}
|
||||
ccb.balancerMu.Lock()
|
||||
su := t.(*scStateUpdate)
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
|
||||
} else {
|
||||
ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
|
||||
}
|
||||
ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
|
||||
ccb.balancerMu.Unlock()
|
||||
case <-ccb.done.Done():
|
||||
}
|
||||
|
@ -123,19 +119,13 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
|
|||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
||||
ccb.balancerMu.Lock()
|
||||
defer ccb.balancerMu.Unlock()
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
return ub.UpdateClientConnState(*ccs)
|
||||
}
|
||||
ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
|
||||
return nil
|
||||
return ccb.balancer.UpdateClientConnState(*ccs)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
ccb.balancerMu.Lock()
|
||||
ub.ResolverError(err)
|
||||
ccb.balancer.ResolverError(err)
|
||||
ccb.balancerMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
|
@ -173,19 +163,12 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
|||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// Update picker before updating state. Even though the ordering here does
|
||||
// not matter, it can lead to multiple calls of Pick in the common start-up
|
||||
// case where we wait for ready and then perform an RPC. If the picker is
|
||||
// updated later, we could call the "connecting" picker when the state is
|
||||
// updated, and then call the "ready" picker after the picker gets updated.
|
||||
ccb.cc.blockingpicker.updatePicker(p)
|
||||
ccb.cc.csMgr.updateState(s)
|
||||
acbw.UpdateAddresses(addrs)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||
|
@ -199,7 +182,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
|||
// case where we wait for ready and then perform an RPC. If the picker is
|
||||
// updated later, we could call the "connecting" picker when the state is
|
||||
// updated, and then call the "ready" picker after the picker gets updated.
|
||||
ccb.cc.blockingpicker.updatePickerV2(s.Picker)
|
||||
ccb.cc.blockingpicker.updatePicker(s.Picker)
|
||||
ccb.cc.csMgr.updateState(s.ConnectivityState)
|
||||
}
|
||||
|
||||
|
@ -222,7 +205,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
|||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
if len(addrs) <= 0 {
|
||||
acbw.ac.tearDown(errConnDrain)
|
||||
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
return
|
||||
}
|
||||
if !acbw.ac.tryUpdateAddrs(addrs) {
|
||||
|
@ -237,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
|||
acbw.ac.acbw = nil
|
||||
acbw.ac.mu.Unlock()
|
||||
acState := acbw.ac.getState()
|
||||
acbw.ac.tearDown(errConnDrain)
|
||||
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
|
||||
if acState == connectivity.Shutdown {
|
||||
return
|
||||
|
@ -245,7 +228,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
|||
|
||||
ac, err := cc.newAddrConn(addrs, opts)
|
||||
if err != nil {
|
||||
grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
||||
channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
||||
return
|
||||
}
|
||||
acbw.ac = ac
|
||||
@ -1,334 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
type balancerWrapperBuilder struct {
|
||||
b Balancer // The v1 balancer.
|
||||
}
|
||||
|
||||
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
|
||||
bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
|
||||
DialCreds: opts.DialCreds,
|
||||
Dialer: opts.Dialer,
|
||||
})
|
||||
_, pickfirst := bwb.b.(*pickFirst)
|
||||
bw := &balancerWrapper{
|
||||
balancer: bwb.b,
|
||||
pickfirst: pickfirst,
|
||||
cc: cc,
|
||||
targetAddr: opts.Target.Endpoint,
|
||||
startCh: make(chan struct{}),
|
||||
conns: make(map[resolver.Address]balancer.SubConn),
|
||||
connSt: make(map[balancer.SubConn]*scState),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
state: connectivity.Idle,
|
||||
}
|
||||
cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
|
||||
go bw.lbWatcher()
|
||||
return bw
|
||||
}
|
||||
|
||||
func (bwb *balancerWrapperBuilder) Name() string {
|
||||
return "wrapper"
|
||||
}
|
||||
|
||||
type scState struct {
|
||||
addr Address // The v1 address type.
|
||||
s connectivity.State
|
||||
down func(error)
|
||||
}
|
||||
|
||||
type balancerWrapper struct {
|
||||
balancer Balancer // The v1 balancer.
|
||||
pickfirst bool
|
||||
|
||||
cc balancer.ClientConn
|
||||
targetAddr string // Target without the scheme.
|
||||
|
||||
mu sync.Mutex
|
||||
conns map[resolver.Address]balancer.SubConn
|
||||
connSt map[balancer.SubConn]*scState
|
||||
// This channel is closed when handling the first resolver result.
|
||||
// lbWatcher blocks until this is closed, to avoid race between
|
||||
// - NewSubConn is created, cc wants to notify balancer of state changes;
|
||||
// - Build hasn't return, cc doesn't have access to balancer.
|
||||
startCh chan struct{}
|
||||
|
||||
// To aggregate the connectivity state.
|
||||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
}
|
||||
|
||||
// lbWatcher watches the Notify channel of the balancer and manages
|
||||
// connections accordingly.
|
||||
func (bw *balancerWrapper) lbWatcher() {
|
||||
<-bw.startCh
|
||||
notifyCh := bw.balancer.Notify()
|
||||
if notifyCh == nil {
|
||||
// There's no resolver in the balancer. Connect directly.
|
||||
a := resolver.Address{
|
||||
Addr: bw.targetAddr,
|
||||
Type: resolver.Backend,
|
||||
}
|
||||
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
|
||||
} else {
|
||||
bw.mu.Lock()
|
||||
bw.conns[a] = sc
|
||||
bw.connSt[sc] = &scState{
|
||||
addr: Address{Addr: bw.targetAddr},
|
||||
s: connectivity.Idle,
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
sc.Connect()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for addrs := range notifyCh {
|
||||
grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
|
||||
if bw.pickfirst {
|
||||
var (
|
||||
oldA resolver.Address
|
||||
oldSC balancer.SubConn
|
||||
)
|
||||
bw.mu.Lock()
|
||||
for oldA, oldSC = range bw.conns {
|
||||
break
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
if len(addrs) <= 0 {
|
||||
if oldSC != nil {
|
||||
// Teardown old sc.
|
||||
bw.mu.Lock()
|
||||
delete(bw.conns, oldA)
|
||||
delete(bw.connSt, oldSC)
|
||||
bw.mu.Unlock()
|
||||
bw.cc.RemoveSubConn(oldSC)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var newAddrs []resolver.Address
|
||||
for _, a := range addrs {
|
||||
newAddr := resolver.Address{
|
||||
Addr: a.Addr,
|
||||
Type: resolver.Backend, // All addresses from balancer are all backends.
|
||||
ServerName: "",
|
||||
Metadata: a.Metadata,
|
||||
}
|
||||
newAddrs = append(newAddrs, newAddr)
|
||||
}
|
||||
if oldSC == nil {
|
||||
// Create new sc.
|
||||
sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
|
||||
} else {
|
||||
bw.mu.Lock()
|
||||
// For pickfirst, there should be only one SubConn, so the
|
||||
// address doesn't matter. All states updating (up and down)
|
||||
// and picking should all happen on that only SubConn.
|
||||
bw.conns[resolver.Address{}] = sc
|
||||
bw.connSt[sc] = &scState{
|
||||
addr: addrs[0], // Use the first address.
|
||||
s: connectivity.Idle,
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
sc.Connect()
|
||||
}
|
||||
} else {
|
||||
bw.mu.Lock()
|
||||
bw.connSt[oldSC].addr = addrs[0]
|
||||
bw.mu.Unlock()
|
||||
oldSC.UpdateAddresses(newAddrs)
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
add []resolver.Address // Addresses need to setup connections.
|
||||
del []balancer.SubConn // Connections need to tear down.
|
||||
)
|
||||
resAddrs := make(map[resolver.Address]Address)
|
||||
for _, a := range addrs {
|
||||
resAddrs[resolver.Address{
|
||||
Addr: a.Addr,
|
||||
Type: resolver.Backend, // All addresses from balancer are all backends.
|
||||
ServerName: "",
|
||||
Metadata: a.Metadata,
|
||||
}] = a
|
||||
}
|
||||
bw.mu.Lock()
|
||||
for a := range resAddrs {
|
||||
if _, ok := bw.conns[a]; !ok {
|
||||
add = append(add, a)
|
||||
}
|
||||
}
|
||||
for a, c := range bw.conns {
|
||||
if _, ok := resAddrs[a]; !ok {
|
||||
del = append(del, c)
|
||||
delete(bw.conns, a)
|
||||
// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
|
||||
}
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
for _, a := range add {
|
||||
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
|
||||
} else {
|
||||
bw.mu.Lock()
|
||||
bw.conns[a] = sc
|
||||
bw.connSt[sc] = &scState{
|
||||
addr: resAddrs[a],
|
||||
s: connectivity.Idle,
|
||||
}
|
||||
bw.mu.Unlock()
|
||||
sc.Connect()
|
||||
}
|
||||
}
|
||||
for _, c := range del {
|
||||
bw.cc.RemoveSubConn(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
scSt, ok := bw.connSt[sc]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if s == connectivity.Idle {
|
||||
sc.Connect()
|
||||
}
|
||||
oldS := scSt.s
|
||||
scSt.s = s
|
||||
if oldS != connectivity.Ready && s == connectivity.Ready {
|
||||
scSt.down = bw.balancer.Up(scSt.addr)
|
||||
} else if oldS == connectivity.Ready && s != connectivity.Ready {
|
||||
if scSt.down != nil {
|
||||
scSt.down(errConnClosing)
|
||||
}
|
||||
}
|
||||
sa := bw.csEvltr.RecordTransition(oldS, s)
|
||||
if bw.state != sa {
|
||||
bw.state = sa
|
||||
}
|
||||
bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
|
||||
if s == connectivity.Shutdown {
|
||||
// Remove state for this sc.
|
||||
delete(bw.connSt, sc)
|
||||
}
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
select {
|
||||
case <-bw.startCh:
|
||||
default:
|
||||
close(bw.startCh)
|
||||
}
|
||||
// There should be a resolver inside the balancer.
|
||||
// All updates here, if any, are ignored.
|
||||
}
|
||||
|
||||
func (bw *balancerWrapper) Close() {
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
select {
|
||||
case <-bw.startCh:
|
||||
default:
|
||||
close(bw.startCh)
|
||||
}
|
||||
bw.balancer.Close()
|
||||
}
|
||||
|
||||
// The picker is the balancerWrapper itself.
|
||||
// It either blocks or returns error, consistent with v1 balancer Get().
|
||||
func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
|
||||
failfast := true // Default failfast is true.
|
||||
if ss, ok := rpcInfoFromContext(info.Ctx); ok {
|
||||
failfast = ss.failfast
|
||||
}
|
||||
a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
|
||||
if err != nil {
|
||||
return balancer.PickResult{}, toRPCErr(err)
|
||||
}
|
||||
if p != nil {
|
||||
result.Done = func(balancer.DoneInfo) { p() }
|
||||
defer func() {
|
||||
if err != nil {
|
||||
p()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
bw.mu.Lock()
|
||||
defer bw.mu.Unlock()
|
||||
if bw.pickfirst {
|
||||
// Get the first sc in conns.
|
||||
for _, result.SubConn = range bw.conns {
|
||||
return result, nil
|
||||
}
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
var ok1 bool
|
||||
result.SubConn, ok1 = bw.conns[resolver.Address{
|
||||
Addr: a.Addr,
|
||||
Type: resolver.Backend,
|
||||
ServerName: "",
|
||||
Metadata: a.Metadata,
|
||||
}]
|
||||
s, ok2 := bw.connSt[result.SubConn]
|
||||
if !ok1 || !ok2 {
|
||||
// This can only happen due to a race where Get() returned an address
|
||||
// that was subsequently removed by Notify. In this case we should
|
||||
// retry always.
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
switch s.s {
|
||||
case connectivity.Ready, connectivity.Idle:
|
||||
return result, nil
|
||||
case connectivity.Shutdown, connectivity.TransientFailure:
|
||||
// If the returned sc has been shut down or is in transient failure,
|
||||
// return error, and this RPC will fail or wait for another picker (if
|
||||
// non-failfast).
|
||||
return balancer.PickResult{}, balancer.ErrTransientFailure
|
||||
default:
|
||||
// For other states (connecting or unknown), the v1 balancer would
|
||||
// traditionally wait until ready and then issue the RPC. Returning
|
||||
// ErrNoSubConnAvailable will be a slight improvement in that it will
|
||||
// allow the balancer to choose another address in case others are
|
||||
// connected.
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
@ -23,7 +23,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -35,10 +34,11 @@ import (
|
|||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
iresolver "google.golang.org/grpc/internal/resolver"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/resolver"
|
||||
|
@ -48,6 +48,7 @@ import (
|
|||
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
||||
_ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
|
||||
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
|
||||
_ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver.
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -68,8 +69,6 @@ var (
|
|||
errConnDrain = errors.New("grpc: the connection is drained")
|
||||
// errConnClosing indicates that the connection is closing.
|
||||
errConnClosing = errors.New("grpc: the connection is closing")
|
||||
// errBalancerClosed indicates that the balancer is closed.
|
||||
errBalancerClosed = errors.New("grpc: balancer is closed")
|
||||
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
|
||||
// service config.
|
||||
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
|
||||
|
@ -106,6 +105,17 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
|||
return DialContext(context.Background(), target, opts...)
|
||||
}
|
||||
|
||||
type defaultConfigSelector struct {
	sc *ServiceConfig
}

func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	return &iresolver.RPCConfig{
		Context:      rpcInfo.Context,
		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
	}, nil
}
|
||||
|
||||
// DialContext creates a client connection to the given target. By default, it's
|
||||
// a non-blocking dial (the function won't wait for connections to be
|
||||
// established, and connecting happens in the background). To make it a blocking
|
||||
|
@ -133,6 +143,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
firstResolveEvent: grpcsync.NewEvent(),
|
||||
}
|
||||
cc.retryThrottler.Store((*retryThrottler)(nil))
|
||||
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
|
||||
cc.ctx, cc.cancel = context.WithCancel(context.Background())
|
||||
|
||||
for _, opt := range opts {
|
||||
|
@ -151,20 +162,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
if channelz.IsOn() {
|
||||
if cc.dopts.channelzParentID != 0 {
|
||||
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
|
||||
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
|
||||
channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{
|
||||
Desc: "Channel Created",
|
||||
Severity: channelz.CtINFO,
|
||||
Severity: channelz.CtInfo,
|
||||
Parent: &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
|
||||
Severity: channelz.CtINFO,
|
||||
Severity: channelz.CtInfo,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
|
||||
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: "Channel Created",
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
channelz.Info(logger, cc.channelzID, "Channel Created")
|
||||
}
|
||||
cc.csMgr.channelzID = cc.channelzID
|
||||
}
|
||||
|
@ -196,15 +204,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
cc.mkp = cc.dopts.copts.KeepaliveParams
|
||||
|
||||
if cc.dopts.copts.Dialer == nil {
|
||||
cc.dopts.copts.Dialer = newProxyDialer(
|
||||
func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
network, addr := parseDialTarget(addr)
|
||||
return (&net.Dialer{}).DialContext(ctx, network, addr)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
if cc.dopts.copts.UserAgent != "" {
|
||||
cc.dopts.copts.UserAgent += " " + grpcUA
|
||||
} else {
|
||||
|
@ -219,7 +218,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
defer func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
switch {
|
||||
case ctx.Err() == err:
|
||||
conn = nil
|
||||
case err == nil || !cc.dopts.returnLastError:
|
||||
conn, err = nil, ctx.Err()
|
||||
default:
|
||||
conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
|
||||
}
|
||||
default:
|
||||
}
|
||||
}()
|
||||
|
@ -231,6 +237,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
case sc, ok := <-cc.dopts.scChan:
|
||||
if ok {
|
||||
cc.sc = &sc
|
||||
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
|
||||
scSet = true
|
||||
}
|
||||
default:
|
||||
|
@ -241,14 +248,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
|
||||
// Determine the resolver to use.
|
||||
cc.parsedTarget = parseTarget(cc.target)
|
||||
grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
|
||||
cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil)
|
||||
channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
|
||||
resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
|
||||
if resolverBuilder == nil {
|
||||
// If resolver builder is still nil, the parsed target's scheme is
|
||||
// not registered. Fallback to default resolver and set Endpoint to
|
||||
// the original target.
|
||||
grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
|
||||
channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
|
||||
cc.parsedTarget = resolver.Target{
|
||||
Scheme: resolver.GetDefaultScheme(),
|
||||
Endpoint: target,
|
||||
|
@ -264,6 +271,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
cc.authority = creds.Info().ServerName
|
||||
} else if cc.dopts.insecure && cc.dopts.authority != "" {
|
||||
cc.authority = cc.dopts.authority
|
||||
} else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") {
|
||||
cc.authority = "localhost"
|
||||
} else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") {
|
||||
cc.authority = "localhost" + cc.parsedTarget.Endpoint
|
||||
} else {
|
||||
// Use endpoint from "scheme://authority/endpoint" as the default
|
||||
// authority for ClientConn.
|
||||
|
@ -276,6 +287,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
case sc, ok := <-cc.dopts.scChan:
|
||||
if ok {
|
||||
cc.sc = &sc
|
||||
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
|
@ -293,6 +305,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
DialCreds: credsClone,
|
||||
CredsBundle: cc.dopts.copts.CredsBundle,
|
||||
Dialer: cc.dopts.copts.Dialer,
|
||||
CustomUserAgent: cc.dopts.copts.UserAgent,
|
||||
ChannelzParentID: cc.channelzID,
|
||||
Target: cc.parsedTarget,
|
||||
}
|
||||
|
@ -313,7 +326,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
if s == connectivity.Ready {
|
||||
break
|
||||
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
|
||||
if err = cc.blockingpicker.connectionError(); err != nil {
|
||||
if err = cc.connectionError(); err != nil {
|
||||
terr, ok := err.(interface {
|
||||
Temporary() bool
|
||||
})
|
||||
|
@ -324,6 +337,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
if !cc.WaitForStateChange(ctx, s) {
|
||||
// ctx got timeout or canceled.
|
||||
if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
|
||||
return nil, err
|
||||
}
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
@ -416,12 +432,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
|
|||
return
|
||||
}
|
||||
csm.state = state
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Channel Connectivity change to %v", state),
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
}
|
||||
channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
|
||||
if csm.notifyChan != nil {
|
||||
// There are other goroutines waiting on this channel.
|
||||
close(csm.notifyChan)
|
||||
|
@ -483,6 +494,8 @@ type ClientConn struct {
|
|||
balancerBuildOpts balancer.BuildOptions
|
||||
blockingpicker *pickerWrapper
|
||||
|
||||
safeConfigSelector iresolver.SafeConfigSelector
|
||||
|
||||
mu sync.RWMutex
|
||||
resolverWrapper *ccResolverWrapper
|
||||
sc *ServiceConfig
|
||||
|
@ -497,11 +510,18 @@ type ClientConn struct {
|
|||
|
||||
channelzID int64 // channelz unique identification number
|
||||
czData *channelzData
|
||||
|
||||
lceMu sync.Mutex // protects lastConnectionError
|
||||
lastConnectionError error
|
||||
}
|
||||
|
||||
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
|
||||
// ctx expires. A true value is returned in former case and false in latter.
|
||||
// This is an EXPERIMENTAL API.
|
||||
//
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
|
||||
ch := cc.csMgr.getNotifyChan()
|
||||
if cc.csMgr.getState() != sourceState {
|
||||
|
@ -516,7 +536,11 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
|
|||
}
|
||||
|
||||
// GetState returns the connectivity.State of ClientConn.
|
||||
// This is an EXPERIMENTAL API.
|
||||
//
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func (cc *ClientConn) GetState() connectivity.State {
|
||||
return cc.csMgr.getState()
|
||||
}
|
||||
|
@ -532,6 +556,7 @@ func (cc *ClientConn) scWatcher() {
|
|||
// TODO: load balance policy runtime change is ignored.
|
||||
// We may revisit this decision in the future.
|
||||
cc.sc = &sc
|
||||
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
|
||||
cc.mu.Unlock()
|
||||
case <-cc.ctx.Done():
|
||||
return
|
||||
|
@ -570,13 +595,13 @@ func init() {
|
|||
|
||||
func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
|
||||
if cc.sc != nil {
|
||||
cc.applyServiceConfigAndBalancer(cc.sc, addrs)
|
||||
cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs)
|
||||
return
|
||||
}
|
||||
if cc.dopts.defaultServiceConfig != nil {
|
||||
cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
|
||||
cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs)
|
||||
} else {
|
||||
cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
|
||||
cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -613,7 +638,15 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
|
|||
// default, per the error handling design?
|
||||
} else {
|
||||
if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
|
||||
cc.applyServiceConfigAndBalancer(sc, s.Addresses)
|
||||
configSelector := iresolver.GetConfigSelector(s)
|
||||
if configSelector != nil {
|
||||
if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
|
||||
channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector")
|
||||
}
|
||||
} else {
|
||||
configSelector = &defaultConfigSelector{sc}
|
||||
}
|
||||
cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
|
||||
} else {
|
||||
ret = balancer.ErrBadResolverState
|
||||
if cc.balancerWrapper == nil {
|
||||
|
@ -623,6 +656,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
|
|||
} else {
|
||||
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
|
||||
}
|
||||
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc})
|
||||
cc.blockingpicker.updatePicker(base.NewErrPicker(err))
|
||||
cc.csMgr.updateState(connectivity.TransientFailure)
|
||||
cc.mu.Unlock()
|
||||
|
@ -671,9 +705,9 @@ func (cc *ClientConn) switchBalancer(name string) {
|
|||
return
|
||||
}
|
||||
|
||||
grpclog.Infof("ClientConn switching balancer to %q", name)
|
||||
channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name)
|
||||
if cc.dopts.balancerBuilder != nil {
|
||||
grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
|
||||
channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead")
|
||||
return
|
||||
}
|
||||
if cc.balancerWrapper != nil {
|
||||
|
@ -681,22 +715,12 @@ func (cc *ClientConn) switchBalancer(name string) {
|
|||
}
|
||||
|
||||
builder := balancer.Get(name)
|
||||
if channelz.IsOn() {
|
||||
if builder == nil {
|
||||
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName),
|
||||
Severity: channelz.CtWarning,
|
||||
})
|
||||
} else {
|
||||
channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Channel switches to new LB policy %q", name),
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
}
|
||||
}
|
||||
if builder == nil {
|
||||
grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
|
||||
channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName)
|
||||
channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name)
|
||||
builder = newPickfirstBuilder()
|
||||
} else {
|
||||
channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name)
|
||||
}
|
||||
|
||||
cc.curBalancerName = builder.Name()
|
||||
|
@ -720,6 +744,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
|
|||
// Caller needs to make sure len(addrs) > 0.
|
||||
func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
|
||||
ac := &addrConn{
|
||||
state: connectivity.Idle,
|
||||
cc: cc,
|
||||
addrs: addrs,
|
||||
scopts: opts,
|
||||
|
@ -736,12 +761,12 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
|
|||
}
|
||||
if channelz.IsOn() {
|
||||
ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
|
||||
Desc: "Subchannel Created",
|
||||
Severity: channelz.CtINFO,
|
||||
Severity: channelz.CtInfo,
|
||||
Parent: &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
|
||||
Severity: channelz.CtINFO,
|
||||
Severity: channelz.CtInfo,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -775,7 +800,11 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
|
|||
}
|
||||
|
||||
// Target returns the target string of the ClientConn.
|
||||
// This is an EXPERIMENTAL API.
|
||||
//
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func (cc *ClientConn) Target() string {
|
||||
return cc.target
|
||||
}
|
||||
|
@ -834,7 +863,7 @@ func (ac *addrConn) connect() error {
|
|||
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
||||
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
||||
if ac.state == connectivity.Shutdown ||
|
||||
ac.state == connectivity.TransientFailure ||
|
||||
ac.state == connectivity.Idle {
|
||||
|
@ -854,7 +883,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
|
|||
break
|
||||
}
|
||||
}
|
||||
grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
|
||||
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
|
||||
if curAddrFound {
|
||||
ac.addrs = addrs
|
||||
}
|
||||
|
@ -862,26 +891,33 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
|
|||
return curAddrFound
|
||||
}
|
||||
|
||||
func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
	if sc == nil {
		return MethodConfig{}
	}
	if m, ok := sc.Methods[method]; ok {
		return m
	}
	i := strings.LastIndex(method, "/")
	if m, ok := sc.Methods[method[:i+1]]; ok {
		return m
	}
	return sc.Methods[""]
}
|
||||
|
||||
// GetMethodConfig gets the method config of the input method.
|
||||
// If there's an exact match for input method (i.e. /service/method), we return
|
||||
// the corresponding MethodConfig.
|
||||
// If there isn't an exact match for the input method, we look for the default config
|
||||
// under the service (i.e /service/). If there is a default MethodConfig for
|
||||
// the service, we return it.
|
||||
// If there isn't an exact match for the input method, we look for the service's default
|
||||
// config under the service (i.e /service/) and then for the default for all services (empty string).
|
||||
//
|
||||
// If there is a default MethodConfig for the service, we return it.
|
||||
// Otherwise, we return an empty MethodConfig.
|
||||
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
|
||||
// TODO: Avoid the locking here.
|
||||
cc.mu.RLock()
|
||||
defer cc.mu.RUnlock()
|
||||
if cc.sc == nil {
|
||||
return MethodConfig{}
|
||||
}
|
||||
m, ok := cc.sc.Methods[method]
|
||||
if !ok {
|
||||
i := strings.LastIndex(method, "/")
|
||||
m = cc.sc.Methods[method[:i+1]]
|
||||
}
|
||||
return m
|
||||
return getMethodConfig(cc.sc, method)
|
||||
}
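GetMethodConfig now delegates to the getMethodConfig helper added above, so the lookup order from the comment (exact method key, then the per-service default "/service/", then the global default "") applies in both places. A small self-contained sketch of that precedence, using a plain string map rather than the real ServiceConfig type, might look like this; the map contents and the lookup function name are purely illustrative.

package mcsketch

import "strings"

// lookup resolves a per-method setting with the precedence described above:
// "/service/method" first, then "/service/", then the "" default.
// For example, lookup(map[string]string{"/helloworld.Greeter/": "svc-default"},
// "/helloworld.Greeter/SayHello") falls back to the "/helloworld.Greeter/" entry.
func lookup(configs map[string]string, method string) string {
	if c, ok := configs[method]; ok {
		return c
	}
	i := strings.LastIndex(method, "/")
	if c, ok := configs[method[:i+1]]; ok {
		return c
	}
	return configs[""]
}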
|
||||
|
||||
func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
|
||||
|
@ -904,12 +940,15 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st
|
|||
return t, done, nil
|
||||
}
|
||||
|
||||
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
|
||||
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
|
||||
if sc == nil {
|
||||
// should never reach here.
|
||||
return
|
||||
}
|
||||
cc.sc = sc
|
||||
if configSelector != nil {
|
||||
cc.safeConfigSelector.UpdateConfigSelector(configSelector)
|
||||
}
|
||||
|
||||
if cc.sc.retryThrottling != nil {
|
||||
newThrottler := &retryThrottler{
|
||||
|
@ -973,7 +1012,10 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
|
|||
// However, if a previously unavailable network becomes available, this may be
|
||||
// used to trigger an immediate reconnect.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func (cc *ClientConn) ResetConnectBackoff() {
|
||||
cc.mu.Lock()
|
||||
conns := cc.conns
|
||||
|
@ -1017,15 +1059,15 @@ func (cc *ClientConn) Close() error {
|
|||
if channelz.IsOn() {
|
||||
ted := &channelz.TraceEventDesc{
|
||||
Desc: "Channel Deleted",
|
||||
Severity: channelz.CtINFO,
|
||||
Severity: channelz.CtInfo,
|
||||
}
|
||||
if cc.dopts.channelzParentID != 0 {
|
||||
ted.Parent = &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
|
||||
Severity: channelz.CtINFO,
|
||||
Severity: channelz.CtInfo,
|
||||
}
|
||||
}
|
||||
channelz.AddTraceEvent(cc.channelzID, ted)
|
||||
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
|
||||
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
|
||||
// the entity being deleted, and thus prevent it from being deleted right away.
|
||||
channelz.RemoveEntry(cc.channelzID)
|
||||
|
@ -1068,15 +1110,8 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
|
|||
if ac.state == s {
|
||||
return
|
||||
}
|
||||
|
||||
updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s)
|
||||
ac.state = s
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: updateMsg,
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
}
|
||||
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
|
||||
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
|
||||
}
|
||||
|
||||
|
@ -1163,7 +1198,7 @@ func (ac *addrConn) resetTransport() {
|
|||
ac.mu.Lock()
|
||||
if ac.state == connectivity.Shutdown {
|
||||
ac.mu.Unlock()
|
||||
newTr.Close()
|
||||
newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN"))
|
||||
return
|
||||
}
|
||||
ac.curAddr = addr
|
||||
|
@ -1213,12 +1248,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
|||
}
|
||||
ac.mu.Unlock()
|
||||
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
}
|
||||
channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
|
||||
|
||||
newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
|
||||
if err == nil {
|
||||
|
@ -1227,7 +1257,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
|||
if firstConnErr == nil {
|
||||
firstConnErr = err
|
||||
}
|
||||
ac.cc.blockingpicker.updateConnectionError(err)
|
||||
ac.cc.updateConnectionError(err)
|
||||
}
|
||||
|
||||
// Couldn't connect to any address.
|
||||
|
@ -1242,16 +1272,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
onCloseCalled := make(chan struct{})
|
||||
reconnect := grpcsync.NewEvent()
|
||||
|
||||
authority := ac.cc.authority
|
||||
// addr.ServerName takes precedent over ClientConn authority, if present.
|
||||
if addr.ServerName != "" {
|
||||
authority = addr.ServerName
|
||||
}
|
||||
|
||||
target := transport.TargetInfo{
|
||||
Addr: addr.Addr,
|
||||
Metadata: addr.Metadata,
|
||||
Authority: authority,
|
||||
if addr.ServerName == "" {
|
||||
addr.ServerName = ac.cc.authority
|
||||
}
|
||||
|
||||
once := sync.Once{}
|
||||
|
@ -1297,18 +1320,18 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
copts.ChannelzParentID = ac.channelzID
|
||||
}
|
||||
|
||||
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
|
||||
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose)
|
||||
if err != nil {
|
||||
// newTr is either nil, or closed.
|
||||
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
|
||||
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(time.Until(connectDeadline)):
|
||||
// We didn't get the preface in time.
|
||||
newTr.Close()
|
||||
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
|
||||
newTr.Close(fmt.Errorf("failed to receive server preface within timeout"))
|
||||
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
|
||||
return nil, nil, errors.New("timed out waiting for server handshake")
|
||||
case <-prefaceReceived:
|
||||
// We got the preface - huzzah! things are good.
|
||||
|
@ -1325,7 +1348,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
//
|
||||
// LB channel health checking is enabled when all requirements below are met:
|
||||
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
|
||||
// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
|
||||
// 2. internal.HealthCheckFunc is set by importing the grpc/health package
|
||||
// 3. a service config with non-empty healthCheckConfig field is provided
|
||||
// 4. the load balancer requests it
|
||||
//
|
||||
|
@ -1355,7 +1378,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
|||
// The health package is not imported to set health check function.
|
||||
//
|
||||
// TODO: add a link to the health check doc in the error message.
|
||||
grpclog.Error("Health check is requested but health check function is not set.")
|
||||
channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.")
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1385,15 +1408,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
|||
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Unimplemented {
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled",
|
||||
Severity: channelz.CtError,
|
||||
})
|
||||
}
|
||||
grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
|
||||
channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
|
||||
} else {
|
||||
grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
|
||||
channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
@ -1430,10 +1447,9 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
|
|||
}
|
||||
|
||||
// tearDown starts to tear down the addrConn.
|
||||
// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
|
||||
// some edge cases (e.g., the caller opens and closes many addrConn's in a
|
||||
// tight loop.
|
||||
// tearDown doesn't remove ac from ac.cc.conns.
|
||||
//
|
||||
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
|
||||
// will leak. In most cases, call cc.removeAddrConn() instead.
|
||||
func (ac *addrConn) tearDown(err error) {
|
||||
ac.mu.Lock()
|
||||
if ac.state == connectivity.Shutdown {
|
||||
|
@ -1458,12 +1474,12 @@ func (ac *addrConn) tearDown(err error) {
|
|||
ac.mu.Lock()
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
|
||||
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
|
||||
Desc: "Subchannel Deleted",
|
||||
Severity: channelz.CtINFO,
|
||||
Severity: channelz.CtInfo,
|
||||
Parent: &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
|
||||
Severity: channelz.CtINFO,
|
||||
Severity: channelz.CtInfo,
|
||||
},
|
||||
})
|
||||
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
|
||||
|
@ -1560,9 +1576,21 @@ var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
|
|||
|
||||
func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
|
||||
for _, rb := range cc.dopts.resolvers {
|
||||
if cc.parsedTarget.Scheme == rb.Scheme() {
|
||||
if scheme == rb.Scheme() {
|
||||
return rb
|
||||
}
|
||||
}
|
||||
return resolver.Get(cc.parsedTarget.Scheme)
|
||||
return resolver.Get(scheme)
|
||||
}
|
||||
|
||||
func (cc *ClientConn) updateConnectionError(err error) {
|
||||
cc.lceMu.Lock()
|
||||
cc.lastConnectionError = err
|
||||
cc.lceMu.Unlock()
|
||||
}
|
||||
|
||||
func (cc *ClientConn) connectionError() error {
|
||||
cc.lceMu.Lock()
|
||||
defer cc.lceMu.Unlock()
|
||||
return cc.lastConnectionError
|
||||
}
|
||||
@ -33,6 +33,9 @@ const (
|
|||
OK Code = 0
|
||||
|
||||
// Canceled indicates the operation was canceled (typically by the caller).
|
||||
//
|
||||
// The gRPC framework will generate this error code when cancellation
|
||||
// is requested.
|
||||
Canceled Code = 1
|
||||
|
||||
// Unknown error. An example of where this error may be returned is
|
||||
|
@ -40,12 +43,17 @@ const (
|
|||
// an error-space that is not known in this address space. Also
|
||||
// errors raised by APIs that do not return enough error information
|
||||
// may be converted to this error.
|
||||
//
|
||||
// The gRPC framework will generate this error code in the above two
|
||||
// mentioned cases.
|
||||
Unknown Code = 2
|
||||
|
||||
// InvalidArgument indicates client specified an invalid argument.
|
||||
// Note that this differs from FailedPrecondition. It indicates arguments
|
||||
// that are problematic regardless of the state of the system
|
||||
// (e.g., a malformed file name).
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
InvalidArgument Code = 3
|
||||
|
||||
// DeadlineExceeded means operation expired before completion.
|
||||
|
@ -53,14 +61,21 @@ const (
|
|||
// returned even if the operation has completed successfully. For
|
||||
// example, a successful response from a server could have been delayed
|
||||
// long enough for the deadline to expire.
|
||||
//
|
||||
// The gRPC framework will generate this error code when the deadline is
|
||||
// exceeded.
|
||||
DeadlineExceeded Code = 4
|
||||
|
||||
// NotFound means some requested entity (e.g., file or directory) was
|
||||
// not found.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
NotFound Code = 5
|
||||
|
||||
// AlreadyExists means an attempt to create an entity failed because one
|
||||
// already exists.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
AlreadyExists Code = 6
|
||||
|
||||
// PermissionDenied indicates the caller does not have permission to
|
||||
|
@ -69,10 +84,17 @@ const (
|
|||
// instead for those errors). It must not be
|
||||
// used if the caller cannot be identified (use Unauthenticated
|
||||
// instead for those errors).
|
||||
//
|
||||
// This error code will not be generated by the gRPC core framework,
|
||||
// but expect authentication middleware to use it.
|
||||
PermissionDenied Code = 7
|
||||
|
||||
// ResourceExhausted indicates some resource has been exhausted, perhaps
|
||||
// a per-user quota, or perhaps the entire file system is out of space.
|
||||
//
|
||||
// This error code will be generated by the gRPC framework in
|
||||
// out-of-memory and server overload situations, or when a message is
|
||||
// larger than the configured maximum size.
|
||||
ResourceExhausted Code = 8
|
||||
|
||||
// FailedPrecondition indicates operation was rejected because the
|
||||
|
@ -94,6 +116,8 @@ const (
|
|||
// REST Get/Update/Delete on a resource and the resource on the
|
||||
// server does not match the condition. E.g., conflicting
|
||||
// read-modify-write on the same resource.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
FailedPrecondition Code = 9
|
||||
|
||||
// Aborted indicates the operation was aborted, typically due to a
|
||||
|
@ -102,6 +126,8 @@ const (
|
|||
//
|
||||
// See litmus test above for deciding between FailedPrecondition,
|
||||
// Aborted, and Unavailable.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
Aborted Code = 10
|
||||
|
||||
// OutOfRange means operation was attempted past the valid range.
|
||||
|
@ -119,15 +145,26 @@ const (
|
|||
// error) when it applies so that callers who are iterating through
|
||||
// a space can easily look for an OutOfRange error to detect when
|
||||
// they are done.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
OutOfRange Code = 11
|
||||
|
||||
// Unimplemented indicates operation is not implemented or not
|
||||
// supported/enabled in this service.
|
||||
//
|
||||
// This error code will be generated by the gRPC framework. Most
|
||||
// commonly, you will see this error code when a method implementation
|
||||
// is missing on the server. It can also be generated for unknown
|
||||
// compression algorithms or a disagreement as to whether an RPC should
|
||||
// be streaming.
|
||||
Unimplemented Code = 12
|
||||
|
||||
// Internal errors. Means some invariants expected by underlying
|
||||
// system has been broken. If you see one of these errors,
|
||||
// something is very broken.
|
||||
//
|
||||
// This error code will be generated by the gRPC framework in several
|
||||
// internal error conditions.
|
||||
Internal Code = 13
|
||||
|
||||
// Unavailable indicates the service is currently unavailable.
|
||||
|
@ -137,13 +174,22 @@ const (
|
|||
//
|
||||
// See litmus test above for deciding between FailedPrecondition,
|
||||
// Aborted, and Unavailable.
|
||||
//
|
||||
// This error code will be generated by the gRPC framework during
|
||||
// abrupt shutdown of a server process or network connection.
|
||||
Unavailable Code = 14
|
||||
|
||||
// DataLoss indicates unrecoverable data loss or corruption.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
DataLoss Code = 15
|
||||
|
||||
// Unauthenticated indicates the request does not have valid
|
||||
// authentication credentials for the operation.
|
||||
//
|
||||
// The gRPC framework will generate this error code when the
|
||||
// authentication metadata is invalid or a Credentials callback fails,
|
||||
// but also expect authentication middleware to generate it.
|
||||
Unauthenticated Code = 16
|
||||
|
||||
_maxCode = 17
|
||||
|
|
|
@ -22,11 +22,11 @@
|
|||
package connectivity
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
// State indicates the state of connectivity.
|
||||
// It can be the state of a ClientConn or SubConn.
|
||||
type State int
|
||||
|
@ -44,7 +44,7 @@ func (s State) String() string {
|
|||
case Shutdown:
|
||||
return "SHUTDOWN"
|
||||
default:
|
||||
grpclog.Errorf("unknown connectivity state: %d", s)
|
||||
logger.Errorf("unknown connectivity state: %d", s)
|
||||
return "Invalid-State"
|
||||
}
|
||||
}
|
||||
|
@ -61,13 +61,3 @@ const (
|
|||
// Shutdown indicates the ClientConn has started shutting down.
|
||||
Shutdown
|
||||
)
|
||||
|
||||
// Reporter reports the connectivity states.
|
||||
type Reporter interface {
|
||||
// CurrentState returns the current state of the reporter.
|
||||
CurrentState() State
|
||||
// WaitForStateChange blocks until the reporter's state is different from the given state,
|
||||
// and returns true.
|
||||
// It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
|
||||
WaitForStateChange(context.Context, State) bool
|
||||
}
|
||||
|
|
|
@ -29,7 +29,8 @@ import (
|
|||
"net"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/attributes"
|
||||
icredentials "google.golang.org/grpc/internal/credentials"
|
||||
)
|
||||
|
||||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||
|
@ -57,9 +58,11 @@ type PerRPCCredentials interface {
|
|||
type SecurityLevel int
|
||||
|
||||
const (
|
||||
// NoSecurity indicates a connection is insecure.
|
||||
// InvalidSecurityLevel indicates an invalid security level.
|
||||
// The zero SecurityLevel value is invalid for backward compatibility.
|
||||
NoSecurity SecurityLevel = iota + 1
|
||||
InvalidSecurityLevel SecurityLevel = iota
|
||||
// NoSecurity indicates a connection is insecure.
|
||||
NoSecurity
|
||||
// IntegrityOnly indicates a connection only provides integrity protection.
|
||||
IntegrityOnly
|
||||
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
|
||||
|
@ -89,7 +92,7 @@ type CommonAuthInfo struct {
|
|||
}
|
||||
|
||||
// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
|
||||
func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
|
||||
func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
|
||||
return c
|
||||
}
|
||||
|
||||
|
@ -100,7 +103,11 @@ type ProtocolInfo struct {
|
|||
ProtocolVersion string
|
||||
// SecurityProtocol is the security protocol in use.
|
||||
SecurityProtocol string
|
||||
// SecurityVersion is the security protocol version.
|
||||
// SecurityVersion is the security protocol version. It is a static version string from the
|
||||
// credentials, not a value that reflects per-connection protocol negotiation. To retrieve
|
||||
// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
|
||||
//
|
||||
// Deprecated: please use Peer.AuthInfo.
|
||||
SecurityVersion string
|
||||
// ServerName is the user-configured server name.
|
||||
ServerName string
|
||||
|
@ -120,15 +127,18 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR
|
|||
// TransportCredentials defines the common interface for all the live gRPC wire
|
||||
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
||||
type TransportCredentials interface {
|
||||
// ClientHandshake does the authentication handshake specified by the corresponding
|
||||
// authentication protocol on rawConn for clients. It returns the authenticated
|
||||
// connection and the corresponding auth information about the connection.
|
||||
// The auth information should embed CommonAuthInfo to return additional information about
|
||||
// the credentials. Implementations must use the provided context to implement timely cancellation.
|
||||
// gRPC will try to reconnect if the error returned is a temporary error
|
||||
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
|
||||
// If the returned error is a wrapper error, implementations should make sure that
|
||||
// ClientHandshake does the authentication handshake specified by the
|
||||
// corresponding authentication protocol on rawConn for clients. It returns
|
||||
// the authenticated connection and the corresponding auth information
|
||||
// about the connection. The auth information should embed CommonAuthInfo
|
||||
// to return additional information about the credentials. Implementations
|
||||
// must use the provided context to implement timely cancellation. gRPC
|
||||
// will try to reconnect if the error returned is a temporary error
|
||||
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
|
||||
// returned error is a wrapper error, implementations should make sure that
|
||||
// the error implements Temporary() to have the correct retry behaviors.
|
||||
// Additionally, ClientHandshakeInfo data will be available via the context
|
||||
// passed to this call.
|
||||
//
|
||||
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
|
||||
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
||||
|
@ -178,15 +188,33 @@ type RequestInfo struct {
|
|||
AuthInfo AuthInfo
|
||||
}
|
||||
|
||||
// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
|
||||
type requestInfoKey struct{}
|
||||
|
||||
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
|
||||
//
|
||||
// This API is experimental.
|
||||
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
|
||||
ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
|
||||
return
|
||||
ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
|
||||
return ri, ok
|
||||
}
|
||||
|
||||
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
|
||||
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
|
||||
// balancer etc. Individual credential implementations control the actual
|
||||
// format of the data that they are willing to receive.
|
||||
//
|
||||
// This API is experimental.
|
||||
type ClientHandshakeInfo struct {
|
||||
// Attributes contains the attributes for the address. It could be provided
|
||||
// by the gRPC, resolver, balancer etc.
|
||||
Attributes *attributes.Attributes
|
||||
}
|
||||
|
||||
// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
|
||||
// in ctx.
|
||||
//
|
||||
// This API is experimental.
|
||||
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
|
||||
chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
|
||||
return chi
|
||||
}
|
||||
|
||||
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
|
||||
|
@ -194,17 +222,16 @@ func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
|
|||
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
|
||||
//
|
||||
// This API is experimental.
|
||||
func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
|
||||
func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
|
||||
type internalInfo interface {
|
||||
GetCommonAuthInfo() *CommonAuthInfo
|
||||
GetCommonAuthInfo() CommonAuthInfo
|
||||
}
|
||||
ri, _ := RequestInfoFromContext(ctx)
|
||||
if ri.AuthInfo == nil {
|
||||
return errors.New("unable to obtain SecurityLevel from context")
|
||||
if ai == nil {
|
||||
return errors.New("AuthInfo is nil")
|
||||
}
|
||||
if ci, ok := ri.AuthInfo.(internalInfo); ok {
|
||||
if ci, ok := ai.(internalInfo); ok {
|
||||
// CommonAuthInfo.SecurityLevel has an invalid value.
|
||||
if ci.GetCommonAuthInfo().SecurityLevel == 0 {
|
||||
if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
|
||||
return nil
|
||||
}
|
||||
if ci.GetCommonAuthInfo().SecurityLevel < level {
|
||||
|
@ -215,12 +242,6 @@ func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
|
|||
return nil
|
||||
}
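CheckSecurityLevel now takes the AuthInfo directly instead of extracting it from a context. A hedged usage sketch from the caller's side, assuming a server-side handler that pulls AuthInfo from the peer (the requireTransportSecurity name is illustrative, not part of the gRPC API):

package credsketch

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/status"
)

// requireTransportSecurity rejects RPCs whose connection does not provide at
// least privacy and integrity, using the new CheckSecurityLevel(AuthInfo, level) form.
func requireTransportSecurity(ctx context.Context) error {
	p, ok := peer.FromContext(ctx)
	if !ok || p.AuthInfo == nil {
		return status.Error(codes.Unauthenticated, "no transport security information in context")
	}
	if err := credentials.CheckSecurityLevel(p.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
		return status.Error(codes.PermissionDenied, err.Error())
	}
	return nil
}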
|
||||
|
||||
func init() {
|
||||
internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
|
||||
return context.WithValue(ctx, requestInfoKey{}, ri)
|
||||
}
|
||||
}
|
||||
|
||||
// ChannelzSecurityInfo defines the interface that security protocols should implement
|
||||
// in order to provide security info to channelz.
|
||||
//
|
||||
|
|
|
@ -25,8 +25,9 @@ import (
	"fmt"
	"io/ioutil"
	"net"
	"net/url"

	"google.golang.org/grpc/credentials/internal"
	credinternal "google.golang.org/grpc/internal/credentials"
)

// TLSInfo contains the auth information for a TLS authenticated connection.

@ -34,6 +35,8 @@ import (
type TLSInfo struct {
	State tls.ConnectionState
	CommonAuthInfo
	// This API is experimental.
	SPIFFEID *url.URL
}

// AuthType returns the type of TLSInfo as a string.

@ -69,7 +72,7 @@ func (c tlsCreds) Info() ProtocolInfo {

func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
	// use local cfg to avoid clobbering ServerName if using multiple endpoints
	cfg := cloneTLSConfig(c.config)
	cfg := credinternal.CloneTLSConfig(c.config)
	if cfg.ServerName == "" {
		serverName, _, err := net.SplitHostPort(authority)
		if err != nil {

@ -94,7 +97,17 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
		conn.Close()
		return nil, nil, ctx.Err()
	}
	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
	tlsInfo := TLSInfo{
		State: conn.ConnectionState(),
		CommonAuthInfo: CommonAuthInfo{
			SecurityLevel: PrivacyAndIntegrity,
		},
	}
	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
	if id != nil {
		tlsInfo.SPIFFEID = id
	}
	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}

func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {

@ -103,7 +116,17 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
		conn.Close()
		return nil, nil, err
	}
	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
	tlsInfo := TLSInfo{
		State: conn.ConnectionState(),
		CommonAuthInfo: CommonAuthInfo{
			SecurityLevel: PrivacyAndIntegrity,
		},
	}
	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
	if id != nil {
		tlsInfo.SPIFFEID = id
	}
	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
}

func (c *tlsCreds) Clone() TransportCredentials {

@ -115,36 +138,33 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
	return nil
}

const alpnProtoStrH2 = "h2"

func appendH2ToNextProtos(ps []string) []string {
	for _, p := range ps {
		if p == alpnProtoStrH2 {
			return ps
		}
	}
	ret := make([]string, 0, len(ps)+1)
	ret = append(ret, ps...)
	return append(ret, alpnProtoStrH2)
}

// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
	tc := &tlsCreds{cloneTLSConfig(c)}
	tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
	tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
	tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
	return tc
}

// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
// NewClientTLSFromCert constructs TLS credentials from the provided root
// certificate authority certificate(s) to validate server connections. If
// certificates to establish the identity of the client need to be included in
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
// tls.Config can be specified.
// serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
}

// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
// NewClientTLSFromFile constructs TLS credentials from the provided root
// certificate authority certificate file(s) to validate server connections. If
// certificates to establish the identity of the client need to be included in
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
// tls.Config can be specified.
// serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
	b, err := ioutil.ReadFile(certFile)
	if err != nil {

@ -175,7 +195,10 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// This API is EXPERIMENTAL.
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type TLSChannelzSecurityValue struct {
	ChannelzSecurityValue
	StandardName string

@ -208,18 +231,3 @@ var cipherSuiteLookup = map[uint16]string{
	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
}

// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
//
// If cfg is nil, a new zero tls.Config is returned.
//
// TODO: inline this function if possible.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}

	return cfg.Clone()
}

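For reference, a minimal sketch (not part of the diff) of the usual client-side use of these TLS credentials; the CA bundle path and target address are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Validate the server against a root CA bundle; an empty override keeps
	// the authority derived from the dial target.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("loading CA bundle: %v", err)
	}
	conn, err := grpc.Dial("example.internal:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}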
|
@ -27,7 +27,6 @@ import (
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal"
	internalbackoff "google.golang.org/grpc/internal/backoff"
	"google.golang.org/grpc/internal/envconfig"

@ -50,14 +49,14 @@ type dialOptions struct {
	dc Decompressor
	bs internalbackoff.Strategy
	block bool
	returnLastError bool
	insecure bool
	timeout time.Duration
	scChan <-chan ServiceConfig
	authority string
	copts transport.ConnectOptions
	callOptions []CallOption
	// This is used by v1 balancer dial option WithBalancer to support v1
	// balancer, and also by WithBalancerName dial option.
	// This is used by WithBalancerName dial option.
	balancerBuilder balancer.Builder
	channelzParentID int64
	disableServiceConfig bool

@ -67,10 +66,6 @@ type dialOptions struct {
	minConnectTimeout func() time.Duration
	defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
	defaultServiceConfigRawJSON *string
	// This is used by ccResolverWrapper to backoff between successive calls to
	// resolver.ResolveNow(). The user will have no need to configure this, but
	// we need to be able to configure this in tests.
	resolveNowBackoff func(int) time.Duration
	resolvers []resolver.Builder
}

@ -82,7 +77,10 @@ type DialOption interface {
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
// This API is EXPERIMENTAL.
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type EmptyDialOption struct{}

func (EmptyDialOption) apply(*dialOptions) {}

@ -198,19 +196,6 @@ func WithDecompressor(dc Decompressor) DialOption {
	})
}

// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
// Name resolver will be ignored if this DialOption is specified.
//
// Deprecated: use the new balancer APIs in balancer package and
// WithBalancerName. Will be removed in a future 1.x release.
func WithBalancer(b Balancer) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.balancerBuilder = &balancerWrapperBuilder{
			b: b,
		}
	})
}

// WithBalancerName sets the balancer that the ClientConn will be initialized
// with. Balancer registered with balancerName will be used. This function
// panics if no balancer was registered by balancerName.

@ -251,7 +236,10 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
// using the backoff.DefaultConfig as a base, in cases where you want to
// override only a subset of the backoff configuration.
//
// This API is EXPERIMENTAL.
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithConnectParams(p ConnectParams) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.bs = internalbackoff.Exponential{Config: p.Backoff}

@ -298,6 +286,22 @@ func WithBlock() DialOption {
	})
}

// WithReturnConnectionError returns a DialOption which makes the client connection
// return a string containing both the last connection error that occurred and
// the context.DeadlineExceeded error.
// Implies WithBlock()
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithReturnConnectionError() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.block = true
		o.returnLastError = true
	})
}

// WithInsecure returns a DialOption which disables transport security for this
// ClientConn. Note that transport security is required unless WithInsecure is
// set.

@ -307,6 +311,19 @@ func WithInsecure() DialOption {
	})
}

// WithNoProxy returns a DialOption which disables the use of proxies for this
// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithNoProxy() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.UseProxy = false
	})
}

// WithTransportCredentials returns a DialOption which configures a connection
// level security credentials (e.g., TLS/SSL). This should not be used together
// with WithCredentialsBundle.

@ -328,7 +345,10 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
// the ClientConn.WithCreds. This should not be used together with
// WithTransportCredentials.
//
// This API is experimental.
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithCredentialsBundle(b credentials.Bundle) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.CredsBundle = b

@ -393,7 +413,10 @@ func WithStatsHandler(h stats.Handler) DialOption {
// FailOnNonTempDialError only affects the initial dial, and does not do
// anything useful unless you are also using WithBlock().
//
// This is an EXPERIMENTAL API.
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func FailOnNonTempDialError(f bool) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.FailOnNonTempDialError = f

@ -412,7 +435,7 @@ func WithUserAgent(s string) DialOption {
// for the client transport.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
	if kp.Time < internal.KeepaliveMinPingTime {
		grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
		logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
		kp.Time = internal.KeepaliveMinPingTime
	}
	return newFuncDialOption(func(o *dialOptions) {

@ -448,7 +471,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
}

// WithChainStreamInterceptor returns a DialOption that specifies the chained
// interceptor for unary RPCs. The first interceptor will be the outer most,
// interceptor for streaming RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithStreamInterceptor will always be prepended to the chain.

@ -471,7 +494,10 @@ func WithAuthority(a string) DialOption {
// current ClientConn's parent. This function is used in nested channel creation
// (e.g. grpclb dial).
//
// This API is EXPERIMENTAL.
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithChannelzParentID(id int64) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.channelzParentID = id

@ -497,7 +523,10 @@ func WithDisableServiceConfig() DialOption {
// 2. Resolver does not return a service config or if the resolver returns an
// invalid service config.
//
// This API is EXPERIMENTAL.
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithDefaultServiceConfig(s string) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.defaultServiceConfigRawJSON = &s

@ -513,7 +542,10 @@ func WithDefaultServiceConfig(s string) DialOption {
// default in the future. Until then, it may be enabled by setting the
// environment variable "GRPC_GO_RETRY" to "on".
//
// This API is EXPERIMENTAL.
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithDisableRetry() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.disableRetry = true

@ -531,7 +563,10 @@ func WithMaxHeaderListSize(s uint32) DialOption {
// WithDisableHealthCheck disables the LB channel health checking for all
// SubConns of this ClientConn.
//
// This API is EXPERIMENTAL.
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithDisableHealthCheck() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.disableHealthCheck = true

@ -555,8 +590,8 @@ func defaultDialOptions() dialOptions {
		copts: transport.ConnectOptions{
			WriteBufferSize: defaultWriteBufSize,
			ReadBufferSize: defaultReadBufSize,
			UseProxy: true,
		},
		resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
	}
}

@ -571,22 +606,15 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
	})
}

// withResolveNowBackoff specifies the function that clientconn uses to backoff
// between successive calls to resolver.ResolveNow().
//
// For testing purpose only.
func withResolveNowBackoff(f func(int) time.Duration) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.resolveNowBackoff = f
	})
}

// WithResolvers allows a list of resolver implementations to be registered
// locally with the ClientConn without needing to be globally registered via
// resolver.Register. They will be matched against the scheme used for the
// current Dial only, and will take precedence over the global registry.
//
// This API is EXPERIMENTAL.
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithResolvers(rs ...resolver.Builder) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.resolvers = append(o.resolvers, rs...)
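Taken together, the new and reworked options above are applied at dial time. A hedged sketch (not part of the diff) that combines a few of them; the target address and service-config JSON are placeholders.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "example.internal:50051",
		grpc.WithInsecure(),
		grpc.WithBlock(),
		// Experimental: surface the last connection error instead of a bare
		// context.DeadlineExceeded when the blocking dial gives up.
		grpc.WithReturnConnectionError(),
		// Experimental: skip any HTTP(S) proxy configured in the environment.
		grpc.WithNoProxy(),
		// Fallback service config used when the resolver does not supply one.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
		// Tune connection backoff using backoff.DefaultConfig as the base.
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           backoff.DefaultConfig,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}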
@ -16,6 +16,8 @@
|
|||
*
|
||||
*/
|
||||
|
||||
//go:generate ./regenerate.sh
|
||||
|
||||
/*
|
||||
Package grpc implements an RPC system called gRPC.
|
||||
|
||||
|
|
|
@ -19,7 +19,10 @@
|
|||
// Package encoding defines the interface for the compressor and codec, and
|
||||
// functions to register and retrieve compressors and codecs.
|
||||
//
|
||||
// This package is EXPERIMENTAL.
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
package encoding
|
||||
|
||||
import (
|
||||
|
@ -46,10 +49,15 @@ type Compressor interface {
|
|||
// coding header. The result must be static; the result cannot change
|
||||
// between calls.
|
||||
Name() string
|
||||
// EXPERIMENTAL: if a Compressor implements
|
||||
// If a Compressor implements
|
||||
// DecompressedSize(compressedBytes []byte) int, gRPC will call it
|
||||
// to determine the size of the buffer allocated for the result of decompression.
|
||||
// Return -1 to indicate unknown size.
|
||||
//
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
}
|
||||
|
||||
var registeredCompressor = make(map[string]Compressor)
|
||||
|
|
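One registered implementation of this Compressor interface ships with gRPC itself: the gzip subpackage registers its compressor in an init function. A hedged usage sketch, not part of the diff; compiling it into a client is enough to make the name available per call.

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // registers the gzip Compressor in init()
)

// UseCompressor selects a registered compressor by name for a single call,
// e.g. client.DoWork(ctx, req, withGzip).
var withGzip = grpc.UseCompressor(gzip.Name)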
|
@ -21,8 +21,7 @@
package proto

import (
	"math"
	"sync"
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc/encoding"

@ -38,73 +37,22 @@ func init() {
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
type codec struct{}

type cachedProtoBuffer struct {
	lastMarshaledSize uint32
	proto.Buffer
}

func capToMaxInt32(val int) uint32 {
	if val > math.MaxInt32 {
		return uint32(math.MaxInt32)
	}
	return uint32(val)
}

func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
	protoMsg := v.(proto.Message)
	newSlice := make([]byte, 0, cb.lastMarshaledSize)

	cb.SetBuf(newSlice)
	cb.Reset()
	if err := cb.Marshal(protoMsg); err != nil {
		return nil, err
	}
	out := cb.Bytes()
	cb.lastMarshaledSize = capToMaxInt32(len(out))
	return out, nil
}

func (codec) Marshal(v interface{}) ([]byte, error) {
	if pm, ok := v.(proto.Marshaler); ok {
		// object can marshal itself, no need for buffer
		return pm.Marshal()
	vv, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
	}

	cb := protoBufferPool.Get().(*cachedProtoBuffer)
	out, err := marshal(v, cb)

	// put back buffer and lose the ref to the slice
	cb.SetBuf(nil)
	protoBufferPool.Put(cb)
	return out, err
	return proto.Marshal(vv)
}

func (codec) Unmarshal(data []byte, v interface{}) error {
	protoMsg := v.(proto.Message)
	protoMsg.Reset()

	if pu, ok := protoMsg.(proto.Unmarshaler); ok {
		// object can unmarshal itself, no need for buffer
		return pu.Unmarshal(data)
	vv, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
	}

	cb := protoBufferPool.Get().(*cachedProtoBuffer)
	cb.SetBuf(data)
	err := cb.Unmarshal(protoMsg)
	cb.SetBuf(nil)
	protoBufferPool.Put(cb)
	return err
	return proto.Unmarshal(data, vv)
}

func (codec) Name() string {
	return Name
}

var protoBufferPool = &sync.Pool{
	New: func() interface{} {
		return &cachedProtoBuffer{
			Buffer: proto.Buffer{},
			lastMarshaledSize: 16,
		}
	},
}
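The default codec now defers entirely to proto.Marshal and proto.Unmarshal from the new protobuf runtime instead of pooling proto.Buffer objects. As a hedged sketch (not part of the diff), a custom codec with the same shape can be registered and selected by content-subtype; the "debugproto" name is made up.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc/encoding"
)

// debugCodec mirrors the simplified default codec but counts marshaled bytes.
type debugCodec struct{ bytesOut int }

func (c *debugCodec) Marshal(v interface{}) ([]byte, error) {
	msg, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
	}
	b, err := proto.Marshal(msg)
	c.bytesOut += len(b)
	return b, err
}

func (c *debugCodec) Unmarshal(data []byte, v interface{}) error {
	msg, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
	}
	return proto.Unmarshal(data, msg)
}

func (*debugCodec) Name() string { return "debugproto" }

func init() {
	// Registered codecs are looked up by name when a call sets the
	// corresponding content-subtype.
	encoding.RegisterCodec(&debugCodec{})
}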
|
@ -3,14 +3,15 @@ module google.golang.org/grpc
go 1.11

require (
	github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473
	github.com/envoyproxy/protoc-gen-validate v0.1.0
	github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403
	github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/golang/mock v1.1.1
	github.com/golang/protobuf v1.3.2
	github.com/google/go-cmp v0.2.0
	github.com/golang/protobuf v1.4.2
	github.com/google/go-cmp v0.5.0
	github.com/google/uuid v1.1.2
	golang.org/x/net v0.0.0-20190311183353-d8887717615a
	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
	golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
	google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
	google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
	google.golang.org/protobuf v1.25.0
)
|
@ -0,0 +1,117 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
// componentData records the settings for a component.
|
||||
type componentData struct {
|
||||
name string
|
||||
}
|
||||
|
||||
var cache = map[string]*componentData{}
|
||||
|
||||
func (c *componentData) InfoDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.InfoDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) WarningDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.WarningDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.ErrorDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) FatalDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.FatalDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Info(args ...interface{}) {
|
||||
c.InfoDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Warning(args ...interface{}) {
|
||||
c.WarningDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Error(args ...interface{}) {
|
||||
c.ErrorDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Fatal(args ...interface{}) {
|
||||
c.FatalDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Infof(format string, args ...interface{}) {
|
||||
c.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Warningf(format string, args ...interface{}) {
|
||||
c.WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Errorf(format string, args ...interface{}) {
|
||||
c.ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Fatalf(format string, args ...interface{}) {
|
||||
c.FatalDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Infoln(args ...interface{}) {
|
||||
c.InfoDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Warningln(args ...interface{}) {
|
||||
c.WarningDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Errorln(args ...interface{}) {
|
||||
c.ErrorDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Fatalln(args ...interface{}) {
|
||||
c.FatalDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) V(l int) bool {
|
||||
return V(l)
|
||||
}
|
||||
|
||||
// Component creates a new component and returns it for logging. If a component
|
||||
// with the name already exists, nothing will be created and it will be
|
||||
// returned. SetLoggerV2 will panic if it is called with a logger created by
|
||||
// Component.
|
||||
func Component(componentName string) DepthLoggerV2 {
|
||||
if cData, ok := cache[componentName]; ok {
|
||||
return cData
|
||||
}
|
||||
c := &componentData{componentName}
|
||||
cache[componentName] = c
|
||||
return c
|
||||
}
|
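The componentData type added in this new file is consumed through the exported grpclog.Component constructor. A hedged sketch (not part of the diff); the component name is arbitrary.

package main

import "google.golang.org/grpc/grpclog"

// Component returns (or reuses) a DepthLoggerV2 that prefixes every entry
// with the component name, e.g. "[routing]".
var logger = grpclog.Component("routing")

func logStartup(addr string) {
	if logger.V(2) {
		logger.Infof("listening on %s", addr)
	}
}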
|
@ -26,64 +26,70 @@
|
|||
// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
|
||||
package grpclog // import "google.golang.org/grpc/grpclog"
|
||||
|
||||
import "os"
|
||||
import (
|
||||
"os"
|
||||
|
||||
var logger = newLoggerV2()
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
SetLoggerV2(newLoggerV2())
|
||||
}
|
||||
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
func V(l int) bool {
|
||||
return logger.V(l)
|
||||
return grpclog.Logger.V(l)
|
||||
}
|
||||
|
||||
// Info logs to the INFO log.
|
||||
func Info(args ...interface{}) {
|
||||
logger.Info(args...)
|
||||
grpclog.Logger.Info(args...)
|
||||
}
|
||||
|
||||
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
logger.Infof(format, args...)
|
||||
grpclog.Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
func Infoln(args ...interface{}) {
|
||||
logger.Infoln(args...)
|
||||
grpclog.Logger.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warning logs to the WARNING log.
|
||||
func Warning(args ...interface{}) {
|
||||
logger.Warning(args...)
|
||||
grpclog.Logger.Warning(args...)
|
||||
}
|
||||
|
||||
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
logger.Warningf(format, args...)
|
||||
grpclog.Logger.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
func Warningln(args ...interface{}) {
|
||||
logger.Warningln(args...)
|
||||
grpclog.Logger.Warningln(args...)
|
||||
}
|
||||
|
||||
// Error logs to the ERROR log.
|
||||
func Error(args ...interface{}) {
|
||||
logger.Error(args...)
|
||||
grpclog.Logger.Error(args...)
|
||||
}
|
||||
|
||||
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
logger.Errorf(format, args...)
|
||||
grpclog.Logger.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
func Errorln(args ...interface{}) {
|
||||
logger.Errorln(args...)
|
||||
grpclog.Logger.Errorln(args...)
|
||||
}
|
||||
|
||||
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatal(args ...interface{}) {
|
||||
logger.Fatal(args...)
|
||||
grpclog.Logger.Fatal(args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
@ -91,7 +97,7 @@ func Fatal(args ...interface{}) {
|
|||
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
logger.Fatalf(format, args...)
|
||||
grpclog.Logger.Fatalf(format, args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
@ -99,7 +105,7 @@ func Fatalf(format string, args ...interface{}) {
|
|||
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
|
||||
// It calle os.Exit()) with exit code 1.
|
||||
func Fatalln(args ...interface{}) {
|
||||
logger.Fatalln(args...)
|
||||
grpclog.Logger.Fatalln(args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
@ -108,19 +114,19 @@ func Fatalln(args ...interface{}) {
|
|||
//
|
||||
// Deprecated: use Info.
|
||||
func Print(args ...interface{}) {
|
||||
logger.Info(args...)
|
||||
grpclog.Logger.Info(args...)
|
||||
}
|
||||
|
||||
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
|
||||
//
|
||||
// Deprecated: use Infof.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
logger.Infof(format, args...)
|
||||
grpclog.Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
|
||||
//
|
||||
// Deprecated: use Infoln.
|
||||
func Println(args ...interface{}) {
|
||||
logger.Infoln(args...)
|
||||
grpclog.Logger.Infoln(args...)
|
||||
}
|
||||
|
|
|
@ -18,6 +18,8 @@

package grpclog

import "google.golang.org/grpc/internal/grpclog"

// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.

@ -35,7 +37,7 @@ type Logger interface {
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
	logger = &loggerWrapper{Logger: l}
	grpclog.Logger = &loggerWrapper{Logger: l}
}

// loggerWrapper wraps Logger into a LoggerV2.
|
@ -24,6 +24,8 @@ import (
	"log"
	"os"
	"strconv"

	"google.golang.org/grpc/internal/grpclog"
)

// LoggerV2 does underlying logging work for grpclog.

@ -65,7 +67,11 @@ type LoggerV2 interface {
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
	logger = l
	if _, ok := l.(*componentData); ok {
		panic("cannot use component logger as grpclog logger")
	}
	grpclog.Logger = l
	grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
}

const (

@ -193,3 +199,23 @@ func (g *loggerT) Fatalf(format string, args ...interface{}) {
func (g *loggerT) V(l int) bool {
	return l <= g.v
}

// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
	LoggerV2
	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
	InfoDepth(depth int, args ...interface{})
	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
	WarningDepth(depth int, args ...interface{})
	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
	ErrorDepth(depth int, args ...interface{})
	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
	FatalDepth(depth int, args ...interface{})
}
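SetLoggerV2 now refuses component loggers and forwards everything to the internal grpclog state. A hedged sketch (not part of the diff) of installing a custom LoggerV2 before any other gRPC call; the writer choices are arbitrary.

package main

import (
	"io/ioutil"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Discard INFO, send WARNING and ERROR to stderr; per the contract above
	// this must run before any other gRPC function.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}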
|
@ -1,28 +1,46 @@
|
|||
// Copyright 2015 The gRPC Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The canonical version of this proto can be found at
|
||||
// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.14.0
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
type HealthCheckResponse_ServingStatus int32
|
||||
|
||||
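The regenerated grpc_health_v1 bindings that follow keep the same service surface, so existing callers are unaffected. A hedged sketch (not part of the diff) of a client-side health probe; connection setup is omitted, and the empty service name asks for overall server health.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func probe(conn *grpc.ClientConn) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("health check failed: %v", err)
	}
	log.Printf("serving status: %s", resp.GetStatus())
}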
|
@ -30,314 +48,266 @@ const (
|
|||
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
|
||||
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
|
||||
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
|
||||
HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3
|
||||
HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method.
|
||||
)
|
||||
|
||||
var HealthCheckResponse_ServingStatus_name = map[int32]string{
|
||||
// Enum value maps for HealthCheckResponse_ServingStatus.
|
||||
var (
|
||||
HealthCheckResponse_ServingStatus_name = map[int32]string{
|
||||
0: "UNKNOWN",
|
||||
1: "SERVING",
|
||||
2: "NOT_SERVING",
|
||||
3: "SERVICE_UNKNOWN",
|
||||
}
|
||||
|
||||
var HealthCheckResponse_ServingStatus_value = map[string]int32{
|
||||
}
|
||||
HealthCheckResponse_ServingStatus_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"SERVING": 1,
|
||||
"NOT_SERVING": 2,
|
||||
"SERVICE_UNKNOWN": 3,
|
||||
}
|
||||
)
|
||||
|
||||
func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus {
|
||||
p := new(HealthCheckResponse_ServingStatus)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x HealthCheckResponse_ServingStatus) String() string {
|
||||
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType {
|
||||
return &file_grpc_health_v1_health_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead.
|
||||
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_e265fd9d4e077217, []int{1, 0}
|
||||
return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0}
|
||||
}
|
||||
|
||||
type HealthCheckRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
|
||||
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (x *HealthCheckRequest) Reset() {
|
||||
*x = HealthCheckRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HealthCheckRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HealthCheckRequest) ProtoMessage() {}
|
||||
|
||||
func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
|
||||
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_e265fd9d4e077217, []int{0}
|
||||
return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckRequest.Merge(m, src)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_HealthCheckRequest.Size(m)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *HealthCheckRequest) GetService() string {
|
||||
if m != nil {
|
||||
return m.Service
|
||||
func (x *HealthCheckRequest) GetService() string {
|
||||
if x != nil {
|
||||
return x.Service
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type HealthCheckResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
|
||||
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (x *HealthCheckResponse) Reset() {
|
||||
*x = HealthCheckResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HealthCheckResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HealthCheckResponse) ProtoMessage() {}
|
||||
|
||||
func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_health_v1_health_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
|
||||
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_e265fd9d4e077217, []int{1}
|
||||
return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckResponse.Merge(m, src)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_HealthCheckResponse.Size(m)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
|
||||
if m != nil {
|
||||
return m.Status
|
||||
func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
|
||||
if x != nil {
|
||||
return x.Status
|
||||
}
|
||||
return HealthCheckResponse_UNKNOWN
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
|
||||
proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
|
||||
proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse")
|
||||
var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_grpc_health_v1_health_proto_rawDesc = []byte{
|
||||
0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
|
||||
0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
|
||||
0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
|
||||
0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
|
||||
0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
|
||||
0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
|
||||
0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
|
||||
0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
|
||||
0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
|
||||
0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
|
||||
0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
|
||||
0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
|
||||
0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
|
||||
0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
|
||||
0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
|
||||
0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
|
||||
0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
|
||||
0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
|
||||
0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
|
||||
0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c,
|
||||
0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) }
|
||||
var (
|
||||
file_grpc_health_v1_health_proto_rawDescOnce sync.Once
|
||||
file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc
|
||||
)
|
||||
|
||||
var fileDescriptor_e265fd9d4e077217 = []byte{
|
||||
// 297 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
|
||||
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
|
||||
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
|
||||
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
|
||||
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
|
||||
0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
|
||||
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
|
||||
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
|
||||
0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
|
||||
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
|
||||
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
|
||||
0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
|
||||
0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
|
||||
0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
|
||||
0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
|
||||
0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
|
||||
0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
|
||||
0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
|
||||
0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
|
||||
func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
|
||||
file_grpc_health_v1_health_proto_rawDescOnce.Do(func() {
|
||||
file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData)
|
||||
})
|
||||
return file_grpc_health_v1_health_proto_rawDescData
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// HealthClient is the client API for Health service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type HealthClient interface {
|
||||
// If the requested service is unknown, the call will fail with status
|
||||
// NOT_FOUND.
|
||||
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
|
||||
// Performs a watch for the serving status of the requested service.
|
||||
// The server will immediately send back a message indicating the current
|
||||
// serving status. It will then subsequently send a new message whenever
|
||||
// the service's serving status changes.
|
||||
//
|
||||
// If the requested service is unknown when the call is received, the
|
||||
// server will send a message setting the serving status to
|
||||
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
|
||||
// future point, the serving status of the service becomes known, the
|
||||
// server will send a new message with the service's serving status.
|
||||
//
|
||||
// If the call terminates with status UNIMPLEMENTED, then clients
|
||||
// should assume this method is not supported and should not retry the
|
||||
// call. If the call terminates with any other status (including OK),
|
||||
// clients should retry the call with appropriate exponential backoff.
|
||||
Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
|
||||
var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_grpc_health_v1_health_proto_goTypes = []interface{}{
|
||||
(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
|
||||
(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
|
||||
(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
|
||||
}
|
||||
var file_grpc_health_v1_health_proto_depIdxs = []int32{
|
||||
0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
|
||||
1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
|
||||
	1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
	2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
	2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
	3, // [3:5] is the sub-list for method output_type
	1, // [1:3] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}

func init() { file_grpc_health_v1_health_proto_init() }
func file_grpc_health_v1_health_proto_init() {
	if File_grpc_health_v1_health_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*HealthCheckRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*HealthCheckResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_grpc_health_v1_health_proto_rawDesc,
			NumEnums:      1,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_grpc_health_v1_health_proto_goTypes,
		DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs,
		EnumInfos:         file_grpc_health_v1_health_proto_enumTypes,
		MessageInfos:      file_grpc_health_v1_health_proto_msgTypes,
	}.Build()
	File_grpc_health_v1_health_proto = out.File
	file_grpc_health_v1_health_proto_rawDesc = nil
	file_grpc_health_v1_health_proto_goTypes = nil
	file_grpc_health_v1_health_proto_depIdxs = nil
}

type healthClient struct {
	cc *grpc.ClientConn
}

func NewHealthClient(cc *grpc.ClientConn) HealthClient {
	return &healthClient{cc}
}

func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
	out := new(HealthCheckResponse)
	err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
	if err != nil {
		return nil, err
	}
	x := &healthWatchClient{stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

type Health_WatchClient interface {
	Recv() (*HealthCheckResponse, error)
	grpc.ClientStream
}

type healthWatchClient struct {
	grpc.ClientStream
}

func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
	m := new(HealthCheckResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

// HealthServer is the server API for Health service.
type HealthServer interface {
	// If the requested service is unknown, the call will fail with status
	// NOT_FOUND.
	Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
	// Performs a watch for the serving status of the requested service.
	// The server will immediately send back a message indicating the current
	// serving status. It will then subsequently send a new message whenever
	// the service's serving status changes.
	//
	// If the requested service is unknown when the call is received, the
	// server will send a message setting the serving status to
	// SERVICE_UNKNOWN but will *not* terminate the call. If at some
	// future point, the serving status of the service becomes known, the
	// server will send a new message with the service's serving status.
	//
	// If the call terminates with status UNIMPLEMENTED, then clients
	// should assume this method is not supported and should not retry the
	// call. If the call terminates with any other status (including OK),
	// clients should retry the call with appropriate exponential backoff.
	Watch(*HealthCheckRequest, Health_WatchServer) error
}

// UnimplementedHealthServer can be embedded to have forward compatible implementations.
type UnimplementedHealthServer struct {
}

func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error {
	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}

func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
	s.RegisterService(&_Health_serviceDesc, srv)
}

func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(HealthCheckRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(HealthServer).Check(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/grpc.health.v1.Health/Check",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(HealthCheckRequest)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
}

type Health_WatchServer interface {
	Send(*HealthCheckResponse) error
	grpc.ServerStream
}

type healthWatchServer struct {
	grpc.ServerStream
}

func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
	return x.ServerStream.SendMsg(m)
}

var _Health_serviceDesc = grpc.ServiceDesc{
	ServiceName: "grpc.health.v1.Health",
	HandlerType: (*HealthServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Check",
			Handler:    _Health_Check_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "Watch",
			Handler:       _Health_Watch_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "grpc/health/v1/health.proto",
}
|
|
201
vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
|||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.1.0
|
||||
// - protoc v3.14.0
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// HealthClient is the client API for Health service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type HealthClient interface {
|
||||
// If the requested service is unknown, the call will fail with status
|
||||
// NOT_FOUND.
|
||||
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
|
||||
// Performs a watch for the serving status of the requested service.
|
||||
// The server will immediately send back a message indicating the current
|
||||
// serving status. It will then subsequently send a new message whenever
|
||||
// the service's serving status changes.
|
||||
//
|
||||
// If the requested service is unknown when the call is received, the
|
||||
// server will send a message setting the serving status to
|
||||
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
|
||||
// future point, the serving status of the service becomes known, the
|
||||
// server will send a new message with the service's serving status.
|
||||
//
|
||||
// If the call terminates with status UNIMPLEMENTED, then clients
|
||||
// should assume this method is not supported and should not retry the
|
||||
// call. If the call terminates with any other status (including OK),
|
||||
// clients should retry the call with appropriate exponential backoff.
|
||||
Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
|
||||
}
|
||||
|
||||
type healthClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
|
||||
return &healthClient{cc}
|
||||
}
|
||||
|
||||
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
|
||||
out := new(HealthCheckResponse)
|
||||
err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &healthWatchClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type Health_WatchClient interface {
|
||||
Recv() (*HealthCheckResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type healthWatchClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
|
||||
m := new(HealthCheckResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// HealthServer is the server API for Health service.
|
||||
// All implementations should embed UnimplementedHealthServer
|
||||
// for forward compatibility
|
||||
type HealthServer interface {
|
||||
// If the requested service is unknown, the call will fail with status
|
||||
// NOT_FOUND.
|
||||
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
|
||||
// Performs a watch for the serving status of the requested service.
|
||||
// The server will immediately send back a message indicating the current
|
||||
// serving status. It will then subsequently send a new message whenever
|
||||
// the service's serving status changes.
|
||||
//
|
||||
// If the requested service is unknown when the call is received, the
|
||||
// server will send a message setting the serving status to
|
||||
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
|
||||
// future point, the serving status of the service becomes known, the
|
||||
// server will send a new message with the service's serving status.
|
||||
//
|
||||
// If the call terminates with status UNIMPLEMENTED, then clients
|
||||
// should assume this method is not supported and should not retry the
|
||||
// call. If the call terminates with any other status (including OK),
|
||||
// clients should retry the call with appropriate exponential backoff.
|
||||
Watch(*HealthCheckRequest, Health_WatchServer) error
|
||||
}
|
||||
|
||||
// UnimplementedHealthServer should be embedded to have forward compatible implementations.
|
||||
type UnimplementedHealthServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
|
||||
}
|
||||
func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
|
||||
}
|
||||
|
||||
// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to HealthServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeHealthServer interface {
|
||||
mustEmbedUnimplementedHealthServer()
|
||||
}
|
||||
|
||||
func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
|
||||
s.RegisterService(&Health_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(HealthCheckRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HealthServer).Check(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/grpc.health.v1.Health/Check",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(HealthCheckRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
|
||||
}
|
||||
|
||||
type Health_WatchServer interface {
|
||||
Send(*HealthCheckResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type healthWatchServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var Health_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "grpc.health.v1.Health",
|
||||
HandlerType: (*HealthServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Check",
|
||||
Handler: _Health_Check_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "Watch",
|
||||
Handler: _Health_Watch_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "grpc/health/v1/health.proto",
|
||||
}
|
|
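The regenerated stubs above keep the same call surface, so callers mostly notice only the switch to grpc.ClientConnInterface. A minimal usage sketch, assuming a reachable server; the address and the watched service name are placeholders, not part of this diff:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Placeholder target; any ClientConnInterface works with NewHealthClient.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := healthpb.NewHealthClient(conn)

	// Unary check: an empty Service name asks about the server as a whole.
	resp, err := client.Check(context.Background(), &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("check: %v", err)
	}
	log.Printf("overall status: %v", resp.GetStatus())

	// Streaming watch: Recv blocks until the serving status changes.
	stream, err := client.Watch(context.Background(), &healthpb.HealthCheckRequest{Service: "my.pkg.MyService"})
	if err != nil {
		log.Fatalf("watch: %v", err)
	}
	for {
		update, err := stream.Recv()
		if err != nil {
			log.Printf("watch ended: %v", err)
			return
		}
		log.Printf("new status: %v", update.GetStatus())
	}
}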
@ -1,6 +1,6 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -16,29 +16,8 @@
|
|||
*
|
||||
*/
|
||||
|
||||
// This file contains wrappers for grpclog functions.
|
||||
// The transport package only logs to verbose level 2 by default.
|
||||
|
||||
package transport
|
||||
package health
|
||||
|
||||
import "google.golang.org/grpc/grpclog"
|
||||
|
||||
const logLevel = 2
|
||||
|
||||
func infof(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Infof(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func warningf(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Warningf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func errorf(format string, args ...interface{}) {
|
||||
if grpclog.V(logLevel) {
|
||||
grpclog.Errorf(format, args...)
|
||||
}
|
||||
}
|
||||
var logger = grpclog.Component("health_service")
|
|
@ -16,8 +16,6 @@
|
|||
*
|
||||
*/
|
||||
|
||||
//go:generate ./regenerate.sh
|
||||
|
||||
// Package health provides a service that exposes server's health and it must be
|
||||
// imported to enable support for client-side health checks.
|
||||
package health
|
||||
|
@ -27,7 +25,6 @@ import (
|
|||
"sync"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
|
||||
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/status"
|
||||
|
@ -35,6 +32,7 @@ import (
|
|||
|
||||
// Server implements `service Health`.
|
||||
type Server struct {
|
||||
healthgrpc.UnimplementedHealthServer
|
||||
mu sync.RWMutex
|
||||
// If shutdown is true, it's expected all serving status is NOT_SERVING, and
|
||||
// will stay in NOT_SERVING.
|
||||
|
@ -115,7 +113,7 @@ func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthC
|
|||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.shutdown {
|
||||
grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
|
||||
logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
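The hunk above shows grpc-go's stock health.Server now embedding UnimplementedHealthServer and logging through the package-level component logger. A hedged sketch of how an application wires that server up; the listener port and service name are illustrative:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // illustrative port
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()

	// health.NewServer returns the implementation patched above.
	hs := health.NewServer()
	healthpb.RegisterHealthServer(s, hs)

	// Clients Watch()ing this service name are notified of the change.
	hs.SetServingStatus("my.pkg.MyService", healthpb.HealthCheckResponse_SERVING)

	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}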
@ -25,17 +25,41 @@ import (
|
|||
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
|
||||
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
|
||||
|
||||
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
|
||||
// and it is the responsibility of the interceptor to call it.
|
||||
// This is an EXPERIMENTAL API.
|
||||
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
|
||||
// Unary interceptors can be specified as a DialOption, using
|
||||
// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
|
||||
// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC
|
||||
// delegates all unary RPC invocations to the interceptor, and it is the
|
||||
// responsibility of the interceptor to call invoker to complete the processing
|
||||
// of the RPC.
|
||||
//
|
||||
// method is the RPC name. req and reply are the corresponding request and
|
||||
// response messages. cc is the ClientConn on which the RPC was invoked. invoker
|
||||
// is the handler to complete the RPC and it is the responsibility of the
|
||||
// interceptor to call it. opts contain all applicable call options, including
|
||||
// defaults from the ClientConn as well as per-call options.
|
||||
//
|
||||
// The returned error must be compatible with the status package.
|
||||
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
|
||||
|
||||
// Streamer is called by StreamClientInterceptor to create a ClientStream.
|
||||
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
|
||||
|
||||
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
|
||||
// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
|
||||
// This is an EXPERIMENTAL API.
|
||||
// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
|
||||
// interceptors can be specified as a DialOption, using WithStreamInterceptor()
|
||||
// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream
|
||||
// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations
|
||||
// to the interceptor, and it is the responsibility of the interceptor to call
|
||||
// streamer.
|
||||
//
|
||||
// desc contains a description of the stream. cc is the ClientConn on which the
|
||||
// RPC was invoked. streamer is the handler to create a ClientStream and it is
|
||||
// the responsibility of the interceptor to call it. opts contain all applicable
|
||||
// call options, including defaults from the ClientConn as well as per-call
|
||||
// options.
|
||||
//
|
||||
// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
|
||||
// operations. The returned error must be compatible with the status package.
|
||||
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
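To make the reworked documentation above concrete, here is a minimal unary client interceptor sketch. The timing/logging behaviour and the interceptor name are illustrative; the signature and the requirement to call invoker come straight from the comments above.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// timingInterceptor delegates to invoker (as the contract requires) and logs latency.
func timingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc %s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	// Interceptors are installed as DialOptions when the ClientConn is created.
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithInsecure(),
		grpc.WithChainUnaryInterceptor(timingInterceptor))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}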
|
||||
|
||||
// UnaryServerInfo consists of various information about a unary RPC on
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"os"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
)
|
||||
|
||||
// Logger is the global binary logger. It can be used to get binary logger for
|
||||
|
@ -39,6 +40,8 @@ type Logger interface {
|
|||
// It is used to get a methodLogger for each individual method.
|
||||
var binLogger Logger
|
||||
|
||||
var grpclogLogger = grpclog.Component("binarylog")
|
||||
|
||||
// SetLogger sets the binary logger.
|
||||
//
|
||||
// Only call this at init time.
|
||||
|
@ -146,9 +149,9 @@ func (l *logger) setBlacklist(method string) error {
|
|||
// Each methodLogger returned by this method is a new instance. This is to
|
||||
// generate sequence id within the call.
|
||||
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
|
||||
s, m, err := parseMethodName(methodName)
|
||||
s, m, err := grpcutil.ParseMethod(methodName)
|
||||
if err != nil {
|
||||
grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
|
||||
grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
|
||||
return nil
|
||||
}
|
||||
if ml, ok := l.methods[s+"/"+m]; ok {
|
||||
|
|
|
@ -24,8 +24,6 @@ import (
|
|||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// NewLoggerFromConfigString reads the string and build a logger. It can be used
|
||||
|
@ -52,7 +50,7 @@ func NewLoggerFromConfigString(s string) Logger {
|
|||
methods := strings.Split(s, ",")
|
||||
for _, method := range methods {
|
||||
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
|
||||
grpclog.Warningf("failed to parse binary log config: %v", err)
|
||||
grpclogLogger.Warningf("failed to parse binary log config: %v", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,7 +27,6 @@ import (
|
|||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
@ -66,7 +65,7 @@ func newMethodLogger(h, m uint64) *MethodLogger {
|
|||
callID: idGen.next(),
|
||||
idWithinCallGen: &callIDGenerator{},
|
||||
|
||||
sink: defaultSink, // TODO(blog): make it pluggable.
|
||||
sink: DefaultSink, // TODO(blog): make it pluggable.
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -219,12 +218,12 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
|||
if m, ok := c.Message.(proto.Message); ok {
|
||||
data, err = proto.Marshal(m)
|
||||
if err != nil {
|
||||
grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
|
||||
}
|
||||
} else if b, ok := c.Message.([]byte); ok {
|
||||
data = b
|
||||
} else {
|
||||
grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||||
|
@ -259,12 +258,12 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
|||
if m, ok := c.Message.(proto.Message); ok {
|
||||
data, err = proto.Marshal(m)
|
||||
if err != nil {
|
||||
grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
|
||||
}
|
||||
} else if b, ok := c.Message.([]byte); ok {
|
||||
data = b
|
||||
} else {
|
||||
grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||||
|
@ -315,7 +314,7 @@ type ServerTrailer struct {
|
|||
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||
st, ok := status.FromError(c.Err)
|
||||
if !ok {
|
||||
grpclog.Info("binarylogging: error in trailer is not a status error")
|
||||
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
|
||||
}
|
||||
var (
|
||||
detailsBytes []byte
|
||||
|
@ -325,7 +324,7 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
|||
if stProto != nil && len(stProto.Details) != 0 {
|
||||
detailsBytes, err = proto.Marshal(stProto)
|
||||
if err != nil {
|
||||
grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||||
}
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
|
|
|
@ -21,32 +21,23 @@ package binarylog
|
|||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
|
||||
// DefaultSink is the sink where the logs will be written to. It's exported
|
||||
// for the binarylog package to update.
|
||||
DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
|
||||
)
|
||||
|
||||
// SetDefaultSink sets the sink where binary logs will be written to.
|
||||
//
|
||||
// Not thread safe. Only set during initialization.
|
||||
func SetDefaultSink(s Sink) {
|
||||
if defaultSink != nil {
|
||||
defaultSink.Close()
|
||||
}
|
||||
defaultSink = s
|
||||
}
|
||||
|
||||
// Sink writes log entry into the binary log sink.
|
||||
//
|
||||
// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
|
||||
type Sink interface {
|
||||
// Write will be called to write the log entry into the sink.
|
||||
//
|
||||
|
@ -67,7 +58,7 @@ func (ns *noopSink) Close() error { return nil }
|
|||
// message is prefixed with a 4 byte big endian unsigned integer as the length.
|
||||
//
|
||||
// No buffering is done; Close() doesn't try to close the writer.
|
||||
func newWriterSink(w io.Writer) *writerSink {
|
||||
func newWriterSink(w io.Writer) Sink {
|
||||
return &writerSink{out: w}
|
||||
}
|
||||
|
||||
|
@ -78,7 +69,7 @@ type writerSink struct {
|
|||
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
|
||||
b, err := proto.Marshal(e)
|
||||
if err != nil {
|
||||
grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
|
||||
grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err)
|
||||
}
|
||||
hdr := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
|
||||
|
@ -93,17 +84,17 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
|
|||
|
||||
func (ws *writerSink) Close() error { return nil }
|
||||
|
||||
type bufWriteCloserSink struct {
|
||||
type bufferedSink struct {
|
||||
mu sync.Mutex
|
||||
closer io.Closer
|
||||
out *writerSink // out is built on buf.
|
||||
out Sink // out is built on buf.
|
||||
buf *bufio.Writer // buf is kept for flush.
|
||||
|
||||
writeStartOnce sync.Once
|
||||
writeTicker *time.Ticker
|
||||
}
|
||||
|
||||
func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
|
||||
func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
|
||||
// Start the write loop when Write is called.
|
||||
fs.writeStartOnce.Do(fs.startFlushGoroutine)
|
||||
fs.mu.Lock()
|
||||
|
@ -119,44 +110,50 @@ const (
|
|||
bufFlushDuration = 60 * time.Second
|
||||
)
|
||||
|
||||
func (fs *bufWriteCloserSink) startFlushGoroutine() {
|
||||
func (fs *bufferedSink) startFlushGoroutine() {
|
||||
fs.writeTicker = time.NewTicker(bufFlushDuration)
|
||||
go func() {
|
||||
for range fs.writeTicker.C {
|
||||
fs.mu.Lock()
|
||||
fs.buf.Flush()
|
||||
if err := fs.buf.Flush(); err != nil {
|
||||
grpclogLogger.Warningf("failed to flush to Sink: %v", err)
|
||||
}
|
||||
fs.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (fs *bufWriteCloserSink) Close() error {
|
||||
func (fs *bufferedSink) Close() error {
|
||||
if fs.writeTicker != nil {
|
||||
fs.writeTicker.Stop()
|
||||
}
|
||||
fs.mu.Lock()
|
||||
fs.buf.Flush()
|
||||
fs.closer.Close()
|
||||
fs.out.Close()
|
||||
if err := fs.buf.Flush(); err != nil {
|
||||
grpclogLogger.Warningf("failed to flush to Sink: %v", err)
|
||||
}
|
||||
if err := fs.closer.Close(); err != nil {
|
||||
grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
|
||||
}
|
||||
if err := fs.out.Close(); err != nil {
|
||||
grpclogLogger.Warningf("failed to close the Sink: %v", err)
|
||||
}
|
||||
fs.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func newBufWriteCloserSink(o io.WriteCloser) Sink {
|
||||
// NewBufferedSink creates a binary log sink with the given WriteCloser.
|
||||
//
|
||||
// Write() marshals the proto message and writes it to the given writer. Each
|
||||
// message is prefixed with a 4 byte big endian unsigned integer as the length.
|
||||
//
|
||||
// Content is kept in a buffer, and is flushed every 60 seconds.
|
||||
//
|
||||
// Close closes the WriteCloser.
|
||||
func NewBufferedSink(o io.WriteCloser) Sink {
|
||||
bufW := bufio.NewWriter(o)
|
||||
return &bufWriteCloserSink{
|
||||
return &bufferedSink{
|
||||
closer: o,
|
||||
out: newWriterSink(bufW),
|
||||
buf: bufW,
|
||||
}
|
||||
}
|
||||
|
||||
// NewTempFileSink creates a temp file and returns a Sink that writes to this
|
||||
// file.
|
||||
func NewTempFileSink() (Sink, error) {
|
||||
tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp file: %v", err)
|
||||
}
|
||||
return newBufWriteCloserSink(tempFile), nil
|
||||
}
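This sink code lives in gRPC's internal binarylog package, so applications do not import it directly; binary logging is normally switched on via the GRPC_BINARY_LOG_FILTER environment variable. A hedged sketch of how the pieces above compose, using only the functions shown in this diff (the file path is a placeholder, and the import only resolves inside grpc-go itself):

package example

import (
	"os"

	binarylog "google.golang.org/grpc/internal/binarylog"
)

func newFileSink(path string) (binarylog.Sink, error) {
	f, err := os.Create(path) // e.g. "/tmp/grpc_binarylog.bin"
	if err != nil {
		return nil, err
	}
	// NewBufferedSink wraps f in a bufio.Writer, flushes every 60 seconds from
	// a background goroutine, and closes f when the sink itself is closed.
	return binarylog.NewBufferedSink(f), nil
}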
|
||||
|
|
|
@ -1,41 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// parseMethodName splits service and method from the input. It expects format
|
||||
// "/service/method".
|
||||
//
|
||||
// TODO: move to internal/grpcutil.
|
||||
func parseMethodName(methodName string) (service, method string, _ error) {
|
||||
if !strings.HasPrefix(methodName, "/") {
|
||||
return "", "", errors.New("invalid method name: should start with /")
|
||||
}
|
||||
methodName = methodName[1:]
|
||||
|
||||
pos := strings.LastIndex(methodName, "/")
|
||||
if pos < 0 {
|
||||
return "", "", errors.New("invalid method name: suffix /method is missing")
|
||||
}
|
||||
return methodName[:pos], methodName[pos+1:], nil
|
||||
}
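The helper deleted here was a stop-gap (note its TODO); call sites such as the method logger earlier in this diff now use grpcutil.ParseMethod, which keeps the same "/service/method" contract. A small hedged example (the import only resolves inside grpc-go, since the package is internal):

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	// The leading slash is required; the split happens at the last "/".
	service, method, err := grpcutil.ParseMethod("/grpc.health.v1.Health/Check")
	fmt.Println(service, method, err) // grpc.health.v1.Health Check <nil>
}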
|
|
@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
|||
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
|
||||
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a SubChannel's parent id cannot be 0")
|
||||
logger.Error("a SubChannel's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
|
@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 {
|
|||
// this listen socket.
|
||||
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a ListenSocket's parent id cannot be 0")
|
||||
logger.Error("a ListenSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
|
@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
|||
// this normal socket.
|
||||
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
grpclog.Error("a NormalSocket's parent id cannot be 0")
|
||||
logger.Error("a NormalSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
|
@ -294,7 +294,17 @@ type TraceEventDesc struct {
|
|||
}
|
||||
|
||||
// AddTraceEvent adds a trace event related to the entity with the specified id, using the provided TraceEventDesc.
|
||||
func AddTraceEvent(id int64, desc *TraceEventDesc) {
|
||||
func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
|
||||
for d := desc; d != nil; d = d.Parent {
|
||||
switch d.Severity {
|
||||
case CtUnknown, CtInfo:
|
||||
l.InfoDepth(depth+1, d.Desc)
|
||||
case CtWarning:
|
||||
l.WarningDepth(depth+1, d.Desc)
|
||||
case CtError:
|
||||
l.ErrorDepth(depth+1, d.Desc)
|
||||
}
|
||||
}
|
||||
if getMaxTraceEntry() == 0 {
|
||||
return
|
||||
}
|
||||
|
|
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("channelz")
|
||||
|
||||
// Info logs and adds a trace event if channelz is on.
|
||||
func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
} else {
|
||||
l.InfoDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Infof logs and adds a trace event if channelz is on.
|
||||
func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtInfo,
|
||||
})
|
||||
} else {
|
||||
l.InfoDepth(1, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Warning logs and adds a trace event if channelz is on.
|
||||
func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
} else {
|
||||
l.WarningDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Warningf logs and adds a trace event if channelz is on.
|
||||
func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtWarning,
|
||||
})
|
||||
} else {
|
||||
l.WarningDepth(1, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Error logs and adds a trace event if channelz is on.
|
||||
func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
} else {
|
||||
l.ErrorDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Errorf logs and adds a trace event if channelz is on.
|
||||
func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtError,
|
||||
})
|
||||
} else {
|
||||
l.ErrorDepth(1, msg)
|
||||
}
|
||||
}
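These helpers are how the rest of grpc-go now emits log lines that double as channelz trace events. A hedged sketch of a typical call site; the component name, function name, and id are illustrative, and the channelz import is internal to grpc-go:

package example

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

var logger = grpclog.Component("core") // illustrative component name

func noteSubChannelCreated(id int64) {
	// Always written to the component log; additionally recorded as a channelz
	// trace event on entity id when channelz is turned on.
	channelz.Infof(logger, id, "Created new SubChannel (id=%d)", id)
}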
|
|
@ -26,7 +26,6 @@ import (
|
|||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// entry represents a node in the channelz database.
|
||||
|
@ -60,17 +59,17 @@ func (d *dummyEntry) addChild(id int64, e entry) {
|
|||
// the addrConn will create a new transport. And when registering the new transport in
|
||||
// channelz, its parent addrConn could have already been torn down and deleted
|
||||
// from channelz tracking, and thus reach the code here.
|
||||
grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) deleteChild(id int64) {
|
||||
// It is possible for a normal program to reach here under race condition.
|
||||
// Refer to the example described in addChild().
|
||||
grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) triggerDelete() {
|
||||
grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
}
|
||||
|
||||
func (*dummyEntry) deleteSelfIfReady() {
|
||||
|
@ -215,7 +214,7 @@ func (c *channel) addChild(id int64, e entry) {
|
|||
case *channel:
|
||||
c.nestedChans[id] = v.refName
|
||||
default:
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -326,7 +325,7 @@ func (sc *subChannel) addChild(id int64, e entry) {
|
|||
if v, ok := e.(*normalSocket); ok {
|
||||
sc.sockets[id] = v.refName
|
||||
} else {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -493,11 +492,11 @@ type listenSocket struct {
|
|||
}
|
||||
|
||||
func (ls *listenSocket) addChild(id int64, e entry) {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteChild(id int64) {
|
||||
grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) triggerDelete() {
|
||||
|
@ -506,7 +505,7 @@ func (ls *listenSocket) triggerDelete() {
|
|||
}
|
||||
|
||||
func (ls *listenSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
func (ls *listenSocket) getParentID() int64 {
|
||||
|
@ -522,11 +521,11 @@ type normalSocket struct {
|
|||
}
|
||||
|
||||
func (ns *normalSocket) addChild(id int64, e entry) {
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteChild(id int64) {
|
||||
grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) triggerDelete() {
|
||||
|
@ -535,7 +534,7 @@ func (ns *normalSocket) triggerDelete() {
|
|||
}
|
||||
|
||||
func (ns *normalSocket) deleteSelfIfReady() {
|
||||
grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
}
|
||||
|
||||
func (ns *normalSocket) getParentID() int64 {
|
||||
|
@ -594,7 +593,7 @@ func (s *server) addChild(id int64, e entry) {
|
|||
case *listenSocket:
|
||||
s.listenSockets[id] = v.refName
|
||||
default:
|
||||
grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -673,10 +672,10 @@ func (c *channelTrace) clear() {
|
|||
type Severity int
|
||||
|
||||
const (
|
||||
// CtUNKNOWN indicates unknown severity of a trace event.
|
||||
CtUNKNOWN Severity = iota
|
||||
// CtINFO indicates info level severity of a trace event.
|
||||
CtINFO
|
||||
// CtUnknown indicates unknown severity of a trace event.
|
||||
CtUnknown Severity = iota
|
||||
// CtInfo indicates info level severity of a trace event.
|
||||
CtInfo
|
||||
// CtWarning indicates warning level severity of a trace event.
|
||||
CtWarning
|
||||
// CtError indicates error level severity of a trace event.
|
||||
|
|
|
@ -22,8 +22,6 @@ package channelz
|
|||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var once sync.Once
|
||||
|
@ -39,6 +37,6 @@ type SocketOptionData struct {
|
|||
// Windows OS doesn't support Socket Option
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {
|
||||
once.Do(func() {
|
||||
grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
|
||||
logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.")
|
||||
})
|
||||
}
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// requestInfoKey is a struct to be used as the key to store RequestInfo in a
|
||||
// context.
|
||||
type requestInfoKey struct{}
|
||||
|
||||
// NewRequestInfoContext creates a context with ri.
|
||||
func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
|
||||
return context.WithValue(ctx, requestInfoKey{}, ri)
|
||||
}
|
||||
|
||||
// RequestInfoFromContext extracts the RequestInfo from ctx.
|
||||
func RequestInfoFromContext(ctx context.Context) interface{} {
|
||||
return ctx.Value(requestInfoKey{})
|
||||
}
|
||||
|
||||
// clientHandshakeInfoKey is a struct used as the key to store
|
||||
// ClientHandshakeInfo in a context.
|
||||
type clientHandshakeInfoKey struct{}
|
||||
|
||||
// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
|
||||
func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
|
||||
return ctx.Value(clientHandshakeInfoKey{})
|
||||
}
|
||||
|
||||
// NewClientHandshakeInfoContext creates a context with chi.
|
||||
func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
|
||||
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
|
||||
}
|
|
@ -0,0 +1,77 @@
|
|||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package credentials defines APIs for parsing SPIFFE ID.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net/url"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("credentials")
|
||||
|
||||
// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
|
||||
// is invalid, return nil with warning.
|
||||
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
|
||||
if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return SPIFFEIDFromCert(state.PeerCertificates[0])
|
||||
}
|
||||
|
||||
// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
|
||||
// ID format is invalid, return nil with warning.
|
||||
func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
|
||||
if cert == nil || cert.URIs == nil {
|
||||
return nil
|
||||
}
|
||||
var spiffeID *url.URL
|
||||
for _, uri := range cert.URIs {
|
||||
if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
|
||||
continue
|
||||
}
|
||||
// From this point, we assume the uri is intended for a SPIFFE ID.
|
||||
if len(uri.String()) > 2048 {
|
||||
logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
|
||||
return nil
|
||||
}
|
||||
if len(uri.Host) == 0 || len(uri.Path) == 0 {
|
||||
logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
|
||||
return nil
|
||||
}
|
||||
if len(uri.Host) > 255 {
|
||||
logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
|
||||
return nil
|
||||
}
|
||||
// A valid SPIFFE certificate can only have exactly one URI SAN field.
|
||||
if len(cert.URIs) > 1 {
|
||||
logger.Warning("invalid SPIFFE ID: multiple URI SANs")
|
||||
return nil
|
||||
}
|
||||
spiffeID = uri
|
||||
}
|
||||
return spiffeID
|
||||
}
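A hedged sketch of how handshake code might consume this helper: the raw connection and TLS config are assumed to come from the caller, and the package is internal to grpc-go, so the import is shown purely for illustration.

package example

import (
	"crypto/tls"
	"net"
	"net/url"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func peerSPIFFEID(rawConn net.Conn, cfg *tls.Config) (*url.URL, error) {
	tlsConn := tls.Server(rawConn, cfg)
	if err := tlsConn.Handshake(); err != nil {
		return nil, err
	}
	// Returns nil (with a warning) unless the peer presented exactly one valid
	// spiffe:// URI SAN, per the checks above.
	return icredentials.SPIFFEIDFromState(tlsConn.ConnectionState()), nil
}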
|
31
vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
// +build appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// SPIFFEIDFromState is a no-op for appengine builds.
|
||||
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
|
||||
return nil
|
||||
}
|
|
@ -18,8 +18,7 @@
|
|||
*
|
||||
*/
|
||||
|
||||
// Package internal contains credentials-internal code.
|
||||
package internal
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net"
|
|
@ -18,7 +18,7 @@
|
|||
*
|
||||
*/
|
||||
|
||||
package internal
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net"
|
|
@ -0,0 +1,50 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
// AppendH2ToNextProtos appends h2 to next protos.
|
||||
func AppendH2ToNextProtos(ps []string) []string {
|
||||
for _, p := range ps {
|
||||
if p == alpnProtoStrH2 {
|
||||
return ps
|
||||
}
|
||||
}
|
||||
ret := make([]string, 0, len(ps)+1)
|
||||
ret = append(ret, ps...)
|
||||
return append(ret, alpnProtoStrH2)
|
||||
}
|
||||
|
||||
// CloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO: inline this function if possible.
|
||||
func CloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
|
||||
return cfg.Clone()
|
||||
}
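A short sketch of how transport credentials code can combine the two helpers above; userTLSConfig is an assumed caller-supplied value (possibly nil), and the import is grpc-go internal:

package example

import (
	"crypto/tls"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func configForGRPC(userTLSConfig *tls.Config) *tls.Config {
	cfg := icredentials.CloneTLSConfig(userTLSConfig) // safe shallow copy; handles nil
	cfg.NextProtos = icredentials.AppendH2ToNextProtos(cfg.NextProtos)
	// cfg now advertises "h2" exactly once, without mutating the caller's config.
	return cfg
}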
|
|
@ -34,5 +34,5 @@ var (
|
|||
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
||||
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
|
||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
|
||||
)
|
||||
|
|
|
@ -0,0 +1,126 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpclog (internal) defines depth logging for grpc.
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Logger is the logger used for the non-depth log functions.
|
||||
var Logger LoggerV2
|
||||
|
||||
// DepthLogger is the logger used for the depth log functions.
|
||||
var DepthLogger DepthLoggerV2
|
||||
|
||||
// InfoDepth logs to the INFO log at the specified depth.
|
||||
func InfoDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.InfoDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Infoln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// WarningDepth logs to the WARNING log at the specified depth.
|
||||
func WarningDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.WarningDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Warningln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorDepth logs to the ERROR log at the specified depth.
|
||||
func ErrorDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.ErrorDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Errorln(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// FatalDepth logs to the FATAL log at the specified depth.
|
||||
func FatalDepth(depth int, args ...interface{}) {
|
||||
if DepthLogger != nil {
|
||||
DepthLogger.FatalDepth(depth, args...)
|
||||
} else {
|
||||
Logger.Fatalln(args...)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// LoggerV2 does underlying logging work for grpclog.
|
||||
// This is a copy of the LoggerV2 defined in the external grpclog package. It
|
||||
// is defined here to avoid a circular dependency.
|
||||
type LoggerV2 interface {
|
||||
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
|
||||
Info(args ...interface{})
|
||||
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
Infoln(args ...interface{})
|
||||
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
Infof(format string, args ...interface{})
|
||||
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
|
||||
Warning(args ...interface{})
|
||||
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
Warningln(args ...interface{})
|
||||
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
Warningf(format string, args ...interface{})
|
||||
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
Error(args ...interface{})
|
||||
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
Errorln(args ...interface{})
|
||||
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
Errorf(format string, args ...interface{})
|
||||
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatal(args ...interface{})
|
||||
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalln(args ...interface{})
|
||||
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalf(format string, args ...interface{})
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
V(l int) bool
|
||||
}
|
||||
|
||||
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
|
||||
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
||||
// depth set for trivial functions the logger may ignore.
|
||||
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
|
||||
// It is defined here to avoid a circular dependency.
|
||||
//
|
||||
// Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type DepthLoggerV2 interface {
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
}
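The depth parameter counts call frames to skip, so the reported file:line points at the real call site rather than at a logging wrapper. A minimal sketch of one method of a DepthLoggerV2-style implementation on top of the standard library; the +2 offset is an assumption about how many wrapper frames sit between the caller and log.Output:

package example

import (
	"fmt"
	"log"
)

type stdDepthLogger struct {
	l *log.Logger
}

func (s stdDepthLogger) InfoDepth(depth int, args ...interface{}) {
	// calldepth 2 reports InfoDepth's caller; a larger depth skips further wrappers.
	s.l.Output(depth+2, "INFO: "+fmt.Sprint(args...))
}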
|
|
@ -0,0 +1,81 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// PrefixLogger does logging with a prefix.
|
||||
//
|
||||
// Logging methods on a nil *PrefixLogger log without any prefix.
|
||||
type PrefixLogger struct {
|
||||
logger DepthLoggerV2
|
||||
prefix string
|
||||
}
|
||||
|
||||
// Infof does info logging.
|
||||
func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
// Handle nil, so the tests can pass in a nil logger.
|
||||
format = pl.prefix + format
|
||||
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Warningf does warning logging.
|
||||
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Errorf does error logging.
|
||||
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
||||
if pl != nil {
|
||||
format = pl.prefix + format
|
||||
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Debugf does info logging at verbose level 2.
|
||||
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
if !Logger.V(2) {
|
||||
return
|
||||
}
|
||||
if pl != nil {
|
||||
// Handle nil, so the tests can pass in a nil logger.
|
||||
format = pl.prefix + format
|
||||
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// NewPrefixLogger creates a prefix logger with the given prefix.
|
||||
func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
|
||||
return &PrefixLogger{logger: logger, prefix: prefix}
|
||||
}
|
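A quick orientation sketch, not part of the vendored file: gRPC-internal code typically builds one of these per component. The sketch assumes it lives inside the google.golang.org/grpc module (the package is internal, so it cannot be imported from outside it), and the "example" component name is made up.

package example // hypothetical gRPC-internal package

import (
    "google.golang.org/grpc/grpclog"
    internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

// Every line logged through pl carries the "[example] " prefix; the component
// logger returned by grpclog.Component satisfies DepthLoggerV2.
var pl = internalgrpclog.NewPrefixLogger(grpclog.Component("example"), "[example] ")

func announce(addr string) {
    pl.Infof("connected to %s", addr) // emits: [example] connected to <addr>
}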
63 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go generated vendored Normal file
@ -0,0 +1,63 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpcutil

import (
    "strconv"
    "time"
)

const maxTimeoutValue int64 = 100000000 - 1

// div does integer division and rounds up the result. Note that this is
// equivalent to (d+r-1)/r but has less chance to overflow.
func div(d, r time.Duration) int64 {
    if d%r > 0 {
        return int64(d/r + 1)
    }
    return int64(d / r)
}

// EncodeDuration encodes the duration to the format grpc-timeout header
// accepts.
//
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
func EncodeDuration(t time.Duration) string {
    // TODO: This is simplistic and not bandwidth efficient. Improve it.
    if t <= 0 {
        return "0n"
    }
    if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
        return strconv.FormatInt(d, 10) + "n"
    }
    if d := div(t, time.Microsecond); d <= maxTimeoutValue {
        return strconv.FormatInt(d, 10) + "u"
    }
    if d := div(t, time.Millisecond); d <= maxTimeoutValue {
        return strconv.FormatInt(d, 10) + "m"
    }
    if d := div(t, time.Second); d <= maxTimeoutValue {
        return strconv.FormatInt(d, 10) + "S"
    }
    if d := div(t, time.Minute); d <= maxTimeoutValue {
        return strconv.FormatInt(d, 10) + "M"
    }
    // Note that maxTimeoutValue * time.Hour > MaxInt64.
    return strconv.FormatInt(div(t, time.Hour), 10) + "H"
}
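A few hand-checked encodings to make the unit ladder concrete (an illustrative sketch, not part of the vendored file): the numeric part is capped at eight digits, so the function falls through to coarser units and always rounds up rather than shorten the deadline.

func exampleTimeouts() []string { // illustrative only; assumes the grpcutil package above
    return []string{
        EncodeDuration(0),                             // "0n"        - non-positive timeouts clamp to zero
        EncodeDuration(10 * time.Millisecond),         // "10000000n" - 1e7 ns still fits in eight digits
        EncodeDuration(time.Second),                   // "1000000u"  - 1e9 ns does not, so microseconds are used
        EncodeDuration(time.Second + time.Nanosecond), // "1000001u"  - div rounds up, never truncating
        EncodeDuration(2 * time.Hour),                 // "7200000m"  - milliseconds
    }
}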
@ -0,0 +1,40 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpcutil

import (
    "context"

    "google.golang.org/grpc/metadata"
)

type mdExtraKey struct{}

// WithExtraMetadata creates a new context with incoming md attached.
func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
    return context.WithValue(ctx, mdExtraKey{}, md)
}

// ExtraMetadata returns the incoming metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
// Modification should be made to copies of the returned MD.
func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
    md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
    return
}
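A short usage sketch, not part of the diff: the two helpers simply round-trip a metadata.MD through a context value. The header key is made up, and the fragment assumes the context, metadata, and grpcutil imports.

func tagContext(ctx context.Context) context.Context {
    md := metadata.Pairs("x-example-key", "value") // hypothetical key/value
    return grpcutil.WithExtraMetadata(ctx, md)
}

func readTag(ctx context.Context) string {
    if md, ok := grpcutil.ExtraMetadata(ctx); ok {
        if v := md.Get("x-example-key"); len(v) > 0 {
            return v[0] // read-only; copy the MD before modifying it
        }
    }
    return ""
}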
@ -0,0 +1,84 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpcutil

import (
    "errors"
    "strings"
)

// ParseMethod splits service and method from the input. It expects format
// "/service/method".
func ParseMethod(methodName string) (service, method string, _ error) {
    if !strings.HasPrefix(methodName, "/") {
        return "", "", errors.New("invalid method name: should start with /")
    }
    methodName = methodName[1:]

    pos := strings.LastIndex(methodName, "/")
    if pos < 0 {
        return "", "", errors.New("invalid method name: suffix /method is missing")
    }
    return methodName[:pos], methodName[pos+1:], nil
}

const baseContentType = "application/grpc"

// ContentSubtype returns the content-subtype for the given content-type. The
// given content-type must be a valid content-type that starts with
// "application/grpc". A content-subtype will follow "application/grpc" after a
// "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details.
//
// If contentType is not a valid content-type for gRPC, the boolean
// will be false, otherwise true. If content-type == "application/grpc",
// "application/grpc+", or "application/grpc;", the boolean will be true,
// but no content-subtype will be returned.
//
// contentType is assumed to be lowercase already.
func ContentSubtype(contentType string) (string, bool) {
    if contentType == baseContentType {
        return "", true
    }
    if !strings.HasPrefix(contentType, baseContentType) {
        return "", false
    }
    // guaranteed since != baseContentType and has baseContentType prefix
    switch contentType[len(baseContentType)] {
    case '+', ';':
        // this will return true for "application/grpc+" or "application/grpc;"
        // which the previous validContentType function tested to be valid, so we
        // just say that no content-subtype is specified in this case
        return contentType[len(baseContentType)+1:], true
    default:
        return "", false
    }
}

// ContentType builds full content type with the given sub-type.
//
// contentSubtype is assumed to be lowercase
func ContentType(contentSubtype string) string {
    if contentSubtype == "" {
        return baseContentType
    }
    return baseContentType + "+" + contentSubtype
}
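For reference, a few hand-checked results for the helpers above (an illustrative sketch, not part of the vendored file; the Greeter method name is the canonical gRPC example, not something this file defines):

func exampleRouting() {
    svc, mth, _ := ParseMethod("/helloworld.Greeter/SayHello") // "helloworld.Greeter", "SayHello", nil
    _, _ = svc, mth

    sub, ok := ContentSubtype("application/grpc+proto") // "proto", true
    _, _ = sub, ok
    _, _ = ContentSubtype("application/json") // "", false - not a gRPC content-type
    _ = ContentType("proto")                  // "application/grpc+proto"
    _ = ContentType("")                       // "application/grpc"
}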
@ -0,0 +1,89 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package grpcutil provides a bunch of utility functions to be used across the
// gRPC codebase.
package grpcutil

import (
    "strings"

    "google.golang.org/grpc/resolver"
)

// split2 returns the values from strings.SplitN(s, sep, 2).
// If sep is not found, it returns ("", "", false) instead.
func split2(s, sep string) (string, string, bool) {
    spl := strings.SplitN(s, sep, 2)
    if len(spl) < 2 {
        return "", "", false
    }
    return spl[0], spl[1], true
}

// ParseTarget splits target into a resolver.Target struct containing scheme,
// authority and endpoint. skipUnixColonParsing indicates that the parse should
// not parse "unix:[path]" cases. This should be true in cases where a custom
// dialer is present, to prevent a behavior change.
//
// If target is not a valid scheme://authority/endpoint as specified in
// https://github.com/grpc/grpc/blob/master/doc/naming.md,
// it returns {Endpoint: target}.
func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) {
    var ok bool
    if strings.HasPrefix(target, "unix-abstract:") {
        if strings.HasPrefix(target, "unix-abstract://") {
            // Maybe, with Authority specified, try to parse it
            var remain string
            ret.Scheme, remain, _ = split2(target, "://")
            ret.Authority, ret.Endpoint, ok = split2(remain, "/")
            if !ok {
                // No Authority, add the "//" back
                ret.Endpoint = "//" + remain
            } else {
                // Found Authority, add the "/" back
                ret.Endpoint = "/" + ret.Endpoint
            }
        } else {
            // Without Authority specified, split target on ":"
            ret.Scheme, ret.Endpoint, _ = split2(target, ":")
        }
        return ret
    }
    ret.Scheme, ret.Endpoint, ok = split2(target, "://")
    if !ok {
        if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing {
            // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases,
            // because splitting on :// only handles the
            // "unix://[/absolute/path]" case. Only handle if the dialer is nil,
            // to avoid a behavior change with custom dialers.
            return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]}
        }
        return resolver.Target{Endpoint: target}
    }
    ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
    if !ok {
        return resolver.Target{Endpoint: target}
    }
    if ret.Scheme == "unix" {
        // Add the "/" back in the unix case, so the unix resolver receives the
        // actual endpoint in the "unix://[/absolute/path]" case.
        ret.Endpoint = "/" + ret.Endpoint
    }
    return ret
}
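A few hand-checked parses, illustrative only and not part of the vendored file, showing how targets in the https://github.com/grpc/grpc/blob/master/doc/naming.md syntax decompose (the hosts and socket paths are made up):

func exampleTargets() {
    _ = ParseTarget("dns://8.8.8.8/grpc.example.com", false)
    // -> resolver.Target{Scheme: "dns", Authority: "8.8.8.8", Endpoint: "grpc.example.com"}

    _ = ParseTarget("unix:///var/run/app.sock", false)
    // -> resolver.Target{Scheme: "unix", Endpoint: "/var/run/app.sock"} (the "/" is added back for the unix resolver)

    _ = ParseTarget("unix:/var/run/app.sock", false)
    // -> resolver.Target{Scheme: "unix", Endpoint: "/var/run/app.sock"} (the "unix:[path]" special case)

    _ = ParseTarget("localhost:50051", false)
    // -> resolver.Target{Endpoint: "localhost:50051"} (no scheme, so the whole string is the endpoint)
}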
@ -25,6 +25,7 @@ import (
    "time"

    "google.golang.org/grpc/connectivity"
    "google.golang.org/grpc/serviceconfig"
)

var (
@ -37,17 +38,32 @@ var (
    // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
    // default, but tests may wish to set it lower for convenience.
    KeepaliveMinPingTime = 10 * time.Second
    // StatusRawProto is exported by status/status.go. This func returns a
    // pointer to the wrapped Status proto for a given status.Status without a
    // call to proto.Clone(). The returned Status proto should not be mutated by
    // the caller.
    StatusRawProto interface{} // func (*status.Status) *spb.Status
    // NewRequestInfoContext creates a new context based on the argument context attaching
    // the passed in RequestInfo to the new context.
    NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
    // ParseServiceConfigForTesting is for creating a fake
    // ClientConn for resolver testing only
    ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
    // EqualServiceConfigForTesting is for testing service config generation and
    // parsing. Both a and b should be returned by ParseServiceConfigForTesting.
    // This function compares the config without rawJSON stripped, in case there's
    // a difference in white space.
    EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
    // GetCertificateProviderBuilder returns the registered builder for the
    // given name. This is set by package certprovider for use from xDS
    // bootstrap code while parsing certificate provider configs in the
    // bootstrap file.
    GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
    // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
    // stored in the passed in attributes. This is set by
    // credentials/xds/xds.go.
    GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
    // GetServerCredentials returns the transport credentials configured on a
    // gRPC server. An xDS-enabled server needs to know what type of credentials
    // is configured on the underlying gRPC server. This is set by server.go.
    GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
    // DrainServerTransports initiates a graceful close of existing connections
    // on a gRPC server accepted on the provided listener address. An
    // xDS-enabled server invokes this method on a grpc.Server when a particular
    // listener moves to "not-serving" mode.
    DrainServerTransports interface{} // func(*grpc.Server, string)
)

// HealthChecker defines the signature of the client-side LB channel health checking function.
@ -0,0 +1,50 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package metadata contains functions to set and get metadata from addresses.
//
// This package is experimental.
package metadata

import (
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/resolver"
)

type mdKeyType string

const mdKey = mdKeyType("grpc.internal.address.metadata")

// Get returns the metadata of addr.
func Get(addr resolver.Address) metadata.MD {
    attrs := addr.Attributes
    if attrs == nil {
        return nil
    }
    md, _ := attrs.Value(mdKey).(metadata.MD)
    return md
}

// Set sets (overrides) the metadata in addr.
//
// When a SubConn is created with this address, the RPCs sent on it will all
// have this metadata.
func Set(addr resolver.Address, md metadata.MD) resolver.Address {
    addr.Attributes = addr.Attributes.WithValues(mdKey, md)
    return addr
}
164 vendor/google.golang.org/grpc/internal/resolver/config_selector.go generated vendored Normal file
@ -0,0 +1,164 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package resolver provides internal resolver-related functionality.
package resolver

import (
    "context"
    "sync"

    "google.golang.org/grpc/internal/serviceconfig"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/resolver"
)

// ConfigSelector controls what configuration to use for every RPC.
type ConfigSelector interface {
    // Selects the configuration for the RPC, or terminates it using the error.
    // This error will be converted by the gRPC library to a status error with
    // code UNKNOWN if it is not returned as a status error.
    SelectConfig(RPCInfo) (*RPCConfig, error)
}

// RPCInfo contains RPC information needed by a ConfigSelector.
type RPCInfo struct {
    // Context is the user's context for the RPC and contains headers and
    // application timeout. It is passed for interception purposes and for
    // efficiency reasons. SelectConfig should not be blocking.
    Context context.Context
    Method  string // i.e. "/Service/Method"
}

// RPCConfig describes the configuration to use for each RPC.
type RPCConfig struct {
    // The context to use for the remainder of the RPC; can pass info to LB
    // policy or affect timeout or metadata.
    Context      context.Context
    MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
    OnCommitted  func()                     // Called when the RPC has been committed (retries no longer possible)
    Interceptor  ClientInterceptor
}

// ClientStream is the same as grpc.ClientStream, but defined here for circular
// dependency reasons.
type ClientStream interface {
    // Header returns the header metadata received from the server if there
    // is any. It blocks if the metadata is not ready to read.
    Header() (metadata.MD, error)
    // Trailer returns the trailer metadata from the server, if there is any.
    // It must only be called after stream.CloseAndRecv has returned, or
    // stream.Recv has returned a non-nil error (including io.EOF).
    Trailer() metadata.MD
    // CloseSend closes the send direction of the stream. It closes the stream
    // when non-nil error is met. It is also not safe to call CloseSend
    // concurrently with SendMsg.
    CloseSend() error
    // Context returns the context for this stream.
    //
    // It should not be called until after Header or RecvMsg has returned. Once
    // called, subsequent client-side retries are disabled.
    Context() context.Context
    // SendMsg is generally called by generated code. On error, SendMsg aborts
    // the stream. If the error was generated by the client, the status is
    // returned directly; otherwise, io.EOF is returned and the status of
    // the stream may be discovered using RecvMsg.
    //
    // SendMsg blocks until:
    //   - There is sufficient flow control to schedule m with the transport, or
    //   - The stream is done, or
    //   - The stream breaks.
    //
    // SendMsg does not wait until the message is received by the server. An
    // untimely stream closure may result in lost messages. To ensure delivery,
    // users should ensure the RPC completed successfully using RecvMsg.
    //
    // It is safe to have a goroutine calling SendMsg and another goroutine
    // calling RecvMsg on the same stream at the same time, but it is not safe
    // to call SendMsg on the same stream in different goroutines. It is also
    // not safe to call CloseSend concurrently with SendMsg.
    SendMsg(m interface{}) error
    // RecvMsg blocks until it receives a message into m or the stream is
    // done. It returns io.EOF when the stream completes successfully. On
    // any other error, the stream is aborted and the error contains the RPC
    // status.
    //
    // It is safe to have a goroutine calling SendMsg and another goroutine
    // calling RecvMsg on the same stream at the same time, but it is not
    // safe to call RecvMsg on the same stream in different goroutines.
    RecvMsg(m interface{}) error
}

// ClientInterceptor is an interceptor for gRPC client streams.
type ClientInterceptor interface {
    // NewStream produces a ClientStream for an RPC which may optionally use
    // the provided function to produce a stream for delegation. Note:
    // RPCInfo.Context should not be used (will be nil).
    //
    // done is invoked when the RPC is finished using its connection, or could
    // not be assigned a connection. RPC operations may still occur on
    // ClientStream after done is called, since the interceptor is invoked by
    // application-layer operations. done must never be nil when called.
    NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
}

// ServerInterceptor is unimplementable; do not use.
type ServerInterceptor interface {
    notDefined()
}

type csKeyType string

const csKey = csKeyType("grpc.internal.resolver.configSelector")

// SetConfigSelector sets the config selector in state and returns the new
// state.
func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
    state.Attributes = state.Attributes.WithValues(csKey, cs)
    return state
}

// GetConfigSelector retrieves the config selector from state, if present, and
// returns it or nil if absent.
func GetConfigSelector(state resolver.State) ConfigSelector {
    cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
    return cs
}

// SafeConfigSelector allows for safe switching of ConfigSelector
// implementations such that previous values are guaranteed to not be in use
// when UpdateConfigSelector returns.
type SafeConfigSelector struct {
    mu sync.RWMutex
    cs ConfigSelector
}

// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
// all uses of the previous ConfigSelector have completed.
func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
    scs.mu.Lock()
    defer scs.mu.Unlock()
    scs.cs = cs
}

// SelectConfig defers to the current ConfigSelector in scs.
func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
    scs.mu.RLock()
    defer scs.mu.RUnlock()
    return scs.cs.SelectConfig(r)
}
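To make the contract concrete, a minimal ConfigSelector sketch (illustrative only, not part of the vendored file): it forces wait-for-ready on every RPC and leaves everything else untouched.

type waitForReadySelector struct{}

func (waitForReadySelector) SelectConfig(ri RPCInfo) (*RPCConfig, error) {
    wfr := true
    return &RPCConfig{
        Context:      ri.Context,
        MethodConfig: serviceconfig.MethodConfig{WaitForReady: &wfr},
    }, nil
}

A resolver would then publish it alongside its state, roughly state = SetConfigSelector(state, waitForReadySelector{}) before handing the state to the ClientConn's UpdateState.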
@ -32,7 +32,9 @@ import (
    "sync"
    "time"

    grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/internal/backoff"
    "google.golang.org/grpc/internal/envconfig"
    "google.golang.org/grpc/internal/grpcrand"
    "google.golang.org/grpc/resolver"
@ -43,6 +45,15 @@ import (
// addresses from SRV records. Must not be changed after init time.
var EnableSRVLookups = false

var logger = grpclog.Component("dns")

// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
// single variable for testing the resolver?
var (
    newTimer           = time.NewTimer
    newTimerDNSResRate = time.NewTimer
)

func init() {
    resolver.Register(NewBuilder())
}
@ -140,7 +151,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
    d.wg.Add(1)
    go d.watcher()
    d.ResolveNow(resolver.ResolveNowOptions{})
    return d, nil
}

@ -198,28 +208,38 @@ func (d *dnsResolver) Close() {
func (d *dnsResolver) watcher() {
    defer d.wg.Done()
    backoffIndex := 1
    for {
        state, err := d.lookup()
        if err != nil {
            // Report error to the underlying grpc.ClientConn.
            d.cc.ReportError(err)
        } else {
            err = d.cc.UpdateState(*state)
        }

        var timer *time.Timer
        if err == nil {
            // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
            // to prevent constantly re-resolving.
            backoffIndex = 1
            timer = newTimerDNSResRate(minDNSResRate)
            select {
            case <-d.ctx.Done():
                timer.Stop()
                return
            case <-d.rn:
            }

        state, err := d.lookup()
        if err != nil {
            d.cc.ReportError(err)
        } else {
            d.cc.UpdateState(*state)
            // Poll on an error found in DNS Resolver or an error received from ClientConn.
            timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
            backoffIndex++
        }

        // Sleep to prevent excessive re-resolutions. Incoming resolution requests
        // will be queued in d.rn.
        t := time.NewTimer(minDNSResRate)
        select {
        case <-t.C:
        case <-d.ctx.Done():
            t.Stop()
            timer.Stop()
            return
        case <-timer.C:
        }
    }
}

@ -251,7 +271,7 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
        return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
    }
    addr := ip + ":" + strconv.Itoa(int(s.Port))
    newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
    newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
}
}
return newAddrs, nil

@ -271,7 +291,7 @@ func handleDNSError(err error, lookupType string) error {
    err = filterError(err)
    if err != nil {
        err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
        grpclog.Infoln(err)
        logger.Info(err)
    }
    return err
}

@ -294,7 +314,7 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {

    // TXT record must have "grpc_config=" attribute in order to be used as service config.
    if !strings.HasPrefix(res, txtAttribute) {
        grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
        logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
        // This is not an error; it is the equivalent of not having a service config.
        return nil
    }

@ -326,13 +346,15 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
    if hostErr != nil && (srvErr != nil || len(srv) == 0) {
        return nil, hostErr
    }
    state := &resolver.State{
        Addresses: append(addrs, srv...),

    state := resolver.State{Addresses: addrs}
    if len(srv) > 0 {
        state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
    }
    if !d.disableServiceConfig {
        state.ServiceConfig = d.lookupTXT()
    }
    return state, nil
    return &state, nil
}

// formatIP returns ok = false if addr is not a valid textual representation of an IP address.

@ -418,12 +440,12 @@ func canaryingSC(js string) string {
    var rcs []rawChoice
    err := json.Unmarshal([]byte(js), &rcs)
    if err != nil {
        grpclog.Warningf("dns: error parsing service config json: %v", err)
        logger.Warningf("dns: error parsing service config json: %v", err)
        return ""
    }
    cliHostname, err := os.Hostname()
    if err != nil {
        grpclog.Warningf("dns: error getting client hostname: %v", err)
        logger.Warningf("dns: error getting client hostname: %v", err)
        return ""
    }
    var sc string

@ -0,0 +1,63 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package unix implements a resolver for unix targets.
package unix

import (
    "fmt"

    "google.golang.org/grpc/internal/transport/networktype"
    "google.golang.org/grpc/resolver"
)

const unixScheme = "unix"
const unixAbstractScheme = "unix-abstract"

type builder struct {
    scheme string
}

func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
    if target.Authority != "" {
        return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
    }
    addr := resolver.Address{Addr: target.Endpoint}
    if b.scheme == unixAbstractScheme {
        // prepend "\x00" to address for unix-abstract
        addr.Addr = "\x00" + addr.Addr
    }
    cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
    return &nopResolver{}, nil
}

func (b *builder) Scheme() string {
    return b.scheme
}

type nopResolver struct {
}

func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}

func (*nopResolver) Close() {}

func init() {
    resolver.Register(&builder{scheme: unixScheme})
    resolver.Register(&builder{scheme: unixAbstractScheme})
}
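For orientation, and not part of the diff: once these builders are registered, standard dial targets reach unix sockets. A sketch with made-up socket paths, assuming the usual grpc import and an insecure connection:

// "unix:///tmp/app.sock" and "unix:/tmp/app.sock" both dial the filesystem
// socket /tmp/app.sock; "unix-abstract:app" dials the abstract socket
// "\x00app" (Linux only).
func dialUnix() (*grpc.ClientConn, error) {
    return grpc.Dial("unix:///tmp/app.sock", grpc.WithInsecure())
}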
178 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go generated vendored Normal file
@ -0,0 +1,178 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package serviceconfig contains utility functions to parse service config.
package serviceconfig

import (
    "encoding/json"
    "fmt"
    "time"

    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/grpclog"
    externalserviceconfig "google.golang.org/grpc/serviceconfig"
)

var logger = grpclog.Component("core")

// BalancerConfig wraps the name and config associated with one load balancing
// policy. It corresponds to a single entry of the loadBalancingConfig field
// from ServiceConfig.
//
// It implements the json.Unmarshaler interface.
//
// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
type BalancerConfig struct {
    Name   string
    Config externalserviceconfig.LoadBalancingConfig
}

type intermediateBalancerConfig []map[string]json.RawMessage

// MarshalJSON implements the json.Marshaler interface.
//
// It marshals the balancer and config into a length-1 slice
// ([]map[string]config).
func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
    if bc.Config == nil {
        // If config is nil, return empty config `{}`.
        return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
    }
    c, err := json.Marshal(bc.Config)
    if err != nil {
        return nil, err
    }
    return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
}

// UnmarshalJSON implements the json.Unmarshaler interface.
//
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
// config. This method iterates through that list in order, and stops at the
// first policy that is supported.
// - If the config for the first supported policy is invalid, the whole service
//   config is invalid.
// - If the list doesn't contain any supported policy, the whole service config
//   is invalid.
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
    var ir intermediateBalancerConfig
    err := json.Unmarshal(b, &ir)
    if err != nil {
        return err
    }

    for i, lbcfg := range ir {
        if len(lbcfg) != 1 {
            return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
        }

        var (
            name    string
            jsonCfg json.RawMessage
        )
        // Get the key:value pair from the map. We have already made sure that
        // the map contains a single entry.
        for name, jsonCfg = range lbcfg {
        }

        builder := balancer.Get(name)
        if builder == nil {
            // If the balancer is not registered, move on to the next config.
            // This is not an error.
            continue
        }
        bc.Name = name

        parser, ok := builder.(balancer.ConfigParser)
        if !ok {
            if string(jsonCfg) != "{}" {
                logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
            }
            // Stop here, even though the builder does not support parsing config.
            return nil
        }

        cfg, err := parser.ParseConfig(jsonCfg)
        if err != nil {
            return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
        }
        bc.Config = cfg
        return nil
    }
    // This is reached when the for loop iterates over all entries, but didn't
    // return. This means we had a loadBalancingConfig slice but did not
    // encounter a registered policy. The config is considered invalid in this
    // case.
    return fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
}

// MethodConfig defines the configuration recommended by the service providers for a
// particular method.
type MethodConfig struct {
    // WaitForReady indicates whether RPCs sent to this method should wait until
    // the connection is ready by default (!failfast). The value specified via the
    // gRPC client API will override the value set here.
    WaitForReady *bool
    // Timeout is the default timeout for RPCs sent to this method. The actual
    // deadline used will be the minimum of the value specified here and the value
    // set by the application via the gRPC client API. If either one is not set,
    // then the other will be used. If neither is set, then the RPC has no deadline.
    Timeout *time.Duration
    // MaxReqSize is the maximum allowed payload size for an individual request in a
    // stream (client->server) in bytes. The size which is measured is the serialized
    // payload after per-message compression (but before stream compression) in bytes.
    // The actual value used is the minimum of the value specified here and the value set
    // by the application via the gRPC client API. If either one is not set, then the other
    // will be used. If neither is set, then the built-in default is used.
    MaxReqSize *int
    // MaxRespSize is the maximum allowed payload size for an individual response in a
    // stream (server->client) in bytes.
    MaxRespSize *int
    // RetryPolicy configures retry options for the method.
    RetryPolicy *RetryPolicy
}

// RetryPolicy defines the go-native version of the retry policy defined by the
// service config here:
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
type RetryPolicy struct {
    // MaxAttempts is the maximum number of attempts, including the original RPC.
    //
    // This field is required and must be two or greater.
    MaxAttempts int

    // Exponential backoff parameters. The initial retry attempt will occur at
    // random(0, initialBackoff). In general, the nth attempt will occur at
    // random(0,
    //   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
    //
    // These fields are required and must be greater than zero.
    InitialBackoff    time.Duration
    MaxBackoff        time.Duration
    BackoffMultiplier float64

    // The set of status codes which may be retried.
    //
    // Status codes are specified as strings, e.g., "UNAVAILABLE".
    //
    // This field is required and must be non-empty.
    // Note: a set is used to store this for easy lookup.
    RetryableStatusCodes map[codes.Code]bool
}
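To illustrate how UnmarshalJSON walks the loadBalancingConfig list, a sketch that is not part of the vendored file; it assumes the round_robin balancer is registered (the grpc package registers it by default), while "made_up_policy" is, as the name says, invented.

func exampleBalancerConfig() (BalancerConfig, error) {
    var bc BalancerConfig
    // The unknown policy is skipped without error; round_robin is registered
    // and takes no configuration, so it is selected with a nil Config.
    err := json.Unmarshal([]byte(`[
        {"made_up_policy": {"foo": 1}},
        {"round_robin": {}}
    ]`), &bc)
    // bc.Name == "round_robin", bc.Config == nil
    return bc, err
}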