mirror of https://github.com/docker/cli.git
bump google.golang.org/grpc v1.20.1
full diff: https://github.com/grpc/grpc-go/compare/v1.12.2...v1.20.1

includes grpc/grpc-go#2695 (transport: do not close channel that can lead to panic)

addresses moby/moby#39053

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
parent 11d2e404c6
commit 93d76c5c90
@@ -85,7 +85,7 @@ golang.org/x/sys 4b34438f7a67ee5f45cc6132e2ba
 golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 # v0.3.0
 golang.org/x/time fbb02b2291d28baffd63558aa44b4b56f178d650
 google.golang.org/genproto 02b4e95473316948020af0b7a4f0f22c73929b0e
-google.golang.org/grpc 7a6a684ca69eb4cae85ad0a484f2e531598c047b # v1.12.2
+google.golang.org/grpc 25c4f928eaa6d96443009bd842389fb4fa48664e # v1.20.1
 gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1
 gopkg.in/yaml.v2 5420a8b6744d3b0345ab293f6fcba19c978f1183 # v2.2.1
 gotest.tools 1083505acf35a0bd8a696b26837e1fb3187a7a83 # v2.3.0
@@ -16,11 +16,11 @@ $ go get -u google.golang.org/grpc
 Prerequisites
 -------------
 
-This requires Go 1.6 or later. Go 1.7 will be required soon.
+gRPC-Go requires Go 1.9 or later.
 
 Constraints
 -----------
-The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
+The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
 
 Documentation
 -------------
@@ -43,3 +43,25 @@ Please update proto package, gRPC package and rebuild the proto files:
 - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
 - `go get -u google.golang.org/grpc`
 - `protoc --go_out=plugins=grpc:. *.proto`
+
+#### How to turn on logging
+
+The default logger is controlled by the environment variables. Turn everything
+on by setting:
+
+```
+GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
+```
+
+#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+
+This error means the connection the RPC is using was closed, and there are many
+possible reasons, including:
+ 1. mis-configured transport credentials, connection failed on handshaking
+ 1. bytes disrupted, possibly by a proxy in between
+ 1. server shutdown
+
+It can be tricky to debug this because the error happens on the client side but
+the root cause of the connection being closed is on the server side. Turn on
+logging on __both client and server__, and see if there are any transport
+errors.
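The new FAQ entry above describes the failure only in prose. As a minimal sketch (the helper name is ours, not part of the gRPC API), a client can classify this error with the status and codes packages that ship with the vendored gRPC:

```
package main

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isTransportClosing reports whether err looks like the
// "code = Unavailable desc = transport is closing" failure described above.
// Illustrative helper only; not part of the gRPC API.
func isTransportClosing(err error) bool {
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.Unavailable
}

func main() {
	// In real code err would come from an RPC on a *grpc.ClientConn; a
	// synthetic status error stands in for it here.
	err := status.Error(codes.Unavailable, "transport is closing")
	if isTransportClosing(err) {
		log.Println("connection closed; enable the logging env vars on both client and server")
	}
}
```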
@@ -16,81 +16,23 @@
  *
  */
 
+// See internal/backoff package for the backoff implementation. This file is
+// kept for the exported types and API backward compatibility.
+
 package grpc
 
 import (
-	"math/rand"
 	"time"
 )
 
 // DefaultBackoffConfig uses values specified for backoff in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
 var DefaultBackoffConfig = BackoffConfig{
 	MaxDelay: 120 * time.Second,
-	baseDelay: 1.0 * time.Second,
-	factor:    1.6,
-	jitter:    0.2,
-}
-
-// backoffStrategy defines the methodology for backing off after a grpc
-// connection failure.
-//
-// This is unexported until the gRPC project decides whether or not to allow
-// alternative backoff strategies. Once a decision is made, this type and its
-// method may be exported.
-type backoffStrategy interface {
-	// backoff returns the amount of time to wait before the next retry given
-	// the number of consecutive failures.
-	backoff(retries int) time.Duration
 }
 
 // BackoffConfig defines the parameters for the default gRPC backoff strategy.
 type BackoffConfig struct {
 	// MaxDelay is the upper bound of backoff delay.
 	MaxDelay time.Duration
-
-	// TODO(stevvooe): The following fields are not exported, as allowing
-	// changes would violate the current gRPC specification for backoff. If
-	// gRPC decides to allow more interesting backoff strategies, these fields
-	// may be opened up in the future.
-
-	// baseDelay is the amount of time to wait before retrying after the first
-	// failure.
-	baseDelay time.Duration
-
-	// factor is applied to the backoff after each retry.
-	factor float64
-
-	// jitter provides a range to randomize backoff delays.
-	jitter float64
-}
-
-func setDefaults(bc *BackoffConfig) {
-	md := bc.MaxDelay
-	*bc = DefaultBackoffConfig
-
-	if md > 0 {
-		bc.MaxDelay = md
-	}
-}
-
-func (bc BackoffConfig) backoff(retries int) time.Duration {
-	if retries == 0 {
-		return bc.baseDelay
-	}
-	backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
-	for backoff < max && retries > 0 {
-		backoff *= bc.factor
-		retries--
-	}
-	if backoff > max {
-		backoff = max
-	}
-	// Randomize backoff delays so that if a cluster of requests start at
-	// the same time, they won't operate in lockstep.
-	backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
-	if backoff < 0 {
-		return 0
-	}
-	return time.Duration(backoff)
 }
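The code removed above moved into gRPC's internal/backoff package; only the exported BackoffConfig type remains in this file. For reference, here is a self-contained sketch of the same exponential-backoff-with-jitter computation, using the DefaultBackoffConfig values (the backoffDelay function name is illustrative, not gRPC API):

```
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelay mirrors the computation that backoff.go used to perform and
// that v1.20.1 delegates to the internal/backoff package: exponential growth
// capped at maxDelay, with +/-20% jitter so retrying clients don't sync up.
func backoffDelay(retries int) time.Duration {
	const (
		baseDelay = 1 * time.Second
		maxDelay  = 120 * time.Second
		factor    = 1.6
		jitter    = 0.2
	)
	if retries == 0 {
		return baseDelay
	}
	backoff, max := float64(baseDelay), float64(maxDelay)
	for backoff < max && retries > 0 {
		backoff *= factor
		retries--
	}
	if backoff > max {
		backoff = max
	}
	// Randomize so that a cluster of clients does not retry in lockstep.
	backoff *= 1 + jitter*(rand.Float64()*2-1)
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff)
}

func main() {
	for retries := 0; retries <= 5; retries++ {
		fmt.Printf("retry %d: wait ~%v\n", retries, backoffDelay(retries).Round(time.Millisecond))
	}
}
```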
@@ -19,11 +19,10 @@
 package grpc
 
 import (
-	"fmt"
+	"context"
 	"net"
 	"sync"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
@@ -118,26 +117,6 @@ type Balancer interface {
 	Close() error
 }
 
-// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
-// call of Balancer.
-type downErr struct {
-	timeout   bool
-	temporary bool
-	desc      string
-}
-
-func (e downErr) Error() string   { return e.desc }
-func (e downErr) Timeout() bool   { return e.timeout }
-func (e downErr) Temporary() bool { return e.temporary }
-
-func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
-	return downErr{
-		timeout:   timeout,
-		temporary: temporary,
-		desc:      fmt.Sprintf(format, a...),
-	}
-}
-
 // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
 // the name resolution updates and updates the addresses available correspondingly.
 //
@@ -410,7 +389,3 @@ func (rr *roundRobin) Close() error {
 type pickFirst struct {
 	*roundRobin
 }
-
-func pickFirstBalancerV1(r naming.Resolver) Balancer {
-	return &pickFirst{&roundRobin{r: r}}
-}
@@ -21,13 +21,15 @@
 package balancer
 
 import (
+	"context"
 	"errors"
 	"net"
 	"strings"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -46,8 +48,20 @@ func Register(b Builder) {
 	m[strings.ToLower(b.Name())] = b
 }
 
+// unregisterForTesting deletes the balancer with the given name from the
+// balancer map.
+//
+// This function is not thread-safe.
+func unregisterForTesting(name string) {
+	delete(m, name)
+}
+
+func init() {
+	internal.BalancerUnregister = unregisterForTesting
+}
+
 // Get returns the resolver builder registered with the given name.
-// Note that the compare is done in a case-insenstive fashion.
+// Note that the compare is done in a case-insensitive fashion.
 // If no builder is register with the name, nil will be returned.
 func Get(name string) Builder {
 	if b, ok := m[strings.ToLower(name)]; ok {
@@ -88,7 +102,15 @@ type SubConn interface {
 }
 
 // NewSubConnOptions contains options to create new SubConn.
-type NewSubConnOptions struct{}
+type NewSubConnOptions struct {
+	// CredsBundle is the credentials bundle that will be used in the created
+	// SubConn. If it's nil, the original creds from grpc DialOptions will be
+	// used.
+	CredsBundle credentials.Bundle
+	// HealthCheckEnabled indicates whether health check service should be
+	// enabled on this SubConn
+	HealthCheckEnabled bool
+}
 
 // ClientConn represents a gRPC ClientConn.
 //
@@ -105,7 +127,7 @@ type ClientConn interface {
 	// The SubConn will be shutdown.
 	RemoveSubConn(SubConn)
 
-	// UpdateBalancerState is called by balancer to nofity gRPC that some internal
+	// UpdateBalancerState is called by balancer to notify gRPC that some internal
 	// state in balancer has changed.
 	//
 	// gRPC will update the connectivity state of the ClientConn, and will call pick
@@ -125,6 +147,8 @@ type BuildOptions struct {
 	// use to dial to a remote load balancer server. The Balancer implementations
 	// can ignore this if it does not need to talk to another party securely.
 	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle that the Balancer can use.
+	CredsBundle credentials.Bundle
 	// Dialer is the custom dialer the Balancer implementation can use to dial
 	// to a remote load balancer server. The Balancer implementations
 	// can ignore this if it doesn't need to talk to remote balancer.
@@ -143,16 +167,27 @@ type Builder interface {
 }
 
 // PickOptions contains addition information for the Pick operation.
-type PickOptions struct{}
+type PickOptions struct {
+	// FullMethodName is the method name that NewClientStream() is called
+	// with. The canonical format is /service/Method.
+	FullMethodName string
+}
 
 // DoneInfo contains additional information for done.
 type DoneInfo struct {
 	// Err is the rpc error the RPC finished with. It could be nil.
 	Err error
+	// Trailer contains the metadata from the RPC's trailer, if present.
+	Trailer metadata.MD
 	// BytesSent indicates if any bytes have been sent to the server.
 	BytesSent bool
 	// BytesReceived indicates if any byte has been received from the server.
 	BytesReceived bool
+	// ServerLoad is the load received from server. It's usually sent as part of
+	// trailing metadata.
+	//
+	// The only supported type now is *orca_v1.LoadReport.
+	ServerLoad interface{}
 }
 
 var (
@@ -182,8 +217,10 @@ type Picker interface {
 	//
 	// If a SubConn is returned:
 	// - If it is READY, gRPC will send the RPC on it;
-	// - If it is not ready, or becomes not ready after it's returned, gRPC will block
-	//   until UpdateBalancerState() is called and will call pick on the new picker.
+	// - If it is not ready, or becomes not ready after it's returned, gRPC will
+	//   block until UpdateBalancerState() is called and will call pick on the
+	//   new picker. The done function returned from Pick(), if not nil, will be
+	//   called with nil error, no bytes sent and no bytes received.
 	//
 	// If the returned error is not nil:
 	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
@@ -194,9 +231,10 @@ type Picker interface {
 	// - Else (error is other non-nil error):
 	//   - The RPC will fail with unavailable error.
 	//
-	// The returned done() function will be called once the rpc has finished, with the
-	// final status of that RPC.
-	// done may be nil if balancer doesn't care about the RPC status.
+	// The returned done() function will be called once the rpc has finished,
+	// with the final status of that RPC. If the SubConn returned is not a
+	// valid SubConn type, done may not be called. done may be nil if balancer
+	// doesn't care about the RPC status.
 	Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
 }
 
@@ -215,14 +253,84 @@ type Balancer interface {
 	// that back to gRPC.
 	// Balancer should also generate and update Pickers when its internal state has
 	// been changed by the new state.
+	//
+	// Deprecated: if V2Balancer is implemented by the Balancer,
+	// UpdateSubConnState will be called instead.
 	HandleSubConnStateChange(sc SubConn, state connectivity.State)
 	// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
 	// balancers.
 	// Balancer can create new SubConn or remove SubConn with the addresses.
 	// An empty address slice and a non-nil error will be passed if the resolver returns
 	// non-nil error to gRPC.
+	//
+	// Deprecated: if V2Balancer is implemented by the Balancer,
+	// UpdateResolverState will be called instead.
 	HandleResolvedAddrs([]resolver.Address, error)
 	// Close closes the balancer. The balancer is not required to call
 	// ClientConn.RemoveSubConn for its existing SubConns.
 	Close()
 }
 
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+	ConnectivityState connectivity.State
+	// TODO: add last connection error
+}
+
+// V2Balancer is defined for documentation purposes. If a Balancer also
+// implements V2Balancer, its UpdateResolverState method will be called instead
+// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of
+// HandleSubConnStateChange.
+type V2Balancer interface {
+	// UpdateResolverState is called by gRPC when the state of the resolver
+	// changes.
+	UpdateResolverState(resolver.State)
+	// UpdateSubConnState is called by gRPC when the state of a SubConn
+	// changes.
+	UpdateSubConnState(SubConn, SubConnState)
+	// Close closes the balancer. The balancer is not required to call
+	// ClientConn.RemoveSubConn for its existing SubConns.
+	Close()
+}
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type ConnectivityStateEvaluator struct {
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// RecordTransition records state change happening in subConn and based on that
+// it evaluates what aggregated state should be.
+//
+// - If at least one SubConn in Ready, the aggregated state is Ready;
+// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
+// - Else the aggregated state is TransientFailure.
+//
+// Idle and Shutdown are not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			cse.numReady += updateVal
+		case connectivity.Connecting:
+			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		}
+	}
+
+	// Evaluate.
+	if cse.numReady > 0 {
+		return connectivity.Ready
+	}
+	if cse.numConnecting > 0 {
+		return connectivity.Connecting
+	}
+	return connectivity.TransientFailure
+}
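The newly exported balancer.ConnectivityStateEvaluator implements the aggregation rule documented above (any Ready SubConn wins, then Connecting, else TransientFailure). A small usage sketch against the vendored v1.20.1 API:

```
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	var cse balancer.ConnectivityStateEvaluator

	// One SubConn leaves Idle and starts connecting: aggregate is Connecting.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))

	// A second SubConn becomes Ready: aggregate flips to Ready.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Ready))

	// The ready SubConn fails; only the connecting one remains counted.
	fmt.Println(cse.RecordTransition(connectivity.Ready, connectivity.TransientFailure))
}
```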
@@ -19,7 +19,8 @@
 package base
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
@@ -29,6 +30,7 @@ import (
 type baseBuilder struct {
 	name          string
 	pickerBuilder PickerBuilder
+	config        Config
 }
 
 func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
@@ -38,11 +40,12 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
 
 		subConns: make(map[resolver.Address]balancer.SubConn),
 		scStates: make(map[balancer.SubConn]connectivity.State),
-		csEvltr:  &connectivityStateEvaluator{},
+		csEvltr:  &balancer.ConnectivityStateEvaluator{},
 		// Initialize picker to a picker that always return
 		// ErrNoSubConnAvailable, because when state of a SubConn changes, we
 		// may call UpdateBalancerState with this picker.
 		picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
+		config: bb.config,
 	}
 }
 
@@ -54,27 +57,30 @@ type baseBalancer struct {
 	cc            balancer.ClientConn
 	pickerBuilder PickerBuilder
 
-	csEvltr *connectivityStateEvaluator
+	csEvltr *balancer.ConnectivityStateEvaluator
 	state   connectivity.State
 
 	subConns map[resolver.Address]balancer.SubConn
 	scStates map[balancer.SubConn]connectivity.State
 	picker   balancer.Picker
+	config   Config
 }
 
 func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
-		return
-	}
-	grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
+	panic("not implemented")
+}
+
+func (b *baseBalancer) UpdateResolverState(s resolver.State) {
+	// TODO: handle s.Err (log if not nil) once implemented.
+	// TODO: handle s.ServiceConfig?
+	grpclog.Infoln("base.baseBalancer: got new resolver state: ", s)
 	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
 	addrsSet := make(map[resolver.Address]struct{})
-	for _, a := range addrs {
+	for _, a := range s.Addresses {
 		addrsSet[a] = struct{}{}
 		if _, ok := b.subConns[a]; !ok {
 			// a is a new address (not existing in b.subConns).
-			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
 			if err != nil {
 				grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
 				continue
@@ -116,6 +122,11 @@ func (b *baseBalancer) regeneratePicker() {
 }
 
 func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	panic("not implemented")
+}
+
+func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	s := state.ConnectivityState
 	grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
 	oldS, ok := b.scStates[sc]
 	if !ok {
@@ -133,7 +144,7 @@ func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectiv
 	}
 
 	oldAggrState := b.state
-	b.state = b.csEvltr.recordTransition(oldS, s)
+	b.state = b.csEvltr.RecordTransition(oldS, s)
 
 	// Regenerate picker when one of the following happens:
 	// - this sc became ready from not-ready
@@ -165,44 +176,3 @@ type errPicker struct {
 func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
 	return nil, nil, p.err
 }
-
-// connectivityStateEvaluator gets updated by addrConns when their
-// states transition, based on which it evaluates the state of
-// ClientConn.
-type connectivityStateEvaluator struct {
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// recordTransition records state change happening in every subConn and based on
-// that it evaluates what aggregated state should be.
-// It can only transition between Ready, Connecting and TransientFailure. Other states,
-// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
-// before any subConn is created ClientConn is in idle state. In the end when ClientConn
-// closes it is in Shutdown state.
-//
-// recordTransition should only be called synchronously from the same goroutine.
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
-	// Update counters.
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			cse.numReady += updateVal
-		case connectivity.Connecting:
-			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
-		}
-	}
-
-	// Evaluate.
-	if cse.numReady > 0 {
-		return connectivity.Ready
-	}
-	if cse.numConnecting > 0 {
-		return connectivity.Connecting
-	}
-	return connectivity.TransientFailure
-}
@@ -45,8 +45,20 @@ type PickerBuilder interface {
 // NewBalancerBuilder returns a balancer builder. The balancers
 // built by this builder will use the picker builder to build pickers.
 func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
+	return NewBalancerBuilderWithConfig(name, pb, Config{})
+}
+
+// Config contains the config info about the base balancer builder.
+type Config struct {
+	// HealthCheck indicates whether health checking should be enabled for this specific balancer.
+	HealthCheck bool
+}
+
+// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
+func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
 	return &baseBuilder{
 		name:          name,
 		pickerBuilder: pb,
+		config:        config,
 	}
 }
@@ -22,12 +22,13 @@
 package roundrobin
 
 import (
+	"context"
 	"sync"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/base"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcrand"
 	"google.golang.org/grpc/resolver"
 )
 
@@ -36,7 +37,7 @@ const Name = "round_robin"
 
 // newBuilder creates a new roundrobin balancer builder.
 func newBuilder() balancer.Builder {
-	return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
+	return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
 }
 
 func init() {
@@ -47,12 +48,19 @@ type rrPickerBuilder struct{}
 
 func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
 	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
+	if len(readySCs) == 0 {
+		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+	}
 	var scs []balancer.SubConn
 	for _, sc := range readySCs {
 		scs = append(scs, sc)
 	}
 	return &rrPicker{
 		subConns: scs,
+		// Start at a random index, as the same RR balancer rebuilds a new
+		// picker when SubConn states change, and we don't want to apply excess
+		// load to the first server in the list.
+		next: grpcrand.Intn(len(scs)),
 	}
 }
 
@@ -67,10 +75,6 @@ type rrPicker struct {
 }
 
 func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	if len(p.subConns) <= 0 {
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-
 	p.mu.Lock()
 	sc := p.subConns[p.next]
 	p.next = (p.next + 1) % len(p.subConns)
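With this change the round_robin builder is constructed with base.Config{HealthCheck: true} and starts picking at a random index. A hedged dial sketch (the target is a placeholder; grpc.WithBalancerName is the dial option this gRPC release uses to select a registered balancer by name):

```
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer/roundrobin"
)

func main() {
	// "example.com:50051" is a placeholder target; any resolver that returns
	// multiple addresses lets round_robin spread RPCs across them.
	conn, err := grpc.Dial("example.com:50051",
		grpc.WithInsecure(),
		grpc.WithBalancerName(roundrobin.Name), // selects the "round_robin" balancer
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```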
@@ -82,20 +82,13 @@ func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
 	return b.c
 }
 
-// resolverUpdate contains the new resolved addresses or error if there's
-// any.
-type resolverUpdate struct {
-	addrs []resolver.Address
-	err   error
-}
-
 // ccBalancerWrapper is a wrapper on top of cc for balancers.
 // It implements balancer.ClientConn interface.
 type ccBalancerWrapper struct {
 	cc               *ClientConn
 	balancer         balancer.Balancer
 	stateChangeQueue *scStateUpdateBuffer
-	resolverUpdateCh chan *resolverUpdate
+	resolverUpdateCh chan *resolver.State
 	done             chan struct{}
 
 	mu       sync.Mutex
@@ -106,7 +99,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
 	ccb := &ccBalancerWrapper{
 		cc:               cc,
 		stateChangeQueue: newSCStateUpdateBuffer(),
-		resolverUpdateCh: make(chan *resolverUpdate, 1),
+		resolverUpdateCh: make(chan *resolver.State, 1),
 		done:             make(chan struct{}),
 		subConns:         make(map[*acBalancerWrapper]struct{}),
 	}
@@ -128,15 +121,23 @@ func (ccb *ccBalancerWrapper) watcher() {
 				return
 			default:
 			}
-			ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
-		case t := <-ccb.resolverUpdateCh:
+			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+				ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
+			} else {
+				ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
+			}
+		case s := <-ccb.resolverUpdateCh:
 			select {
 			case <-ccb.done:
 				ccb.balancer.Close()
 				return
 			default:
 			}
-			ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
+			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+				ub.UpdateResolverState(*s)
+			} else {
+				ccb.balancer.HandleResolvedAddrs(s.Addresses, nil)
+			}
 		case <-ccb.done:
 		}
 
@@ -177,15 +178,23 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
 	})
 }
 
-func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
+func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) {
+	if ccb.cc.curBalancerName != grpclbName {
+		// Filter any grpclb addresses since we don't have the grpclb balancer.
+		for i := 0; i < len(s.Addresses); {
+			if s.Addresses[i].Type == resolver.GRPCLB {
+				copy(s.Addresses[i:], s.Addresses[i+1:])
+				s.Addresses = s.Addresses[:len(s.Addresses)-1]
+				continue
+			}
+			i++
+		}
+	}
 	select {
 	case <-ccb.resolverUpdateCh:
 	default:
 	}
-	ccb.resolverUpdateCh <- &resolverUpdate{
-		addrs: addrs,
-		err:   err,
-	}
+	ccb.resolverUpdateCh <- &s
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@@ -197,7 +206,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
 	if ccb.subConns == nil {
 		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
 	}
-	ac, err := ccb.cc.newAddrConn(addrs)
+	ac, err := ccb.cc.newAddrConn(addrs, opts)
 	if err != nil {
 		return nil, err
 	}
@@ -229,8 +238,13 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc
 	if ccb.subConns == nil {
 		return
 	}
-	ccb.cc.csMgr.updateState(s)
+	// Update picker before updating state. Even though the ordering here does
+	// not matter, it can lead to multiple calls of Pick in the common start-up
+	// case where we wait for ready and then perform an RPC. If the picker is
+	// updated later, we could call the "connecting" picker when the state is
+	// updated, and then call the "ready" picker after the picker gets updated.
 	ccb.cc.blockingpicker.updatePicker(p)
+	ccb.cc.csMgr.updateState(s)
 }
 
 func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
@@ -257,6 +271,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
 	}
 	if !acbw.ac.tryUpdateAddrs(addrs) {
 		cc := acbw.ac.cc
+		opts := acbw.ac.scopts
 		acbw.ac.mu.Lock()
 		// Set old ac.acbw to nil so the Shutdown state update will be ignored
 		// by balancer.
@@ -272,7 +287,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
 		return
 	}
 
-	ac, err := cc.newAddrConn(addrs)
+	ac, err := cc.newAddrConn(addrs, opts)
 	if err != nil {
 		grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
 		return
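The watcher above now applies the same dispatch twice: prefer the V2Balancer API when the wrapped balancer implements it, otherwise fall back to the v1 methods. A condensed sketch of that pattern, with illustrative function names that are not part of the gRPC API:

```
package main

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

// notifySubConnState mirrors the SubConn branch of the wrapper's watcher.
func notifySubConnState(b balancer.Balancer, sc balancer.SubConn, s connectivity.State) {
	if v2, ok := b.(balancer.V2Balancer); ok {
		v2.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
		return
	}
	b.HandleSubConnStateChange(sc, s)
}

// notifyResolverState mirrors the resolver branch of the wrapper's watcher.
func notifyResolverState(b balancer.Balancer, s resolver.State) {
	if v2, ok := b.(balancer.V2Balancer); ok {
		v2.UpdateResolverState(s)
		return
	}
	b.HandleResolvedAddrs(s.Addresses, nil)
}

func main() {}
```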
@@ -19,16 +19,14 @@
 package grpc
 
 import (
+	"context"
 	"strings"
 	"sync"
 
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/resolver"
-	"google.golang.org/grpc/status"
 )
 
 type balancerWrapperBuilder struct {
@@ -55,7 +53,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
 		startCh: make(chan struct{}),
 		conns:   make(map[resolver.Address]balancer.SubConn),
 		connSt:  make(map[balancer.SubConn]*scState),
-		csEvltr: &connectivityStateEvaluator{},
+		csEvltr: &balancer.ConnectivityStateEvaluator{},
 		state:   connectivity.Idle,
 	}
 	cc.UpdateBalancerState(connectivity.Idle, bw)
@@ -80,10 +78,6 @@ type balancerWrapper struct {
 	cc         balancer.ClientConn
 	targetAddr string // Target without the scheme.
 
-	// To aggregate the connectivity state.
-	csEvltr *connectivityStateEvaluator
-	state   connectivity.State
-
 	mu     sync.Mutex
 	conns  map[resolver.Address]balancer.SubConn
 	connSt map[balancer.SubConn]*scState
@@ -92,6 +86,10 @@ type balancerWrapper struct {
 	// - NewSubConn is created, cc wants to notify balancer of state changes;
 	// - Build hasn't return, cc doesn't have access to balancer.
 	startCh chan struct{}
+
+	// To aggregate the connectivity state.
+	csEvltr *balancer.ConnectivityStateEvaluator
+	state   connectivity.State
 }
 
 // lbWatcher watches the Notify channel of the balancer and manages
@@ -248,7 +246,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
 			scSt.down(errConnClosing)
 		}
 	}
-	sa := bw.csEvltr.recordTransition(oldS, s)
+	sa := bw.csEvltr.RecordTransition(oldS, s)
 	if bw.state != sa {
 		bw.state = sa
 	}
@@ -283,9 +281,8 @@ func (bw *balancerWrapper) Close() {
 }
 
 // The picker is the balancerWrapper itself.
-// Pick should never return ErrNoSubConnAvailable.
 // It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
 	failfast := true // Default failfast is true.
 	if ss, ok := rpcInfoFromContext(ctx); ok {
 		failfast = ss.failfast
@@ -294,79 +291,51 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
 	if err != nil {
 		return nil, nil, err
 	}
-	var done func(balancer.DoneInfo)
 	if p != nil {
-		done = func(i balancer.DoneInfo) { p() }
+		done = func(balancer.DoneInfo) { p() }
+		defer func() {
+			if err != nil {
+				p()
+			}
+		}()
 	}
-	var sc balancer.SubConn
 	bw.mu.Lock()
 	defer bw.mu.Unlock()
 	if bw.pickfirst {
 		// Get the first sc in conns.
-		for _, sc = range bw.conns {
-			break
-		}
-	} else {
-		var ok bool
-		sc, ok = bw.conns[resolver.Address{
-			Addr:       a.Addr,
-			Type:       resolver.Backend,
-			ServerName: "",
-			Metadata:   a.Metadata,
-		}]
-		if !ok && failfast {
-			return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
-		}
-		if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
-			// If the returned sc is not ready and RPC is failfast,
-			// return error, and this RPC will fail.
-			return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
+		for _, sc := range bw.conns {
+			return sc, done, nil
 		}
+		return nil, nil, balancer.ErrNoSubConnAvailable
+	}
+	sc, ok1 := bw.conns[resolver.Address{
+		Addr:       a.Addr,
+		Type:       resolver.Backend,
+		ServerName: "",
+		Metadata:   a.Metadata,
+	}]
+	s, ok2 := bw.connSt[sc]
+	if !ok1 || !ok2 {
+		// This can only happen due to a race where Get() returned an address
+		// that was subsequently removed by Notify. In this case we should
+		// retry always.
+		return nil, nil, balancer.ErrNoSubConnAvailable
+	}
+	switch s.s {
+	case connectivity.Ready, connectivity.Idle:
+		return sc, done, nil
+	case connectivity.Shutdown, connectivity.TransientFailure:
+		// If the returned sc has been shut down or is in transient failure,
+		// return error, and this RPC will fail or wait for another picker (if
+		// non-failfast).
+		return nil, nil, balancer.ErrTransientFailure
+	default:
+		// For other states (connecting or unknown), the v1 balancer would
+		// traditionally wait until ready and then issue the RPC. Returning
+		// ErrNoSubConnAvailable will be a slight improvement in that it will
+		// allow the balancer to choose another address in case others are
+		// connected.
+		return nil, nil, balancer.ErrNoSubConnAvailable
 	}
-
-	return sc, done, nil
-}
-
-// connectivityStateEvaluator gets updated by addrConns when their
-// states transition, based on which it evaluates the state of
-// ClientConn.
-type connectivityStateEvaluator struct {
-	mu                  sync.Mutex
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// recordTransition records state change happening in every subConn and based on
-// that it evaluates what aggregated state should be.
-// It can only transition between Ready, Connecting and TransientFailure. Other states,
-// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
-// before any subConn is created ClientConn is in idle state. In the end when ClientConn
-// closes it is in Shutdown state.
-// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state.
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
-	cse.mu.Lock()
-	defer cse.mu.Unlock()
-
-	// Update counters.
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			cse.numReady += updateVal
-		case connectivity.Connecting:
-			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
-		}
-	}
-
-	// Evaluate.
-	if cse.numReady > 0 {
-		return connectivity.Ready
-	}
-	if cse.numConnecting > 0 {
-		return connectivity.Connecting
-	}
-	return connectivity.TransientFailure
 }
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go — generated, vendored, new file (900 lines)
@@ -0,0 +1,900 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
|
||||||
|
|
||||||
|
package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import duration "github.com/golang/protobuf/ptypes/duration"
|
||||||
|
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// Enumerates the type of event
|
||||||
|
// Note the terminology is different from the RPC semantics
|
||||||
|
// definition, but the same meaning is expressed here.
|
||||||
|
type GrpcLogEntry_EventType int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
|
||||||
|
// Header sent from client to server
|
||||||
|
GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
|
||||||
|
// Header sent from server to client
|
||||||
|
GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
|
||||||
|
// Message sent from client to server
|
||||||
|
GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
|
||||||
|
// Message sent from server to client
|
||||||
|
GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
|
||||||
|
// A signal that client is done sending
|
||||||
|
GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
|
||||||
|
// Trailer indicates the end of the RPC.
|
||||||
|
// On client side, this event means a trailer was either received
|
||||||
|
// from the network or the gRPC library locally generated a status
|
||||||
|
// to inform the application about a failure.
|
||||||
|
// On server side, this event means the server application requested
|
||||||
|
// to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
|
||||||
|
// this due to races on server side.
|
||||||
|
GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
|
||||||
|
// A signal that the RPC is cancelled. On client side, this
|
||||||
|
// indicates the client application requests a cancellation.
|
||||||
|
// On server side, this indicates that cancellation was detected.
|
||||||
|
// Note: This marks the end of the RPC. Events may arrive after
|
||||||
|
// this due to races. For example, on client side a trailer
|
||||||
|
// may arrive even though the application requested to cancel the RPC.
|
||||||
|
GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
|
||||||
|
)
|
||||||
|
|
||||||
|
var GrpcLogEntry_EventType_name = map[int32]string{
|
||||||
|
0: "EVENT_TYPE_UNKNOWN",
|
||||||
|
1: "EVENT_TYPE_CLIENT_HEADER",
|
||||||
|
2: "EVENT_TYPE_SERVER_HEADER",
|
||||||
|
3: "EVENT_TYPE_CLIENT_MESSAGE",
|
||||||
|
4: "EVENT_TYPE_SERVER_MESSAGE",
|
||||||
|
5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
|
||||||
|
6: "EVENT_TYPE_SERVER_TRAILER",
|
||||||
|
7: "EVENT_TYPE_CANCEL",
|
||||||
|
}
|
||||||
|
var GrpcLogEntry_EventType_value = map[string]int32{
|
||||||
|
"EVENT_TYPE_UNKNOWN": 0,
|
||||||
|
"EVENT_TYPE_CLIENT_HEADER": 1,
|
||||||
|
"EVENT_TYPE_SERVER_HEADER": 2,
|
||||||
|
"EVENT_TYPE_CLIENT_MESSAGE": 3,
|
||||||
|
"EVENT_TYPE_SERVER_MESSAGE": 4,
|
||||||
|
"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
|
||||||
|
"EVENT_TYPE_SERVER_TRAILER": 6,
|
||||||
|
"EVENT_TYPE_CANCEL": 7,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x GrpcLogEntry_EventType) String() string {
|
||||||
|
return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
|
||||||
|
}
|
||||||
|
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enumerates the entity that generates the log entry
|
||||||
|
type GrpcLogEntry_Logger int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
|
||||||
|
GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1
|
||||||
|
GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
var GrpcLogEntry_Logger_name = map[int32]string{
|
||||||
|
0: "LOGGER_UNKNOWN",
|
||||||
|
1: "LOGGER_CLIENT",
|
||||||
|
2: "LOGGER_SERVER",
|
||||||
|
}
|
||||||
|
var GrpcLogEntry_Logger_value = map[string]int32{
|
||||||
|
"LOGGER_UNKNOWN": 0,
|
||||||
|
"LOGGER_CLIENT": 1,
|
||||||
|
"LOGGER_SERVER": 2,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x GrpcLogEntry_Logger) String() string {
|
||||||
|
return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
|
||||||
|
}
|
||||||
|
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Address_Type int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
Address_TYPE_UNKNOWN Address_Type = 0
|
||||||
|
// address is in 1.2.3.4 form
|
||||||
|
Address_TYPE_IPV4 Address_Type = 1
|
||||||
|
// address is in IPv6 canonical form (RFC5952 section 4)
|
||||||
|
// The scope is NOT included in the address string.
|
||||||
|
Address_TYPE_IPV6 Address_Type = 2
|
||||||
|
// address is UDS string
|
||||||
|
Address_TYPE_UNIX Address_Type = 3
|
||||||
|
)
|
||||||
|
|
||||||
|
var Address_Type_name = map[int32]string{
|
||||||
|
0: "TYPE_UNKNOWN",
|
||||||
|
1: "TYPE_IPV4",
|
||||||
|
2: "TYPE_IPV6",
|
||||||
|
3: "TYPE_UNIX",
|
||||||
|
}
|
||||||
|
var Address_Type_value = map[string]int32{
|
||||||
|
"TYPE_UNKNOWN": 0,
|
||||||
|
"TYPE_IPV4": 1,
|
||||||
|
"TYPE_IPV6": 2,
|
||||||
|
"TYPE_UNIX": 3,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x Address_Type) String() string {
|
||||||
|
return proto.EnumName(Address_Type_name, int32(x))
|
||||||
|
}
|
||||||
|
func (Address_Type) EnumDescriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
|
||||||
|
}

// Log entry we store in binary logs
type GrpcLogEntry struct {
	// The timestamp of the binary log message
	Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
	// Uniquely identifies a call. The value must not be 0 in order to disambiguate
	// from an unset value.
	// Each call may have several log entries, they will all have the same call_id.
	// Nothing is guaranteed about their value other than they are unique across
	// different RPCs in the same gRPC process.
	CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
	// The entry sequence id for this call. The first GrpcLogEntry has a
	// value of 1, to disambiguate from an unset value. The purpose of
	// this field is to detect missing entries in environments where
	// durability or ordering is not guaranteed.
	SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
	Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
	Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
	// The logger uses one of the following fields to record the payload,
	// according to the type of the log entry.
	//
	// Types that are valid to be assigned to Payload:
	//	*GrpcLogEntry_ClientHeader
	//	*GrpcLogEntry_ServerHeader
	//	*GrpcLogEntry_Message
	//	*GrpcLogEntry_Trailer
	Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
	// true if payload does not represent the full message or metadata.
	PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
	// Peer address information, will only be recorded on the first
	// incoming event. On client side, peer is logged on
	// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
	// the case of trailers-only. On server side, peer is always
	// logged on EVENT_TYPE_CLIENT_HEADER.
	Peer                 *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
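To make the field layout concrete, here is a hedged sketch of how a client-side logger might populate a single entry for an outgoing client header. The import path, the `ptypes` timestamp helper, and the method name are illustrative assumptions, not something this diff prescribes:

```
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	ts, err := ptypes.TimestampProto(time.Now())
	if err != nil {
		panic(err)
	}
	entry := &pb.GrpcLogEntry{
		Timestamp:            ts,
		CallId:               1, // must be non-zero to disambiguate from unset
		SequenceIdWithinCall: 1, // first entry of this call
		Type:                 pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:               pb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &pb.GrpcLogEntry_ClientHeader{
			ClientHeader: &pb.ClientHeader{MethodName: "/helloworld.Greeter/SayHello"},
		},
	}
	fmt.Println(entry.GetType(), entry.GetClientHeader().GetMethodName())
}
```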
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
|
||||||
|
func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*GrpcLogEntry) ProtoMessage() {}
|
||||||
|
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
|
||||||
|
}
|
||||||
|
func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *GrpcLogEntry) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_GrpcLogEntry.Size(m)
|
||||||
|
}
|
||||||
|
func (m *GrpcLogEntry) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
|
||||||
|
if m != nil {
|
||||||
|
return m.Timestamp
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetCallId() uint64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.CallId
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.SequenceIdWithinCall
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
|
||||||
|
if m != nil {
|
||||||
|
return m.Type
|
||||||
|
}
|
||||||
|
return GrpcLogEntry_EVENT_TYPE_UNKNOWN
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
|
||||||
|
if m != nil {
|
||||||
|
return m.Logger
|
||||||
|
}
|
||||||
|
return GrpcLogEntry_LOGGER_UNKNOWN
|
||||||
|
}
|
||||||
|
|
||||||
|
type isGrpcLogEntry_Payload interface {
|
||||||
|
isGrpcLogEntry_Payload()
|
||||||
|
}
|
||||||
|
|
||||||
|
type GrpcLogEntry_ClientHeader struct {
|
||||||
|
ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GrpcLogEntry_ServerHeader struct {
|
||||||
|
ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GrpcLogEntry_Message struct {
|
||||||
|
Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GrpcLogEntry_Trailer struct {
|
||||||
|
Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
|
||||||
|
|
||||||
|
func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
|
||||||
|
|
||||||
|
func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
|
||||||
|
|
||||||
|
func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
|
||||||
|
if m != nil {
|
||||||
|
return m.Payload
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
|
||||||
|
if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
|
||||||
|
return x.ClientHeader
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
|
||||||
|
if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
|
||||||
|
return x.ServerHeader
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetMessage() *Message {
|
||||||
|
if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
|
||||||
|
return x.Message
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetTrailer() *Trailer {
|
||||||
|
if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
|
||||||
|
return x.Trailer
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetPayloadTruncated() bool {
|
||||||
|
if m != nil {
|
||||||
|
return m.PayloadTruncated
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *GrpcLogEntry) GetPeer() *Address {
|
||||||
|
if m != nil {
|
||||||
|
return m.Peer
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||||
|
func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||||
|
return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
|
||||||
|
(*GrpcLogEntry_ClientHeader)(nil),
|
||||||
|
(*GrpcLogEntry_ServerHeader)(nil),
|
||||||
|
(*GrpcLogEntry_Message)(nil),
|
||||||
|
(*GrpcLogEntry_Trailer)(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||||
|
m := msg.(*GrpcLogEntry)
|
||||||
|
// payload
|
||||||
|
switch x := m.Payload.(type) {
|
||||||
|
case *GrpcLogEntry_ClientHeader:
|
||||||
|
b.EncodeVarint(6<<3 | proto.WireBytes)
|
||||||
|
if err := b.EncodeMessage(x.ClientHeader); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case *GrpcLogEntry_ServerHeader:
|
||||||
|
b.EncodeVarint(7<<3 | proto.WireBytes)
|
||||||
|
if err := b.EncodeMessage(x.ServerHeader); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case *GrpcLogEntry_Message:
|
||||||
|
b.EncodeVarint(8<<3 | proto.WireBytes)
|
||||||
|
if err := b.EncodeMessage(x.Message); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case *GrpcLogEntry_Trailer:
|
||||||
|
b.EncodeVarint(9<<3 | proto.WireBytes)
|
||||||
|
if err := b.EncodeMessage(x.Trailer); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||||
|
m := msg.(*GrpcLogEntry)
|
||||||
|
switch tag {
|
||||||
|
case 6: // payload.client_header
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
msg := new(ClientHeader)
|
||||||
|
err := b.DecodeMessage(msg)
|
||||||
|
m.Payload = &GrpcLogEntry_ClientHeader{msg}
|
||||||
|
return true, err
|
||||||
|
case 7: // payload.server_header
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
msg := new(ServerHeader)
|
||||||
|
err := b.DecodeMessage(msg)
|
||||||
|
m.Payload = &GrpcLogEntry_ServerHeader{msg}
|
||||||
|
return true, err
|
||||||
|
case 8: // payload.message
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
msg := new(Message)
|
||||||
|
err := b.DecodeMessage(msg)
|
||||||
|
m.Payload = &GrpcLogEntry_Message{msg}
|
||||||
|
return true, err
|
||||||
|
case 9: // payload.trailer
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
msg := new(Trailer)
|
||||||
|
err := b.DecodeMessage(msg)
|
||||||
|
m.Payload = &GrpcLogEntry_Trailer{msg}
|
||||||
|
return true, err
|
||||||
|
default:
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
|
||||||
|
m := msg.(*GrpcLogEntry)
|
||||||
|
// payload
|
||||||
|
switch x := m.Payload.(type) {
|
||||||
|
case *GrpcLogEntry_ClientHeader:
|
||||||
|
s := proto.Size(x.ClientHeader)
|
||||||
|
n += 1 // tag and wire
|
||||||
|
n += proto.SizeVarint(uint64(s))
|
||||||
|
n += s
|
||||||
|
case *GrpcLogEntry_ServerHeader:
|
||||||
|
s := proto.Size(x.ServerHeader)
|
||||||
|
n += 1 // tag and wire
|
||||||
|
n += proto.SizeVarint(uint64(s))
|
||||||
|
n += s
|
||||||
|
case *GrpcLogEntry_Message:
|
||||||
|
s := proto.Size(x.Message)
|
||||||
|
n += 1 // tag and wire
|
||||||
|
n += proto.SizeVarint(uint64(s))
|
||||||
|
n += s
|
||||||
|
case *GrpcLogEntry_Trailer:
|
||||||
|
s := proto.Size(x.Trailer)
|
||||||
|
n += 1 // tag and wire
|
||||||
|
n += proto.SizeVarint(uint64(s))
|
||||||
|
n += s
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
type ClientHeader struct {
|
||||||
|
// This contains only the metadata from the application.
|
||||||
|
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||||
|
// The name of the RPC method, which looks something like:
|
||||||
|
// /<service>/<method>
|
||||||
|
// Note the leading "/" character.
|
||||||
|
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
|
||||||
|
// A single process may be used to run multiple virtual
|
||||||
|
// servers with different identities.
|
||||||
|
// The authority is the name of such a server identitiy.
|
||||||
|
// It is typically a portion of the URI in the form of
|
||||||
|
// <host> or <host>:<port> .
|
||||||
|
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
|
||||||
|
// the RPC timeout
|
||||||
|
Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClientHeader) Reset() { *m = ClientHeader{} }
|
||||||
|
func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*ClientHeader) ProtoMessage() {}
|
||||||
|
func (*ClientHeader) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
|
||||||
|
}
|
||||||
|
func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *ClientHeader) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_ClientHeader.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *ClientHeader) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_ClientHeader.Size(m)
|
||||||
|
}
|
||||||
|
func (m *ClientHeader) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_ClientHeader.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *ClientHeader) GetMetadata() *Metadata {
|
||||||
|
if m != nil {
|
||||||
|
return m.Metadata
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClientHeader) GetMethodName() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.MethodName
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClientHeader) GetAuthority() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Authority
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ClientHeader) GetTimeout() *duration.Duration {
|
||||||
|
if m != nil {
|
||||||
|
return m.Timeout
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type ServerHeader struct {
|
||||||
|
// This contains only the metadata from the application.
|
||||||
|
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ServerHeader) Reset() { *m = ServerHeader{} }
|
||||||
|
func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*ServerHeader) ProtoMessage() {}
|
||||||
|
func (*ServerHeader) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
|
||||||
|
}
|
||||||
|
func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *ServerHeader) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_ServerHeader.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *ServerHeader) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_ServerHeader.Size(m)
|
||||||
|
}
|
||||||
|
func (m *ServerHeader) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_ServerHeader.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *ServerHeader) GetMetadata() *Metadata {
|
||||||
|
if m != nil {
|
||||||
|
return m.Metadata
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Trailer struct {
|
||||||
|
// This contains only the metadata from the application.
|
||||||
|
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||||
|
// The gRPC status code.
|
||||||
|
StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
|
||||||
|
// An original status message before any transport specific
|
||||||
|
// encoding.
|
||||||
|
StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
|
||||||
|
// The value of the 'grpc-status-details-bin' metadata key. If
|
||||||
|
// present, this is always an encoded 'google.rpc.Status' message.
|
||||||
|
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Trailer) Reset() { *m = Trailer{} }
|
||||||
|
func (m *Trailer) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Trailer) ProtoMessage() {}
|
||||||
|
func (*Trailer) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
|
||||||
|
}
|
||||||
|
func (m *Trailer) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Trailer.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *Trailer) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Trailer.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *Trailer) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Trailer.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Trailer) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Trailer.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Trailer proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Trailer) GetMetadata() *Metadata {
|
||||||
|
if m != nil {
|
||||||
|
return m.Metadata
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Trailer) GetStatusCode() uint32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.StatusCode
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Trailer) GetStatusMessage() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.StatusMessage
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Trailer) GetStatusDetails() []byte {
|
||||||
|
if m != nil {
|
||||||
|
return m.StatusDetails
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
|
||||||
|
type Message struct {
|
||||||
|
// Length of the message. It may not be the same as the length of the
|
||||||
|
// data field, as the logging payload can be truncated or omitted.
|
||||||
|
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
|
||||||
|
// May be truncated or omitted.
|
||||||
|
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) Reset() { *m = Message{} }
|
||||||
|
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Message) ProtoMessage() {}
|
||||||
|
func (*Message) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
|
||||||
|
}
|
||||||
|
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Message.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *Message) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Message.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *Message) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Message.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Message) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Message.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Message proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Message) GetLength() uint32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Length
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Message) GetData() []byte {
|
||||||
|
if m != nil {
|
||||||
|
return m.Data
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}

// A list of metadata pairs, used in the payload of client header,
// server header, and server trailer.
// Implementations may omit some entries to honor the header limits
// of GRPC_BINARY_LOG_CONFIG.
//
// Header keys added by gRPC are omitted. To be more specific,
// implementations will not log the following entries, and this is
// not to be treated as a truncation:
// - entries handled by grpc that are not user visible, such as those
//   that begin with 'grpc-' (with exception of grpc-trace-bin)
//   or keys like 'lb-token'
// - transport specific entries, including but not limited to:
//   ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
// - entries added for call credentials
//
// Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because
// grpc-trace-bin is managed by low level client side mechanisms
// inaccessible from the application level. On server side, the
// header is just a normal metadata key.
// The pair will not count towards the size limit.
type Metadata struct {
	Entry                []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
	XXX_unrecognized     []byte           `json:"-"`
	XXX_sizecache        int32            `json:"-"`
}
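As a small illustration of the shape described above, a `Metadata` message is only a list of key/value entries; the keys below are hypothetical:

```
package main

import (
	"fmt"

	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	md := &pb.Metadata{
		Entry: []*pb.MetadataEntry{
			{Key: "x-request-id", Value: []byte("abc-123")},
			{Key: "grpc-trace-bin", Value: []byte{0x01, 0x02}}, // always logged when present
		},
	}
	for _, e := range md.GetEntry() {
		fmt.Printf("%s: %x\n", e.GetKey(), e.GetValue())
	}
}
```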
|
||||||
|
|
||||||
|
func (m *Metadata) Reset() { *m = Metadata{} }
|
||||||
|
func (m *Metadata) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Metadata) ProtoMessage() {}
|
||||||
|
func (*Metadata) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Metadata.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *Metadata) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Metadata.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Metadata.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Metadata) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Metadata.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Metadata proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Metadata) GetEntry() []*MetadataEntry {
|
||||||
|
if m != nil {
|
||||||
|
return m.Entry
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A metadata key value pair
|
||||||
|
type MetadataEntry struct {
|
||||||
|
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||||
|
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
|
||||||
|
func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*MetadataEntry) ProtoMessage() {}
|
||||||
|
func (*MetadataEntry) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
|
||||||
|
}
|
||||||
|
func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_MetadataEntry.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *MetadataEntry) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_MetadataEntry.Size(m)
|
||||||
|
}
|
||||||
|
func (m *MetadataEntry) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *MetadataEntry) GetKey() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Key
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MetadataEntry) GetValue() []byte {
|
||||||
|
if m != nil {
|
||||||
|
return m.Value
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Address information
|
||||||
|
type Address struct {
|
||||||
|
Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
|
||||||
|
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
|
||||||
|
// only for TYPE_IPV4 and TYPE_IPV6
|
||||||
|
IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
|
||||||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
|
XXX_unrecognized []byte `json:"-"`
|
||||||
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Address) Reset() { *m = Address{} }
|
||||||
|
func (m *Address) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Address) ProtoMessage() {}
|
||||||
|
func (*Address) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
|
||||||
|
}
|
||||||
|
func (m *Address) XXX_Unmarshal(b []byte) error {
|
||||||
|
return xxx_messageInfo_Address.Unmarshal(m, b)
|
||||||
|
}
|
||||||
|
func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
return xxx_messageInfo_Address.Marshal(b, m, deterministic)
|
||||||
|
}
|
||||||
|
func (dst *Address) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_Address.Merge(dst, src)
|
||||||
|
}
|
||||||
|
func (m *Address) XXX_Size() int {
|
||||||
|
return xxx_messageInfo_Address.Size(m)
|
||||||
|
}
|
||||||
|
func (m *Address) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_Address.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_Address proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *Address) GetType() Address_Type {
|
||||||
|
if m != nil {
|
||||||
|
return m.Type
|
||||||
|
}
|
||||||
|
return Address_TYPE_UNKNOWN
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Address) GetAddress() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Address
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Address) GetIpPort() uint32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.IpPort
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
|
||||||
|
proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
|
||||||
|
proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
|
||||||
|
proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
|
||||||
|
proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
|
||||||
|
proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
|
||||||
|
proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
|
||||||
|
proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
|
||||||
|
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
|
||||||
|
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
|
||||||
|
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
|
||||||
|
// 900 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
|
||||||
|
0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
|
||||||
|
0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
|
||||||
|
0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
|
||||||
|
0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
|
||||||
|
0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
|
||||||
|
0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
|
||||||
|
0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
|
||||||
|
0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
|
||||||
|
0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
|
||||||
|
0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
|
||||||
|
0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
|
||||||
|
0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
|
||||||
|
0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
|
||||||
|
0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
|
||||||
|
0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
|
||||||
|
0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
|
||||||
|
0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
|
||||||
|
0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
|
||||||
|
0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
|
||||||
|
0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
|
||||||
|
0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
|
||||||
|
0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
|
||||||
|
0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
|
||||||
|
0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
|
||||||
|
0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
|
||||||
|
0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
|
||||||
|
0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
|
||||||
|
0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
|
||||||
|
0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
|
||||||
|
0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
|
||||||
|
0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
|
||||||
|
0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
|
||||||
|
0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
|
||||||
|
0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
|
||||||
|
0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
|
||||||
|
0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
|
||||||
|
0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
|
||||||
|
0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
|
||||||
|
0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
|
||||||
|
0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
|
||||||
|
0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
|
||||||
|
0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
|
||||||
|
0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
|
||||||
|
0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
|
||||||
|
0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
|
||||||
|
0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
|
||||||
|
0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
|
||||||
|
0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
|
||||||
|
0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
|
||||||
|
0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
|
||||||
|
0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
|
||||||
|
0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
|
||||||
|
0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
|
||||||
|
0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
|
||||||
|
0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
|
||||||
|
0xd4, 0x07, 0x00, 0x00,
|
||||||
|
}
|
|
@@ -19,7 +19,7 @@
 package grpc
 
 import (
-	"golang.org/x/net/context"
+	"context"
 )
 
 // Invoke sends the RPC request on the wire and returns after response is
@@ -40,7 +40,7 @@ func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply int
 func combine(o1 []CallOption, o2 []CallOption) []CallOption {
 	// we don't use append because o1 could have extra capacity whose
 	// elements would be overwritten, which could cause inadvertent
-	// sharing (and race connditions) between concurrent calls
+	// sharing (and race conditions) between concurrent calls
 	if len(o1) == 0 {
 		return o2
 	} else if len(o2) == 0 {
@@ -63,31 +63,12 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
 var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
 
 func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
-	// TODO: implement retries in clientStream and make this simply
-	// newClientStream, SendMsg, RecvMsg.
-	firstAttempt := true
-	for {
-		csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
-		if err != nil {
-			return err
-		}
-		cs := csInt.(*clientStream)
-		if err := cs.SendMsg(req); err != nil {
-			if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
-				// TODO: Add a field to header for grpc-transparent-retry-attempts
-				firstAttempt = false
-				continue
-			}
-			return err
-		}
-		if err := cs.RecvMsg(reply); err != nil {
-			if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
-				// TODO: Add a field to header for grpc-transparent-retry-attempts
-				firstAttempt = false
-				continue
-			}
-			return err
-		}
-		return nil
-	}
+	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
+	if err != nil {
+		return err
+	}
+	if err := cs.SendMsg(req); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
 }
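The rewritten `invoke` drops the in-function transparent-retry loop (the removed TODO asked for exactly this: retries now live inside `clientStream`) and reduces a unary call to new stream, send, receive. Nothing changes for callers; a unary RPC still goes through `ClientConn.Invoke`, roughly as in this sketch, which assumes the stock `helloworld` example package and a locally running server:

```
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	pb "google.golang.org/grpc/examples/helloworld/helloworld"
)

func main() {
	cc, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	req := &pb.HelloRequest{Name: "world"}
	res := &pb.HelloReply{}
	// Invoke runs the newClientStream / SendMsg / RecvMsg sequence shown above.
	if err := cc.Invoke(ctx, "/helloworld.Greeter/SayHello", req, res); err != nil {
		log.Fatal(err)
	}
	log.Println(res.GetMessage())
}
```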
File diff suppressed because it is too large
@@ -22,6 +22,7 @@ package codes // import "google.golang.org/grpc/codes"
 
 import (
 	"fmt"
+	"strconv"
 )
 
 // A Code is an unsigned 32-bit error code as defined in the gRPC spec.
@@ -143,6 +144,8 @@ const (
 	// Unauthenticated indicates the request does not have valid
 	// authentication credentials for the operation.
 	Unauthenticated Code = 16
+
+	_maxCode = 17
 )
 
 var strToCode = map[string]Code{
@@ -176,6 +179,16 @@ func (c *Code) UnmarshalJSON(b []byte) error {
 	if c == nil {
 		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
 	}
+
+	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+		if ci >= _maxCode {
+			return fmt.Errorf("invalid code: %q", ci)
+		}
+
+		*c = Code(ci)
+		return nil
+	}
+
 	if jc, ok := strToCode[string(b)]; ok {
 		*c = jc
 		return nil
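With the new `strconv` branch, a `Code` can be unmarshaled from its bare numeric JSON form as well as from its quoted name, and `_maxCode` rejects out-of-range numbers. A hedged sketch of both forms:

```
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var byName, byNumber codes.Code

	// Quoted name, resolved through the strToCode map.
	if err := json.Unmarshal([]byte(`"UNAVAILABLE"`), &byName); err != nil {
		panic(err)
	}
	// Bare number, handled by the new strconv.ParseUint branch.
	if err := json.Unmarshal([]byte(`14`), &byNumber); err != nil {
		panic(err)
	}
	fmt.Println(byName == byNumber, byName) // true Unavailable
}
```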
@@ -22,7 +22,8 @@
 package connectivity
 
 import (
-	"golang.org/x/net/context"
+	"context"
+
 	"google.golang.org/grpc/grpclog"
 )
 
@@ -51,7 +52,7 @@ func (s State) String() string {
 const (
 	// Idle indicates the ClientConn is idle.
 	Idle State = iota
-	// Connecting indicates the ClienConn is connecting.
+	// Connecting indicates the ClientConn is connecting.
 	Connecting
 	// Ready indicates the ClientConn is ready for work.
 	Ready
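Beyond the `ClienConn` typo fix, this file only swaps `golang.org/x/net/context` for the standard `context` package. For completeness, a sketch of how the `State` values are typically consumed on the client side; `GetState` and `WaitForStateChange` are experimental APIs at this version, and the target address is made up:

```
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cc, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Poll the channel state until it reaches Ready or the context expires.
	for state := cc.GetState(); state != connectivity.Ready; state = cc.GetState() {
		log.Printf("channel state: %v", state)
		if !cc.WaitForStateChange(ctx, state) {
			log.Fatal("timed out waiting for Ready")
		}
	}
	log.Println("channel is Ready")
}
```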
@@ -23,6 +23,7 @@
 package credentials // import "google.golang.org/grpc/credentials"
 
 import (
+	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"errors"
@@ -31,12 +32,10 @@ import (
 	"net"
 	"strings"
 
-	"golang.org/x/net/context"
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/credentials/internal"
 )
 
-// alpnProtoStr are the specified application level protocols for gRPC.
-var alpnProtoStr = []string{"h2"}
-
 // PerRPCCredentials defines the common interface for the credentials which need to
 // attach security information to every RPC (e.g., oauth2).
 type PerRPCCredentials interface {
@@ -107,6 +106,25 @@ type TransportCredentials interface {
 	OverrideServerName(string) error
 }
 
+// Bundle is a combination of TransportCredentials and PerRPCCredentials.
+//
+// It also contains a mode switching method, so it can be used as a combination
+// of different credential policies.
+//
+// Bundle cannot be used together with individual TransportCredentials.
+// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
+//
+// This API is experimental.
+type Bundle interface {
+	TransportCredentials() TransportCredentials
+	PerRPCCredentials() PerRPCCredentials
+	// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
+	// existing Bundle may cause races.
+	//
+	// NewWithMode returns nil if the requested mode is not supported.
+	NewWithMode(mode string) (Bundle, error)
+}
+
 // TLSInfo contains the auth information for a TLS authenticated connection.
 // It implements the AuthInfo interface.
 type TLSInfo struct {
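To illustrate the contract of the new interface, a minimal `Bundle` could simply pair a fixed `TransportCredentials` with a fixed `PerRPCCredentials` and ignore mode switching. This is only a sketch of the interface shape, not an implementation shipped by gRPC:

```
package bundledemo

import "google.golang.org/grpc/credentials"

// staticBundle is a hypothetical Bundle that always returns the same pair of
// credentials and does not actually switch modes.
type staticBundle struct {
	tc  credentials.TransportCredentials
	rpc credentials.PerRPCCredentials
}

func (b *staticBundle) TransportCredentials() credentials.TransportCredentials { return b.tc }

func (b *staticBundle) PerRPCCredentials() credentials.PerRPCCredentials { return b.rpc }

// NewWithMode returns the same bundle for every mode; a real implementation
// would build a new Bundle with a different credential policy here.
func (b *staticBundle) NewWithMode(mode string) (credentials.Bundle, error) {
	return b, nil
}

// Compile-time check that staticBundle satisfies the new interface.
var _ credentials.Bundle = (*staticBundle)(nil)
```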
@@ -118,6 +136,18 @@ func (t TLSInfo) AuthType() string {
 	return "tls"
 }
 
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+	v := &TLSChannelzSecurityValue{
+		StandardName: cipherSuiteLookup[t.State.CipherSuite],
+	}
+	// Currently there's no way to get LocalCertificate info from tls package.
+	if len(t.State.PeerCertificates) > 0 {
+		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+	}
+	return v
+}
+
 // tlsCreds is the credentials required for authenticating a connection using TLS.
 type tlsCreds struct {
 	// TLS configuration
@@ -155,7 +185,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
 	case <-ctx.Done():
 		return nil, nil, ctx.Err()
 	}
-	return conn, TLSInfo{conn.ConnectionState()}, nil
+	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
 }
 
 func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@@ -163,7 +193,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
 	if err := conn.Handshake(); err != nil {
 		return nil, nil, err
 	}
-	return conn, TLSInfo{conn.ConnectionState()}, nil
+	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
 }
 
 func (c *tlsCreds) Clone() TransportCredentials {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const alpnProtoStrH2 = "h2"
|
||||||
|
|
||||||
|
func appendH2ToNextProtos(ps []string) []string {
|
||||||
|
for _, p := range ps {
|
||||||
|
if p == alpnProtoStrH2 {
|
||||||
|
return ps
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ret := make([]string, 0, len(ps)+1)
|
||||||
|
ret = append(ret, ps...)
|
||||||
|
return append(ret, alpnProtoStrH2)
|
||||||
|
}
|
||||||
|
|
||||||
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
||||||
func NewTLS(c *tls.Config) TransportCredentials {
|
func NewTLS(c *tls.Config) TransportCredentials {
|
||||||
tc := &tlsCreds{cloneTLSConfig(c)}
|
tc := &tlsCreds{cloneTLSConfig(c)}
|
||||||
tc.config.NextProtos = alpnProtoStr
|
tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
|
||||||
return tc
|
return tc
|
||||||
}
|
}
|
||||||
|
|
||||||
|
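The switch from the fixed `alpnProtoStr` slice to `appendH2ToNextProtos` means a caller-supplied `tls.Config` keeps its own ALPN protocols and only gains `h2` when it is missing; because the config is cloned first, the caller's slice is never mutated. A sketch of the observable effect, with a made-up extra ALPN protocol:

```
package main

import (
	"crypto/tls"
	"fmt"

	"google.golang.org/grpc/credentials"
)

func main() {
	// Hypothetical extra ALPN protocol supplied by the caller.
	cfg := &tls.Config{NextProtos: []string{"acme-tls/1"}}
	creds := credentials.NewTLS(cfg)

	// The config is cloned inside NewTLS, so the caller's slice is untouched;
	// the clone now advertises both "acme-tls/1" and "h2".
	fmt.Println(creds.Info().SecurityProtocol) // tls
	fmt.Println(cfg.NextProtos)                // [acme-tls/1]
}
```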
@ -218,3 +261,78 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
|
||||||
}
|
}
|
||||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ChannelzSecurityInfo defines the interface that security protocols should implement
|
||||||
|
// in order to provide security info to channelz.
|
||||||
|
type ChannelzSecurityInfo interface {
|
||||||
|
GetSecurityValue() ChannelzSecurityValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
|
||||||
|
// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
|
||||||
|
// and *OtherChannelzSecurityValue.
|
||||||
|
type ChannelzSecurityValue interface {
|
||||||
|
isChannelzSecurityValue()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
||||||
|
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
||||||
|
type TLSChannelzSecurityValue struct {
|
||||||
|
StandardName string
|
||||||
|
LocalCertificate []byte
|
||||||
|
RemoteCertificate []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
|
||||||
|
|
||||||
|
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
|
||||||
|
// from GetSecurityValue(), which contains protocol specific security info. Note
|
||||||
|
// the Value field will be sent to users of channelz requesting channel info, and
|
||||||
|
// thus sensitive info should better be avoided.
|
||||||
|
type OtherChannelzSecurityValue struct {
|
||||||
|
Name string
|
||||||
|
Value proto.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
|
||||||
|
|
||||||
|
var cipherSuiteLookup = map[uint16]string{
|
||||||
|
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
|
||||||
|
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||||
|
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||||
|
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
|
||||||
|
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||||
|
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||||
|
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
|
||||||
|
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||||
|
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||||
|
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||||
|
}
|
||||||
|
|
||||||
|
// cloneTLSConfig returns a shallow clone of the exported
|
||||||
|
// fields of cfg, ignoring the unexported sync.Once, which
|
||||||
|
// contains a mutex and must not be copied.
|
||||||
|
//
|
||||||
|
// If cfg is nil, a new zero tls.Config is returned.
|
||||||
|
//
|
||||||
|
// TODO: inline this function if possible.
|
||||||
|
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||||
|
if cfg == nil {
|
||||||
|
return &tls.Config{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return cfg.Clone()
|
||||||
|
}
|
||||||
|
|
|
@ -1,60 +0,0 @@
|
||||||
// +build go1.7
|
|
||||||
// +build !go1.8
|
|
||||||
|
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2016 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package credentials
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
)
|
|
||||||
|
|
||||||
// cloneTLSConfig returns a shallow clone of the exported
|
|
||||||
// fields of cfg, ignoring the unexported sync.Once, which
|
|
||||||
// contains a mutex and must not be copied.
|
|
||||||
//
|
|
||||||
// If cfg is nil, a new zero tls.Config is returned.
|
|
||||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
|
||||||
if cfg == nil {
|
|
||||||
return &tls.Config{}
|
|
||||||
}
|
|
||||||
return &tls.Config{
|
|
||||||
Rand: cfg.Rand,
|
|
||||||
Time: cfg.Time,
|
|
||||||
Certificates: cfg.Certificates,
|
|
||||||
NameToCertificate: cfg.NameToCertificate,
|
|
||||||
GetCertificate: cfg.GetCertificate,
|
|
||||||
RootCAs: cfg.RootCAs,
|
|
||||||
NextProtos: cfg.NextProtos,
|
|
||||||
ServerName: cfg.ServerName,
|
|
||||||
ClientAuth: cfg.ClientAuth,
|
|
||||||
ClientCAs: cfg.ClientCAs,
|
|
||||||
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
|
||||||
CipherSuites: cfg.CipherSuites,
|
|
||||||
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
|
|
||||||
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
|
|
||||||
SessionTicketKey: cfg.SessionTicketKey,
|
|
||||||
ClientSessionCache: cfg.ClientSessionCache,
|
|
||||||
MinVersion: cfg.MinVersion,
|
|
||||||
MaxVersion: cfg.MaxVersion,
|
|
||||||
CurvePreferences: cfg.CurvePreferences,
|
|
||||||
DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
|
|
||||||
Renegotiation: cfg.Renegotiation,
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,57 +0,0 @@
|
||||||
// +build !go1.7
|
|
||||||
|
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2016 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package credentials
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
)
|
|
||||||
|
|
||||||
// cloneTLSConfig returns a shallow clone of the exported
|
|
||||||
// fields of cfg, ignoring the unexported sync.Once, which
|
|
||||||
// contains a mutex and must not be copied.
|
|
||||||
//
|
|
||||||
// If cfg is nil, a new zero tls.Config is returned.
|
|
||||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
|
||||||
if cfg == nil {
|
|
||||||
return &tls.Config{}
|
|
||||||
}
|
|
||||||
return &tls.Config{
|
|
||||||
Rand: cfg.Rand,
|
|
||||||
Time: cfg.Time,
|
|
||||||
Certificates: cfg.Certificates,
|
|
||||||
NameToCertificate: cfg.NameToCertificate,
|
|
||||||
GetCertificate: cfg.GetCertificate,
|
|
||||||
RootCAs: cfg.RootCAs,
|
|
||||||
NextProtos: cfg.NextProtos,
|
|
||||||
ServerName: cfg.ServerName,
|
|
||||||
ClientAuth: cfg.ClientAuth,
|
|
||||||
ClientCAs: cfg.ClientCAs,
|
|
||||||
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
|
||||||
CipherSuites: cfg.CipherSuites,
|
|
||||||
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
|
|
||||||
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
|
|
||||||
SessionTicketKey: cfg.SessionTicketKey,
|
|
||||||
ClientSessionCache: cfg.ClientSessionCache,
|
|
||||||
MinVersion: cfg.MinVersion,
|
|
||||||
MaxVersion: cfg.MaxVersion,
|
|
||||||
CurvePreferences: cfg.CurvePreferences,
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -0,0 +1,61 @@
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package internal contains credentials-internal code.
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
type sysConn = syscall.Conn
|
||||||
|
|
||||||
|
// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
|
||||||
|
// SyscallConn() (the method in interface syscall.Conn) is explicitly
|
||||||
|
// implemented on this type,
|
||||||
|
//
|
||||||
|
// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
|
||||||
|
// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
|
||||||
|
// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
|
||||||
|
// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
|
||||||
|
// help here).
|
||||||
|
type syscallConn struct {
|
||||||
|
net.Conn
|
||||||
|
// sysConn is a type alias of syscall.Conn. It's necessary because the name
|
||||||
|
// `Conn` collides with `net.Conn`.
|
||||||
|
sysConn
|
||||||
|
}
|
||||||
|
|
||||||
|
// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
|
||||||
|
// implements syscall.Conn. rawConn will be used to support syscall, and newConn
|
||||||
|
// will be used for read/write.
|
||||||
|
//
|
||||||
|
// This function returns newConn if rawConn doesn't implement syscall.Conn.
|
||||||
|
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
|
||||||
|
sysConn, ok := rawConn.(syscall.Conn)
|
||||||
|
if !ok {
|
||||||
|
return newConn
|
||||||
|
}
|
||||||
|
return &syscallConn{
|
||||||
|
Conn: newConn,
|
||||||
|
sysConn: sysConn,
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,8 +1,8 @@
|
||||||
// +build go1.8
|
// +build appengine
|
||||||
|
|
||||||
/*
|
/*
|
||||||
*
|
*
|
||||||
* Copyright 2017 gRPC authors.
|
* Copyright 2018 gRPC authors.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
|
@ -18,12 +18,13 @@
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package dns
|
package internal
|
||||||
|
|
||||||
import "net"
|
import (
|
||||||
|
"net"
|
||||||
var (
|
|
||||||
lookupHost = net.DefaultResolver.LookupHost
|
|
||||||
lookupSRV = net.DefaultResolver.LookupSRV
|
|
||||||
lookupTXT = net.DefaultResolver.LookupTXT
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// WrapSyscallConn returns newConn on appengine.
|
||||||
|
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
|
||||||
|
return newConn
|
||||||
|
}
|
|
@ -1,8 +1,8 @@
|
||||||
// +build go1.8
|
// +build go1.12
|
||||||
|
|
||||||
/*
|
/*
|
||||||
*
|
*
|
||||||
* Copyright 2017 gRPC authors.
|
* Copyright 2019 gRPC authors.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
|
@ -20,19 +20,11 @@
|
||||||
|
|
||||||
package credentials
|
package credentials
|
||||||
|
|
||||||
import (
|
import "crypto/tls"
|
||||||
"crypto/tls"
|
|
||||||
)
|
|
||||||
|
|
||||||
// cloneTLSConfig returns a shallow clone of the exported
|
// This init function adds cipher suite constants only defined in Go 1.12.
|
||||||
// fields of cfg, ignoring the unexported sync.Once, which
|
func init() {
|
||||||
// contains a mutex and must not be copied.
|
cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
|
||||||
//
|
cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
|
||||||
// If cfg is nil, a new zero tls.Config is returned.
|
cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
|
||||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
|
||||||
if cfg == nil {
|
|
||||||
return &tls.Config{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg.Clone()
|
|
||||||
}
|
}
|
|
@ -0,0 +1,532 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package grpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/balancer"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/internal"
|
||||||
|
"google.golang.org/grpc/internal/backoff"
|
||||||
|
"google.golang.org/grpc/internal/envconfig"
|
||||||
|
"google.golang.org/grpc/internal/transport"
|
||||||
|
"google.golang.org/grpc/keepalive"
|
||||||
|
"google.golang.org/grpc/resolver"
|
||||||
|
"google.golang.org/grpc/stats"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||||
|
// values passed to Dial.
|
||||||
|
type dialOptions struct {
|
||||||
|
unaryInt UnaryClientInterceptor
|
||||||
|
streamInt StreamClientInterceptor
|
||||||
|
cp Compressor
|
||||||
|
dc Decompressor
|
||||||
|
bs backoff.Strategy
|
||||||
|
block bool
|
||||||
|
insecure bool
|
||||||
|
timeout time.Duration
|
||||||
|
scChan <-chan ServiceConfig
|
||||||
|
authority string
|
||||||
|
copts transport.ConnectOptions
|
||||||
|
callOptions []CallOption
|
||||||
|
// This is used by v1 balancer dial option WithBalancer to support v1
|
||||||
|
// balancer, and also by WithBalancerName dial option.
|
||||||
|
balancerBuilder balancer.Builder
|
||||||
|
// This is to support grpclb.
|
||||||
|
resolverBuilder resolver.Builder
|
||||||
|
reqHandshake envconfig.RequireHandshakeSetting
|
||||||
|
channelzParentID int64
|
||||||
|
disableServiceConfig bool
|
||||||
|
disableRetry bool
|
||||||
|
disableHealthCheck bool
|
||||||
|
healthCheckFunc internal.HealthChecker
|
||||||
|
minConnectTimeout func() time.Duration
|
||||||
|
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
||||||
|
defaultServiceConfigRawJSON *string
|
||||||
|
}
|
||||||
|
|
||||||
|
// DialOption configures how we set up the connection.
|
||||||
|
type DialOption interface {
|
||||||
|
apply(*dialOptions)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EmptyDialOption does not alter the dial configuration. It can be embedded in
|
||||||
|
// another structure to build custom dial options.
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
type EmptyDialOption struct{}
|
||||||
|
|
||||||
|
func (EmptyDialOption) apply(*dialOptions) {}
|
||||||
|
|
||||||
|
// funcDialOption wraps a function that modifies dialOptions into an
|
||||||
|
// implementation of the DialOption interface.
|
||||||
|
type funcDialOption struct {
|
||||||
|
f func(*dialOptions)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fdo *funcDialOption) apply(do *dialOptions) {
|
||||||
|
fdo.f(do)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
|
||||||
|
return &funcDialOption{
|
||||||
|
f: f,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithWaitForHandshake blocks until the initial settings frame is received from
|
||||||
|
// the server before assigning RPCs to the connection.
|
||||||
|
//
|
||||||
|
// Deprecated: this is the default behavior, and this option will be removed
|
||||||
|
// after the 1.18 release.
|
||||||
|
func WithWaitForHandshake() DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.reqHandshake = envconfig.RequireHandshakeOn
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithWriteBufferSize determines how much data can be batched before doing a
|
||||||
|
// write on the wire. The corresponding memory allocation for this buffer will
|
||||||
|
// be twice the size to keep syscalls low. The default value for this buffer is
|
||||||
|
// 32KB.
|
||||||
|
//
|
||||||
|
// Zero will disable the write buffer such that each write will be on underlying
|
||||||
|
// connection. Note: A Send call may not directly translate to a write.
|
||||||
|
func WithWriteBufferSize(s int) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.WriteBufferSize = s
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithReadBufferSize lets you set the size of read buffer, this determines how
|
||||||
|
// much data can be read at most for each read syscall.
|
||||||
|
//
|
||||||
|
// The default value for this buffer is 32KB. Zero will disable read buffer for
|
||||||
|
// a connection so data framer can access the underlying conn directly.
|
||||||
|
func WithReadBufferSize(s int) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.ReadBufferSize = s
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithInitialWindowSize returns a DialOption which sets the value for initial
|
||||||
|
// window size on a stream. The lower bound for window size is 64K and any value
|
||||||
|
// smaller than that will be ignored.
|
||||||
|
func WithInitialWindowSize(s int32) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.InitialWindowSize = s
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithInitialConnWindowSize returns a DialOption which sets the value for
|
||||||
|
// initial window size on a connection. The lower bound for window size is 64K
|
||||||
|
// and any value smaller than that will be ignored.
|
||||||
|
func WithInitialConnWindowSize(s int32) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.InitialConnWindowSize = s
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMaxMsgSize returns a DialOption which sets the maximum message size the
|
||||||
|
// client can receive.
|
||||||
|
//
|
||||||
|
// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
|
||||||
|
func WithMaxMsgSize(s int) DialOption {
|
||||||
|
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDefaultCallOptions returns a DialOption which sets the default
|
||||||
|
// CallOptions for calls over the connection.
|
||||||
|
func WithDefaultCallOptions(cos ...CallOption) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.callOptions = append(o.callOptions, cos...)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCodec returns a DialOption which sets a codec for message marshaling and
|
||||||
|
// unmarshaling.
|
||||||
|
//
|
||||||
|
// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.
|
||||||
|
func WithCodec(c Codec) DialOption {
|
||||||
|
return WithDefaultCallOptions(CallCustomCodec(c))
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCompressor returns a DialOption which sets a Compressor to use for
|
||||||
|
// message compression. It has lower priority than the compressor set by the
|
||||||
|
// UseCompressor CallOption.
|
||||||
|
//
|
||||||
|
// Deprecated: use UseCompressor instead.
|
||||||
|
func WithCompressor(cp Compressor) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.cp = cp
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDecompressor returns a DialOption which sets a Decompressor to use for
|
||||||
|
// incoming message decompression. If incoming response messages are encoded
|
||||||
|
// using the decompressor's Type(), it will be used. Otherwise, the message
|
||||||
|
// encoding will be used to look up the compressor registered via
|
||||||
|
// encoding.RegisterCompressor, which will then be used to decompress the
|
||||||
|
// message. If no compressor is registered for the encoding, an Unimplemented
|
||||||
|
// status error will be returned.
|
||||||
|
//
|
||||||
|
// Deprecated: use encoding.RegisterCompressor instead.
|
||||||
|
func WithDecompressor(dc Decompressor) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.dc = dc
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
|
||||||
|
// Name resolver will be ignored if this DialOption is specified.
|
||||||
|
//
|
||||||
|
// Deprecated: use the new balancer APIs in balancer package and
|
||||||
|
// WithBalancerName.
|
||||||
|
func WithBalancer(b Balancer) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.balancerBuilder = &balancerWrapperBuilder{
|
||||||
|
b: b,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBalancerName sets the balancer that the ClientConn will be initialized
|
||||||
|
// with. Balancer registered with balancerName will be used. This function
|
||||||
|
// panics if no balancer was registered by balancerName.
|
||||||
|
//
|
||||||
|
// The balancer cannot be overridden by balancer option specified by service
|
||||||
|
// config.
|
||||||
|
//
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
func WithBalancerName(balancerName string) DialOption {
|
||||||
|
builder := balancer.Get(balancerName)
|
||||||
|
if builder == nil {
|
||||||
|
panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
|
||||||
|
}
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.balancerBuilder = builder
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// withResolverBuilder is only for grpclb.
|
||||||
|
func withResolverBuilder(b resolver.Builder) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.resolverBuilder = b
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithServiceConfig returns a DialOption which has a channel to read the
|
||||||
|
// service configuration.
|
||||||
|
//
|
||||||
|
// Deprecated: service config should be received through name resolver, as
|
||||||
|
// specified here.
|
||||||
|
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
|
||||||
|
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.scChan = c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
|
||||||
|
// when backing off after failed connection attempts.
|
||||||
|
func WithBackoffMaxDelay(md time.Duration) DialOption {
|
||||||
|
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBackoffConfig configures the dialer to use the provided backoff
|
||||||
|
// parameters after connection failures.
|
||||||
|
//
|
||||||
|
// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
|
||||||
|
// for use.
|
||||||
|
func WithBackoffConfig(b BackoffConfig) DialOption {
|
||||||
|
return withBackoff(backoff.Exponential{
|
||||||
|
MaxDelay: b.MaxDelay,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// withBackoff sets the backoff strategy used for connectRetryNum after a failed
|
||||||
|
// connection attempt.
|
||||||
|
//
|
||||||
|
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
|
||||||
|
func withBackoff(bs backoff.Strategy) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.bs = bs
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBlock returns a DialOption which makes caller of Dial blocks until the
|
||||||
|
// underlying connection is up. Without this, Dial returns immediately and
|
||||||
|
// connecting the server happens in background.
|
||||||
|
func WithBlock() DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.block = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithInsecure returns a DialOption which disables transport security for this
|
||||||
|
// ClientConn. Note that transport security is required unless WithInsecure is
|
||||||
|
// set.
|
||||||
|
func WithInsecure() DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.insecure = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTransportCredentials returns a DialOption which configures a connection
|
||||||
|
// level security credentials (e.g., TLS/SSL). This should not be used together
|
||||||
|
// with WithCredentialsBundle.
|
||||||
|
func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.TransportCredentials = creds
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithPerRPCCredentials returns a DialOption which sets credentials and places
|
||||||
|
// auth state on each outbound RPC.
|
||||||
|
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCredentialsBundle returns a DialOption to set a credentials bundle for
|
||||||
|
// the ClientConn.WithCreds. This should not be used together with
|
||||||
|
// WithTransportCredentials.
|
||||||
|
//
|
||||||
|
// This API is experimental.
|
||||||
|
func WithCredentialsBundle(b credentials.Bundle) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.CredsBundle = b
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTimeout returns a DialOption that configures a timeout for dialing a
|
||||||
|
// ClientConn initially. This is valid if and only if WithBlock() is present.
|
||||||
|
//
|
||||||
|
// Deprecated: use DialContext and context.WithTimeout instead.
|
||||||
|
func WithTimeout(d time.Duration) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.timeout = d
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContextDialer returns a DialOption that sets a dialer to create
|
||||||
|
// connections. If FailOnNonTempDialError() is set to true, and an error is
|
||||||
|
// returned by f, gRPC checks the error's Temporary() method to decide if it
|
||||||
|
// should try to reconnect to the network address.
|
||||||
|
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.Dialer = f
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
internal.WithResolverBuilder = withResolverBuilder
|
||||||
|
internal.WithHealthCheckFunc = withHealthCheckFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDialer returns a DialOption that specifies a function to use for dialing
|
||||||
|
// network addresses. If FailOnNonTempDialError() is set to true, and an error
|
||||||
|
// is returned by f, gRPC checks the error's Temporary() method to decide if it
|
||||||
|
// should try to reconnect to the network address.
|
||||||
|
//
|
||||||
|
// Deprecated: use WithContextDialer instead
|
||||||
|
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
|
||||||
|
return WithContextDialer(
|
||||||
|
func(ctx context.Context, addr string) (net.Conn, error) {
|
||||||
|
if deadline, ok := ctx.Deadline(); ok {
|
||||||
|
return f(addr, time.Until(deadline))
|
||||||
|
}
|
||||||
|
return f(addr, 0)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStatsHandler returns a DialOption that specifies the stats handler for
|
||||||
|
// all the RPCs and underlying network connections in this ClientConn.
|
||||||
|
func WithStatsHandler(h stats.Handler) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.StatsHandler = h
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
|
||||||
|
// non-temporary dial errors. If f is true, and dialer returns a non-temporary
|
||||||
|
// error, gRPC will fail the connection to the network address and won't try to
|
||||||
|
// reconnect. The default value of FailOnNonTempDialError is false.
|
||||||
|
//
|
||||||
|
// FailOnNonTempDialError only affects the initial dial, and does not do
|
||||||
|
// anything useful unless you are also using WithBlock().
|
||||||
|
//
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
func FailOnNonTempDialError(f bool) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.FailOnNonTempDialError = f
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUserAgent returns a DialOption that specifies a user agent string for all
|
||||||
|
// the RPCs.
|
||||||
|
func WithUserAgent(s string) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.UserAgent = s
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
|
||||||
|
// for the client transport.
|
||||||
|
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
|
||||||
|
if kp.Time < internal.KeepaliveMinPingTime {
|
||||||
|
grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
|
||||||
|
kp.Time = internal.KeepaliveMinPingTime
|
||||||
|
}
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.KeepaliveParams = kp
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
|
||||||
|
// unary RPCs.
|
||||||
|
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.unaryInt = f
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStreamInterceptor returns a DialOption that specifies the interceptor for
|
||||||
|
// streaming RPCs.
|
||||||
|
func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.streamInt = f
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAuthority returns a DialOption that specifies the value to be used as the
|
||||||
|
// :authority pseudo-header. This value only works with WithInsecure and has no
|
||||||
|
// effect if TransportCredentials are present.
|
||||||
|
func WithAuthority(a string) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.authority = a
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithChannelzParentID returns a DialOption that specifies the channelz ID of
|
||||||
|
// current ClientConn's parent. This function is used in nested channel creation
|
||||||
|
// (e.g. grpclb dial).
|
||||||
|
func WithChannelzParentID(id int64) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.channelzParentID = id
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
|
||||||
|
// service config provided by the resolver and provides a hint to the resolver
|
||||||
|
// to not fetch service configs.
|
||||||
|
//
|
||||||
|
// Note that, this dial option only disables service config from resolver. If
|
||||||
|
// default service config is provided, grpc will use the default service config.
|
||||||
|
func WithDisableServiceConfig() DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.disableServiceConfig = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDefaultServiceConfig returns a DialOption that configures the default
|
||||||
|
// service config, which will be used in cases where:
|
||||||
|
// 1. WithDisableServiceConfig is called.
|
||||||
|
// 2. Resolver does not return service config or if the resolver gets and invalid config.
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
func WithDefaultServiceConfig(s string) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.defaultServiceConfigRawJSON = &s
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableRetry returns a DialOption that disables retries, even if the
|
||||||
|
// service config enables them. This does not impact transparent retries, which
|
||||||
|
// will happen automatically if no data is written to the wire or if the RPC is
|
||||||
|
// unprocessed by the remote server.
|
||||||
|
//
|
||||||
|
// Retry support is currently disabled by default, but will be enabled by
|
||||||
|
// default in the future. Until then, it may be enabled by setting the
|
||||||
|
// environment variable "GRPC_GO_RETRY" to "on".
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
func WithDisableRetry() DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.disableRetry = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMaxHeaderListSize returns a DialOption that specifies the maximum
|
||||||
|
// (uncompressed) size of header list that the client is prepared to accept.
|
||||||
|
func WithMaxHeaderListSize(s uint32) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.copts.MaxHeaderListSize = &s
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDisableHealthCheck disables the LB channel health checking for all
|
||||||
|
// SubConns of this ClientConn.
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
func WithDisableHealthCheck() DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.disableHealthCheck = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// withHealthCheckFunc replaces the default health check function with the
|
||||||
|
// provided one. It makes tests easier to change the health check function.
|
||||||
|
//
|
||||||
|
// For testing purpose only.
|
||||||
|
func withHealthCheckFunc(f internal.HealthChecker) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.healthCheckFunc = f
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultDialOptions() dialOptions {
|
||||||
|
return dialOptions{
|
||||||
|
disableRetry: !envconfig.Retry,
|
||||||
|
reqHandshake: envconfig.RequireHandshake,
|
||||||
|
healthCheckFunc: internal.HealthCheckFunc,
|
||||||
|
copts: transport.ConnectOptions{
|
||||||
|
WriteBufferSize: defaultWriteBufSize,
|
||||||
|
ReadBufferSize: defaultReadBufSize,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// withGetMinConnectDeadline specifies the function that clientconn uses to
|
||||||
|
// get minConnectDeadline. This can be used to make connection attempts happen
|
||||||
|
// faster/slower.
|
||||||
|
//
|
||||||
|
// For testing purpose only.
|
||||||
|
func withMinConnectDeadline(f func() time.Duration) DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.minConnectTimeout = f
|
||||||
|
})
|
||||||
|
}
|
|
@ -102,10 +102,10 @@ func RegisterCodec(codec Codec) {
|
||||||
if codec == nil {
|
if codec == nil {
|
||||||
panic("cannot register a nil Codec")
|
panic("cannot register a nil Codec")
|
||||||
}
|
}
|
||||||
contentSubtype := strings.ToLower(codec.Name())
|
if codec.Name() == "" {
|
||||||
if contentSubtype == "" {
|
panic("cannot register Codec with empty string result for Name()")
|
||||||
panic("cannot register Codec with empty string result for String()")
|
|
||||||
}
|
}
|
||||||
|
contentSubtype := strings.ToLower(codec.Name())
|
||||||
registeredCodecs[contentSubtype] = codec
|
registeredCodecs[contentSubtype] = codec
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,70 +0,0 @@
|
||||||
// +build go1.6,!go1.7
|
|
||||||
|
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2016 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package grpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
"google.golang.org/grpc/transport"
|
|
||||||
)
|
|
||||||
|
|
||||||
// dialContext connects to the address on the named network.
|
|
||||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
|
||||||
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
|
|
||||||
}
|
|
||||||
|
|
||||||
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
|
|
||||||
req.Cancel = ctx.Done()
|
|
||||||
if err := req.Write(conn); err != nil {
|
|
||||||
return fmt.Errorf("failed to write the HTTP request: %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// toRPCErr converts an error into an error from the status package.
|
|
||||||
func toRPCErr(err error) error {
|
|
||||||
if err == nil || err == io.EOF {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, ok := status.FromError(err); ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch e := err.(type) {
|
|
||||||
case transport.StreamError:
|
|
||||||
return status.Error(e.Code, e.Desc)
|
|
||||||
case transport.ConnectionError:
|
|
||||||
return status.Error(codes.Unavailable, e.Desc)
|
|
||||||
default:
|
|
||||||
switch err {
|
|
||||||
case context.DeadlineExceeded:
|
|
||||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
|
||||||
case context.Canceled:
|
|
||||||
return status.Error(codes.Canceled, err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return status.Error(codes.Unknown, err.Error())
|
|
||||||
}
|
|
|
@ -1,71 +0,0 @@
|
||||||
// +build go1.7
|
|
||||||
|
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2016 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package grpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
netctx "golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
"google.golang.org/grpc/transport"
|
|
||||||
)
|
|
||||||
|
|
||||||
// dialContext connects to the address on the named network.
|
|
||||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
|
||||||
return (&net.Dialer{}).DialContext(ctx, network, address)
|
|
||||||
}
|
|
||||||
|
|
||||||
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
|
|
||||||
req = req.WithContext(ctx)
|
|
||||||
if err := req.Write(conn); err != nil {
|
|
||||||
return fmt.Errorf("failed to write the HTTP request: %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// toRPCErr converts an error into an error from the status package.
|
|
||||||
func toRPCErr(err error) error {
|
|
||||||
if err == nil || err == io.EOF {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, ok := status.FromError(err); ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch e := err.(type) {
|
|
||||||
case transport.StreamError:
|
|
||||||
return status.Error(e.Code, e.Desc)
|
|
||||||
case transport.ConnectionError:
|
|
||||||
return status.Error(codes.Unavailable, e.Desc)
|
|
||||||
default:
|
|
||||||
switch err {
|
|
||||||
case context.DeadlineExceeded, netctx.DeadlineExceeded:
|
|
||||||
return status.Error(codes.DeadlineExceeded, err.Error())
|
|
||||||
case context.Canceled, netctx.Canceled:
|
|
||||||
return status.Error(codes.Canceled, err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return status.Error(codes.Unknown, err.Error())
|
|
||||||
}
|
|
|
@ -1,341 +0,0 @@
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2016 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package grpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/balancer"
|
|
||||||
"google.golang.org/grpc/connectivity"
|
|
||||||
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/resolver"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
lbTokeyKey = "lb-token"
|
|
||||||
defaultFallbackTimeout = 10 * time.Second
|
|
||||||
grpclbName = "grpclb"
|
|
||||||
)
|
|
||||||
|
|
||||||
func convertDuration(d *lbpb.Duration) time.Duration {
|
|
||||||
if d == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
|
|
||||||
}
|
|
||||||
|
|
||||||
// Client API for LoadBalancer service.
|
|
||||||
// Mostly copied from generated pb.go file.
|
|
||||||
// To avoid circular dependency.
|
|
||||||
type loadBalancerClient struct {
|
|
||||||
cc *ClientConn
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
|
|
||||||
desc := &StreamDesc{
|
|
||||||
StreamName: "BalanceLoad",
|
|
||||||
ServerStreams: true,
|
|
||||||
ClientStreams: true,
|
|
||||||
}
|
|
||||||
stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
x := &balanceLoadClientStream{stream}
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type balanceLoadClientStream struct {
|
|
||||||
ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
|
|
||||||
return x.ClientStream.SendMsg(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
|
|
||||||
m := new(lbpb.LoadBalanceResponse)
|
|
||||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
balancer.Register(newLBBuilder())
|
|
||||||
}
|
|
||||||
|
|
||||||
// newLBBuilder creates a builder for grpclb.
|
|
||||||
func newLBBuilder() balancer.Builder {
|
|
||||||
return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
|
|
||||||
// fallbackTimeout. If no response is received from the remote balancer within
|
|
||||||
// fallbackTimeout, the backend addresses from the resolved address list will be
|
|
||||||
// used.
|
|
||||||
//
|
|
||||||
// Only call this function when a non-default fallback timeout is needed.
|
|
||||||
func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
|
|
||||||
return &lbBuilder{
|
|
||||||
fallbackTimeout: fallbackTimeout,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type lbBuilder struct {
|
|
||||||
fallbackTimeout time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *lbBuilder) Name() string {
|
|
||||||
return grpclbName
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
|
||||||
// This generates a manual resolver builder with a random scheme. This
|
|
||||||
// scheme will be used to dial to remote LB, so we can send filtered address
|
|
||||||
// updates to remote LB ClientConn using this manual resolver.
|
|
||||||
scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
|
|
||||||
r := &lbManualResolver{scheme: scheme, ccb: cc}
|
|
||||||
|
|
||||||
var target string
|
|
||||||
targetSplitted := strings.Split(cc.Target(), ":///")
|
|
||||||
if len(targetSplitted) < 2 {
|
|
||||||
target = cc.Target()
|
|
||||||
} else {
|
|
||||||
target = targetSplitted[1]
|
|
||||||
}
|
|
||||||
|
|
||||||
lb := &lbBalancer{
|
|
||||||
cc: newLBCacheClientConn(cc),
|
|
||||||
target: target,
|
|
||||||
opt: opt,
|
|
||||||
fallbackTimeout: b.fallbackTimeout,
|
|
||||||
doneCh: make(chan struct{}),
|
|
||||||
|
|
||||||
manualResolver: r,
|
|
||||||
csEvltr: &connectivityStateEvaluator{},
|
|
||||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
|
||||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
|
||||||
picker: &errPicker{err: balancer.ErrNoSubConnAvailable},
|
|
||||||
clientStats: &rpcStats{},
|
|
||||||
}
|
|
||||||
|
|
||||||
return lb
|
|
||||||
}
|
|
||||||
|
|
||||||
type lbBalancer struct {
|
|
||||||
cc *lbCacheClientConn
|
|
||||||
target string
|
|
||||||
opt balancer.BuildOptions
|
|
||||||
fallbackTimeout time.Duration
|
|
||||||
doneCh chan struct{}
|
|
||||||
|
|
||||||
// manualResolver is used in the remote LB ClientConn inside grpclb. When
|
|
||||||
// resolved address updates are received by grpclb, filtered updates will be
|
|
||||||
// send to remote LB ClientConn through this resolver.
|
|
||||||
manualResolver *lbManualResolver
|
|
||||||
// The ClientConn to talk to the remote balancer.
|
|
||||||
ccRemoteLB *ClientConn
|
|
||||||
|
|
||||||
// Support client side load reporting. Each picker gets a reference to this,
|
|
||||||
// and will update its content.
|
|
||||||
clientStats *rpcStats
|
|
||||||
|
|
||||||
mu sync.Mutex // guards everything following.
|
|
||||||
// The full server list including drops, used to check if the newly received
|
|
||||||
// serverList contains anything new. Each generate picker will also have
|
|
||||||
// reference to this list to do the first layer pick.
|
|
||||||
fullServerList []*lbpb.Server
|
|
||||||
// All backends addresses, with metadata set to nil. This list contains all
|
|
||||||
// backend addresses in the same order and with the same duplicates as in
|
|
||||||
// serverlist. When generating picker, a SubConn slice with the same order
|
|
||||||
// but with only READY SCs will be gerenated.
|
|
||||||
backendAddrs []resolver.Address
|
|
||||||
// Roundrobin functionalities.
|
|
||||||
csEvltr *connectivityStateEvaluator
|
|
||||||
state connectivity.State
|
|
||||||
subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn.
|
|
||||||
scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
|
|
||||||
picker balancer.Picker
|
|
||||||
// Support fallback to resolved backend addresses if there's no response
|
|
||||||
// from remote balancer within fallbackTimeout.
|
|
||||||
fallbackTimerExpired bool
|
|
||||||
serverListReceived bool
|
|
||||||
// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
|
|
||||||
// when resolved address updates are received, and read in the goroutine
|
|
||||||
// handling fallback.
|
|
||||||
resolvedBackendAddrs []resolver.Address
|
|
||||||
}
|
|
||||||
|
|
||||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker from
|
|
||||||
// it. The picker
|
|
||||||
// - always returns ErrTransientFailure if the balancer is in TransientFailure,
|
|
||||||
// - does two layer roundrobin pick otherwise.
|
|
||||||
// Caller must hold lb.mu.
|
|
||||||
func (lb *lbBalancer) regeneratePicker() {
|
|
||||||
if lb.state == connectivity.TransientFailure {
|
|
||||||
lb.picker = &errPicker{err: balancer.ErrTransientFailure}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var readySCs []balancer.SubConn
|
|
||||||
for _, a := range lb.backendAddrs {
|
|
||||||
if sc, ok := lb.subConns[a]; ok {
|
|
||||||
if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
|
|
||||||
readySCs = append(readySCs, sc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(lb.fullServerList) <= 0 {
|
|
||||||
if len(readySCs) <= 0 {
|
|
||||||
lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lb.picker = &rrPicker{subConns: readySCs}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lb.picker = &lbPicker{
|
|
||||||
serverList: lb.fullServerList,
|
|
||||||
subConns: readySCs,
|
|
||||||
stats: lb.clientStats,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
|
||||||
grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
|
|
||||||
lb.mu.Lock()
|
|
||||||
defer lb.mu.Unlock()
|
|
||||||
|
|
||||||
oldS, ok := lb.scStates[sc]
|
|
||||||
if !ok {
|
|
||||||
grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lb.scStates[sc] = s
|
|
||||||
switch s {
|
|
||||||
case connectivity.Idle:
|
|
||||||
sc.Connect()
|
|
||||||
case connectivity.Shutdown:
|
|
||||||
// When an address was removed by resolver, b called RemoveSubConn but
|
|
||||||
// kept the sc's state in scStates. Remove state for this sc here.
|
|
||||||
delete(lb.scStates, sc)
|
|
||||||
}
|
|
||||||
|
|
||||||
oldAggrState := lb.state
|
|
||||||
lb.state = lb.csEvltr.recordTransition(oldS, s)
|
|
||||||
|
|
||||||
// Regenerate picker when one of the following happens:
|
|
||||||
// - this sc became ready from not-ready
|
|
||||||
// - this sc became not-ready from ready
|
|
||||||
// - the aggregated state of balancer became TransientFailure from non-TransientFailure
|
|
||||||
// - the aggregated state of balancer became non-TransientFailure from TransientFailure
|
|
||||||
if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
|
|
||||||
(lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
|
|
||||||
lb.regeneratePicker()
|
|
||||||
}
|
|
||||||
|
|
||||||
lb.cc.UpdateBalancerState(lb.state, lb.picker)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
|
|
||||||
// resolved backends (backends received from resolver, not from remote balancer)
|
|
||||||
// if no connection to remote balancers was successful.
|
|
||||||
func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
|
|
||||||
timer := time.NewTimer(fallbackTimeout)
|
|
||||||
defer timer.Stop()
|
|
||||||
select {
|
|
||||||
case <-timer.C:
|
|
||||||
case <-lb.doneCh:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lb.mu.Lock()
|
|
||||||
if lb.serverListReceived {
|
|
||||||
lb.mu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lb.fallbackTimerExpired = true
|
|
||||||
lb.refreshSubConns(lb.resolvedBackendAddrs)
|
|
||||||
lb.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
|
|
||||||
// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
|
|
||||||
// connections.
|
|
||||||
func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
|
||||||
grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
|
|
||||||
if len(addrs) <= 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var remoteBalancerAddrs, backendAddrs []resolver.Address
|
|
||||||
for _, a := range addrs {
|
|
||||||
if a.Type == resolver.GRPCLB {
|
|
||||||
remoteBalancerAddrs = append(remoteBalancerAddrs, a)
|
|
||||||
} else {
|
|
||||||
backendAddrs = append(backendAddrs, a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if lb.ccRemoteLB == nil {
|
|
||||||
if len(remoteBalancerAddrs) <= 0 {
|
|
||||||
grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// First time receiving resolved addresses, create a cc to remote
|
|
||||||
// balancers.
|
|
||||||
lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
|
|
||||||
// Start the fallback goroutine.
|
|
||||||
go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
// cc to remote balancers uses lb.manualResolver. Send the updated remote
|
|
||||||
// balancer addresses to it through manualResolver.
|
|
||||||
lb.manualResolver.NewAddress(remoteBalancerAddrs)
|
|
||||||
|
|
||||||
lb.mu.Lock()
|
|
||||||
lb.resolvedBackendAddrs = backendAddrs
|
|
||||||
// If serverListReceived is true, connection to remote balancer was
|
|
||||||
// successful and there's no need to do fallback anymore.
|
|
||||||
// If fallbackTimerExpired is false, fallback hasn't happened yet.
|
|
||||||
if !lb.serverListReceived && lb.fallbackTimerExpired {
|
|
||||||
// This means we received a new list of resolved backends, and we are
|
|
||||||
// still in fallback mode. Need to update the list of backends we are
|
|
||||||
// using to the new list of backends.
|
|
||||||
lb.refreshSubConns(lb.resolvedBackendAddrs)
|
|
||||||
}
|
|
||||||
lb.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lb *lbBalancer) Close() {
|
|
||||||
select {
|
|
||||||
case <-lb.doneCh:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
close(lb.doneCh)
|
|
||||||
if lb.ccRemoteLB != nil {
|
|
||||||
lb.ccRemoteLB.Close()
|
|
||||||
}
|
|
||||||
lb.cc.close()
|
|
||||||
}
|
|
|
@ -1,799 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: grpc_lb_v1/messages/messages.proto
|
|
||||||
|
|
||||||
package messages // import "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
|
||||||
import fmt "fmt"
|
|
||||||
import math "math"
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|
||||||
|
|
||||||
type Duration struct {
|
|
||||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
|
||||||
// to +315,576,000,000 inclusive.
|
|
||||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
|
||||||
// Signed fractions of a second at nanosecond resolution of the span
|
|
||||||
// of time. Durations less than one second are represented with a 0
|
|
||||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
|
||||||
// of one second or more, a non-zero value for the `nanos` field must be
|
|
||||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
|
||||||
// to +999,999,999 inclusive.
|
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Duration) Reset() { *m = Duration{} }
|
|
||||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Duration) ProtoMessage() {}
|
|
||||||
func (*Duration) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{0}
|
|
||||||
}
|
|
||||||
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Duration.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *Duration) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Duration.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *Duration) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Duration.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Duration) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Duration.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Duration) GetSeconds() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Seconds
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Duration) GetNanos() int32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Nanos
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type Timestamp struct {
|
|
||||||
// Represents seconds of UTC time since Unix epoch
|
|
||||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
|
||||||
// 9999-12-31T23:59:59Z inclusive.
|
|
||||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
|
||||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
|
||||||
// second values with fractions must still have non-negative nanos values
|
|
||||||
// that count forward in time. Must be from 0 to 999,999,999
|
|
||||||
// inclusive.
|
|
||||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
|
||||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Timestamp) ProtoMessage() {}
|
|
||||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{1}
|
|
||||||
}
|
|
||||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *Timestamp) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Timestamp.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *Timestamp) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Timestamp.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Timestamp) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Timestamp.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Timestamp) GetSeconds() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Seconds
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Timestamp) GetNanos() int32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Nanos
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type LoadBalanceRequest struct {
|
|
||||||
// Types that are valid to be assigned to LoadBalanceRequestType:
|
|
||||||
// *LoadBalanceRequest_InitialRequest
|
|
||||||
// *LoadBalanceRequest_ClientStats
|
|
||||||
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} }
|
|
||||||
func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*LoadBalanceRequest) ProtoMessage() {}
|
|
||||||
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{2}
|
|
||||||
}
|
|
||||||
func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_LoadBalanceRequest.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *LoadBalanceRequest) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_LoadBalanceRequest.Size(m)
|
|
||||||
}
|
|
||||||
func (m *LoadBalanceRequest) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo
|
|
||||||
|
|
||||||
type isLoadBalanceRequest_LoadBalanceRequestType interface {
|
|
||||||
isLoadBalanceRequest_LoadBalanceRequestType()
|
|
||||||
}
|
|
||||||
|
|
||||||
type LoadBalanceRequest_InitialRequest struct {
|
|
||||||
InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
|
|
||||||
}
|
|
||||||
type LoadBalanceRequest_ClientStats struct {
|
|
||||||
ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
|
|
||||||
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
|
|
||||||
|
|
||||||
func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
|
|
||||||
if m != nil {
|
|
||||||
return m.LoadBalanceRequestType
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
|
|
||||||
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
|
|
||||||
return x.InitialRequest
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
|
|
||||||
if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
|
|
||||||
return x.ClientStats
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
|
||||||
func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
|
||||||
return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
|
|
||||||
(*LoadBalanceRequest_InitialRequest)(nil),
|
|
||||||
(*LoadBalanceRequest_ClientStats)(nil),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
|
||||||
m := msg.(*LoadBalanceRequest)
|
|
||||||
// load_balance_request_type
|
|
||||||
switch x := m.LoadBalanceRequestType.(type) {
|
|
||||||
case *LoadBalanceRequest_InitialRequest:
|
|
||||||
b.EncodeVarint(1<<3 | proto.WireBytes)
|
|
||||||
if err := b.EncodeMessage(x.InitialRequest); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case *LoadBalanceRequest_ClientStats:
|
|
||||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
|
||||||
if err := b.EncodeMessage(x.ClientStats); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case nil:
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
|
||||||
m := msg.(*LoadBalanceRequest)
|
|
||||||
switch tag {
|
|
||||||
case 1: // load_balance_request_type.initial_request
|
|
||||||
if wire != proto.WireBytes {
|
|
||||||
return true, proto.ErrInternalBadWireType
|
|
||||||
}
|
|
||||||
msg := new(InitialLoadBalanceRequest)
|
|
||||||
err := b.DecodeMessage(msg)
|
|
||||||
m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
|
|
||||||
return true, err
|
|
||||||
case 2: // load_balance_request_type.client_stats
|
|
||||||
if wire != proto.WireBytes {
|
|
||||||
return true, proto.ErrInternalBadWireType
|
|
||||||
}
|
|
||||||
msg := new(ClientStats)
|
|
||||||
err := b.DecodeMessage(msg)
|
|
||||||
m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
|
|
||||||
return true, err
|
|
||||||
default:
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
|
|
||||||
m := msg.(*LoadBalanceRequest)
|
|
||||||
// load_balance_request_type
|
|
||||||
switch x := m.LoadBalanceRequestType.(type) {
|
|
||||||
case *LoadBalanceRequest_InitialRequest:
|
|
||||||
s := proto.Size(x.InitialRequest)
|
|
||||||
n += 1 // tag and wire
|
|
||||||
n += proto.SizeVarint(uint64(s))
|
|
||||||
n += s
|
|
||||||
case *LoadBalanceRequest_ClientStats:
|
|
||||||
s := proto.Size(x.ClientStats)
|
|
||||||
n += 1 // tag and wire
|
|
||||||
n += proto.SizeVarint(uint64(s))
|
|
||||||
n += s
|
|
||||||
case nil:
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
type InitialLoadBalanceRequest struct {
|
|
||||||
// Name of the load-balanced service (e.g., balancer.service.com).
// Its length should be less than 256 bytes.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} }
|
|
||||||
func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*InitialLoadBalanceRequest) ProtoMessage() {}
|
|
||||||
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{3}
|
|
||||||
}
|
|
||||||
func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *InitialLoadBalanceRequest) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_InitialLoadBalanceRequest.Size(m)
|
|
||||||
}
|
|
||||||
func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *InitialLoadBalanceRequest) GetName() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains client level statistics that are useful to load balancing. Each
|
|
||||||
// count except the timestamp should be reset to zero after reporting the stats.
|
|
||||||
type ClientStats struct {
|
|
||||||
// The timestamp of generating the report.
|
|
||||||
Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
|
|
||||||
// The total number of RPCs that started.
|
|
||||||
NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
|
|
||||||
// The total number of RPCs that finished.
|
|
||||||
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
|
|
||||||
// The total number of RPCs that were dropped by the client because of rate
|
|
||||||
// limiting.
|
|
||||||
NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
|
|
||||||
// The total number of RPCs that were dropped by the client because of load
|
|
||||||
// balancing.
|
|
||||||
NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
|
|
||||||
// The total number of RPCs that failed to reach a server except dropped RPCs.
|
|
||||||
NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
|
|
||||||
// The total number of RPCs that finished and are known to have been received
|
|
||||||
// by a server.
|
|
||||||
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ClientStats) Reset() { *m = ClientStats{} }
|
|
||||||
func (m *ClientStats) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ClientStats) ProtoMessage() {}
|
|
||||||
func (*ClientStats) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{4}
|
|
||||||
}
|
|
||||||
func (m *ClientStats) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ClientStats.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *ClientStats) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ClientStats.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *ClientStats) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ClientStats.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ClientStats) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ClientStats.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ClientStats proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ClientStats) GetTimestamp() *Timestamp {
|
|
||||||
if m != nil {
|
|
||||||
return m.Timestamp
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ClientStats) GetNumCallsStarted() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.NumCallsStarted
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ClientStats) GetNumCallsFinished() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.NumCallsFinished
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.NumCallsFinishedWithDropForRateLimiting
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.NumCallsFinishedWithDropForLoadBalancing
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.NumCallsFinishedWithClientFailedToSend
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.NumCallsFinishedKnownReceived
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type LoadBalanceResponse struct {
|
|
||||||
// Types that are valid to be assigned to LoadBalanceResponseType:
|
|
||||||
// *LoadBalanceResponse_InitialResponse
|
|
||||||
// *LoadBalanceResponse_ServerList
|
|
||||||
LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} }
|
|
||||||
func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*LoadBalanceResponse) ProtoMessage() {}
|
|
||||||
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{5}
|
|
||||||
}
|
|
||||||
func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_LoadBalanceResponse.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *LoadBalanceResponse) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_LoadBalanceResponse.Size(m)
|
|
||||||
}
|
|
||||||
func (m *LoadBalanceResponse) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo
|
|
||||||
|
|
||||||
type isLoadBalanceResponse_LoadBalanceResponseType interface {
|
|
||||||
isLoadBalanceResponse_LoadBalanceResponseType()
|
|
||||||
}
|
|
||||||
|
|
||||||
type LoadBalanceResponse_InitialResponse struct {
|
|
||||||
InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
|
|
||||||
}
|
|
||||||
type LoadBalanceResponse_ServerList struct {
|
|
||||||
ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
|
|
||||||
func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {}
|
|
||||||
|
|
||||||
func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
|
|
||||||
if m != nil {
|
|
||||||
return m.LoadBalanceResponseType
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
|
|
||||||
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
|
|
||||||
return x.InitialResponse
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LoadBalanceResponse) GetServerList() *ServerList {
|
|
||||||
if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
|
|
||||||
return x.ServerList
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
|
||||||
func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
|
||||||
return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
|
|
||||||
(*LoadBalanceResponse_InitialResponse)(nil),
|
|
||||||
(*LoadBalanceResponse_ServerList)(nil),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
|
||||||
m := msg.(*LoadBalanceResponse)
|
|
||||||
// load_balance_response_type
|
|
||||||
switch x := m.LoadBalanceResponseType.(type) {
|
|
||||||
case *LoadBalanceResponse_InitialResponse:
|
|
||||||
b.EncodeVarint(1<<3 | proto.WireBytes)
|
|
||||||
if err := b.EncodeMessage(x.InitialResponse); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case *LoadBalanceResponse_ServerList:
|
|
||||||
b.EncodeVarint(2<<3 | proto.WireBytes)
|
|
||||||
if err := b.EncodeMessage(x.ServerList); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case nil:
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
|
||||||
m := msg.(*LoadBalanceResponse)
|
|
||||||
switch tag {
|
|
||||||
case 1: // load_balance_response_type.initial_response
|
|
||||||
if wire != proto.WireBytes {
|
|
||||||
return true, proto.ErrInternalBadWireType
|
|
||||||
}
|
|
||||||
msg := new(InitialLoadBalanceResponse)
|
|
||||||
err := b.DecodeMessage(msg)
|
|
||||||
m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
|
|
||||||
return true, err
|
|
||||||
case 2: // load_balance_response_type.server_list
|
|
||||||
if wire != proto.WireBytes {
|
|
||||||
return true, proto.ErrInternalBadWireType
|
|
||||||
}
|
|
||||||
msg := new(ServerList)
|
|
||||||
err := b.DecodeMessage(msg)
|
|
||||||
m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
|
|
||||||
return true, err
|
|
||||||
default:
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
|
|
||||||
m := msg.(*LoadBalanceResponse)
|
|
||||||
// load_balance_response_type
|
|
||||||
switch x := m.LoadBalanceResponseType.(type) {
|
|
||||||
case *LoadBalanceResponse_InitialResponse:
|
|
||||||
s := proto.Size(x.InitialResponse)
|
|
||||||
n += 1 // tag and wire
|
|
||||||
n += proto.SizeVarint(uint64(s))
|
|
||||||
n += s
|
|
||||||
case *LoadBalanceResponse_ServerList:
|
|
||||||
s := proto.Size(x.ServerList)
|
|
||||||
n += 1 // tag and wire
|
|
||||||
n += proto.SizeVarint(uint64(s))
|
|
||||||
n += s
|
|
||||||
case nil:
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
type InitialLoadBalanceResponse struct {
|
|
||||||
// This is an application layer redirect that indicates the client should use
|
|
||||||
// the specified server for load balancing. When this field is non-empty in
|
|
||||||
// the response, the client should open a separate connection to the
|
|
||||||
// load_balancer_delegate and call the BalanceLoad method. Its length should
|
|
||||||
// be less than 64 bytes.
|
|
||||||
LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
|
|
||||||
// This interval defines how often the client should send the client stats
|
|
||||||
// to the load balancer. Stats should only be reported when the duration is
|
|
||||||
// positive.
|
|
||||||
ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} }
|
|
||||||
func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*InitialLoadBalanceResponse) ProtoMessage() {}
|
|
||||||
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{6}
|
|
||||||
}
|
|
||||||
func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *InitialLoadBalanceResponse) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_InitialLoadBalanceResponse.Size(m)
|
|
||||||
}
|
|
||||||
func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.LoadBalancerDelegate
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
|
|
||||||
if m != nil {
|
|
||||||
return m.ClientStatsReportInterval
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type ServerList struct {
|
|
||||||
// Contains a list of servers selected by the load balancer. The list will
|
|
||||||
// be updated when server resolutions change or as needed to balance load
|
|
||||||
// across more servers. The client should consume the server list in order
|
|
||||||
// unless instructed otherwise via the client_config.
|
|
||||||
Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ServerList) Reset() { *m = ServerList{} }
|
|
||||||
func (m *ServerList) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ServerList) ProtoMessage() {}
|
|
||||||
func (*ServerList) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{7}
|
|
||||||
}
|
|
||||||
func (m *ServerList) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ServerList.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ServerList.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *ServerList) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ServerList.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *ServerList) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ServerList.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ServerList) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ServerList.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ServerList proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ServerList) GetServers() []*Server {
|
|
||||||
if m != nil {
|
|
||||||
return m.Servers
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains server information. When none of the [drop_for_*] fields are true,
// use the other fields. When drop_for_rate_limiting is true, ignore all other
// fields. Use drop_for_load_balancing only when it is true and
// drop_for_rate_limiting is false.
type Server struct {
|
|
||||||
// A resolved address for the server, serialized in network-byte-order. It may
|
|
||||||
// either be an IPv4 or IPv6 address.
|
|
||||||
IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
|
|
||||||
// A resolved port number for the server.
|
|
||||||
Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
|
|
||||||
// An opaque but printable token given to the frontend for each pick. All
|
|
||||||
// frontend requests for that pick must include the token in its initial
|
|
||||||
// metadata. The token is used by the backend to verify the request and to
|
|
||||||
// allow the backend to report load to the gRPC LB system.
|
|
||||||
//
|
|
||||||
// Its length is variable but less than 50 bytes.
|
|
||||||
LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
|
|
||||||
// Indicates whether this particular request should be dropped by the client
|
|
||||||
// for rate limiting.
|
|
||||||
DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
|
|
||||||
// Indicates whether this particular request should be dropped by the client
|
|
||||||
// for load balancing.
|
|
||||||
DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) Reset() { *m = Server{} }
|
|
||||||
func (m *Server) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Server) ProtoMessage() {}
|
|
||||||
func (*Server) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_messages_b81c731f0e83edbd, []int{8}
|
|
||||||
}
|
|
||||||
func (m *Server) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Server.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Server.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *Server) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Server.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *Server) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Server.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Server) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Server.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Server proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Server) GetIpAddress() []byte {
|
|
||||||
if m != nil {
|
|
||||||
return m.IpAddress
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) GetPort() int32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Port
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) GetLoadBalanceToken() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.LoadBalanceToken
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) GetDropForRateLimiting() bool {
|
|
||||||
if m != nil {
|
|
||||||
return m.DropForRateLimiting
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Server) GetDropForLoadBalancing() bool {
|
|
||||||
if m != nil {
|
|
||||||
return m.DropForLoadBalancing
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
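
The comment above the Server type spells out a precedence among the drop fields. As a minimal sketch only (this helper is not part of the generated code; it assumes the Server type and accessors defined above), the decision order looks like:

```go
// usableServer reports whether a balancer-provided Server entry points at a
// real backend, following the documented precedence: drop_for_rate_limiting is
// checked first, then drop_for_load_balancing; the address, port and token are
// only meaningful when both drop flags are false.
func usableServer(s *Server) (ok bool, dropReason string) {
	switch {
	case s.GetDropForRateLimiting():
		return false, "rate limiting"
	case s.GetDropForLoadBalancing():
		return false, "load balancing"
	default:
		return true, ""
	}
}
```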
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
|
|
||||||
proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
|
|
||||||
proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
|
|
||||||
proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
|
|
||||||
proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
|
|
||||||
proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
|
|
||||||
proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
|
|
||||||
proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
|
|
||||||
proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor_messages_b81c731f0e83edbd)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_messages_b81c731f0e83edbd = []byte{
|
|
||||||
// 731 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39,
|
|
||||||
0x14, 0x26, 0x9b, 0x00, 0xc9, 0x09, 0x5a, 0xb2, 0x26, 0x0b, 0x81, 0x05, 0x89, 0x1d, 0x69, 0xd9,
|
|
||||||
0x68, 0xc5, 0x4e, 0x04, 0xd9, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43,
|
|
||||||
0x55, 0xa9, 0x52, 0x65, 0x39, 0x19, 0x33, 0x58, 0x38, 0xf6, 0xd4, 0x76, 0x82, 0xfa, 0x08, 0x7d,
|
|
||||||
0x94, 0x3e, 0x46, 0xd5, 0x67, 0xe8, 0xfb, 0x54, 0xe3, 0x99, 0xc9, 0x0c, 0x10, 0x40, 0xbd, 0x89,
|
|
||||||
0xec, 0xe3, 0xef, 0x7c, 0xdf, 0xf1, 0x89, 0xbf, 0x33, 0xe0, 0x85, 0x3a, 0x1a, 0x11, 0x31, 0x24,
|
|
||||||
0xd3, 0x83, 0xce, 0x98, 0x19, 0x43, 0x43, 0x66, 0x66, 0x0b, 0x3f, 0xd2, 0xca, 0x2a, 0x04, 0x31,
|
|
||||||
0xc6, 0x17, 0x43, 0x7f, 0x7a, 0xe0, 0x3d, 0x85, 0xea, 0xf1, 0x44, 0x53, 0xcb, 0x95, 0x44, 0x2d,
|
|
||||||
0x58, 0x36, 0x6c, 0xa4, 0x64, 0x60, 0x5a, 0xa5, 0xdd, 0x52, 0xbb, 0x8c, 0xb3, 0x2d, 0x6a, 0xc2,
|
|
||||||
0xa2, 0xa4, 0x52, 0x99, 0xd6, 0x2f, 0xbb, 0xa5, 0xf6, 0x22, 0x4e, 0x36, 0xde, 0x33, 0xa8, 0x9d,
|
|
||||||
0xf3, 0x31, 0x33, 0x96, 0x8e, 0xa3, 0x9f, 0x4e, 0xfe, 0x5a, 0x02, 0x74, 0xa6, 0x68, 0xd0, 0xa3,
|
|
||||||
0x82, 0xca, 0x11, 0xc3, 0xec, 0xe3, 0x84, 0x19, 0x8b, 0xde, 0xc0, 0x2a, 0x97, 0xdc, 0x72, 0x2a,
|
|
||||||
0x88, 0x4e, 0x42, 0x8e, 0xae, 0x7e, 0xf8, 0x97, 0x9f, 0x57, 0xed, 0x9f, 0x26, 0x90, 0xbb, 0xf9,
|
|
||||||
0xfd, 0x05, 0xfc, 0x6b, 0x9a, 0x9f, 0x31, 0x3e, 0x87, 0x95, 0x91, 0xe0, 0x4c, 0x5a, 0x62, 0x2c,
|
|
||||||
0xb5, 0x49, 0x15, 0xf5, 0xc3, 0x8d, 0x22, 0xdd, 0x91, 0x3b, 0x1f, 0xc4, 0xc7, 0xfd, 0x05, 0x5c,
|
|
||||||
0x1f, 0xe5, 0xdb, 0xde, 0x1f, 0xb0, 0x29, 0x14, 0x0d, 0xc8, 0x30, 0x91, 0xc9, 0x8a, 0x22, 0xf6,
|
|
||||||
0x53, 0xc4, 0xbc, 0x0e, 0x6c, 0xde, 0x5b, 0x09, 0x42, 0x50, 0x91, 0x74, 0xcc, 0x5c, 0xf9, 0x35,
|
|
||||||
0xec, 0xd6, 0xde, 0xe7, 0x0a, 0xd4, 0x0b, 0x62, 0xa8, 0x0b, 0x35, 0x9b, 0x75, 0x30, 0xbd, 0xe7,
|
|
||||||
0xef, 0xc5, 0xc2, 0x66, 0xed, 0xc5, 0x39, 0x0e, 0xfd, 0x03, 0xbf, 0xc9, 0xc9, 0x98, 0x8c, 0xa8,
|
|
||||||
0x10, 0x26, 0xbe, 0x93, 0xb6, 0x2c, 0x70, 0xb7, 0x2a, 0xe3, 0x55, 0x39, 0x19, 0x1f, 0xc5, 0xf1,
|
|
||||||
0x41, 0x12, 0x46, 0xfb, 0x80, 0x72, 0xec, 0x05, 0x97, 0xdc, 0x5c, 0xb2, 0xa0, 0x55, 0x76, 0xe0,
|
|
||||||
0x46, 0x06, 0x3e, 0x49, 0xe3, 0x88, 0x80, 0x7f, 0x17, 0x4d, 0xae, 0xb9, 0xbd, 0x24, 0x81, 0x56,
|
|
||||||
0x11, 0xb9, 0x50, 0x9a, 0x68, 0x6a, 0x19, 0x11, 0x7c, 0xcc, 0x2d, 0x97, 0x61, 0xab, 0xe2, 0x98,
|
|
||||||
0xfe, 0xbe, 0xcd, 0xf4, 0x8e, 0xdb, 0xcb, 0x63, 0xad, 0xa2, 0x13, 0xa5, 0x31, 0xb5, 0xec, 0x2c,
|
|
||||||
0x85, 0x23, 0x0a, 0x9d, 0x47, 0x05, 0x0a, 0xed, 0x8e, 0x15, 0x16, 0x9d, 0x42, 0xfb, 0x01, 0x85,
|
|
||||||
0xbc, 0xf7, 0xb1, 0xc4, 0x07, 0xf8, 0xf7, 0x3e, 0x89, 0xf4, 0x19, 0x5c, 0x50, 0x2e, 0x58, 0x40,
|
|
||||||
0xac, 0x22, 0x86, 0xc9, 0xa0, 0xb5, 0xe4, 0x04, 0xf6, 0xe6, 0x09, 0x24, 0x7f, 0xd5, 0x89, 0xc3,
|
|
||||||
0x9f, 0xab, 0x01, 0x93, 0x01, 0xea, 0xc3, 0x9f, 0x73, 0xe8, 0xaf, 0xa4, 0xba, 0x96, 0x44, 0xb3,
|
|
||||||
0x11, 0xe3, 0x53, 0x16, 0xb4, 0x96, 0x1d, 0xe5, 0xce, 0x6d, 0xca, 0xd7, 0x31, 0x0a, 0xa7, 0x20,
|
|
||||||
0xef, 0x5b, 0x09, 0xd6, 0x6e, 0x3c, 0x1b, 0x13, 0x29, 0x69, 0x18, 0x1a, 0x40, 0x23, 0x77, 0x40,
|
|
||||||
0x12, 0x4b, 0x9f, 0xc6, 0xde, 0x63, 0x16, 0x48, 0xd0, 0xfd, 0x05, 0xbc, 0x3a, 0xf3, 0x40, 0x4a,
|
|
||||||
0xfa, 0x04, 0xea, 0x86, 0xe9, 0x29, 0xd3, 0x44, 0x70, 0x63, 0x53, 0x0f, 0xac, 0x17, 0xf9, 0x06,
|
|
||||||
0xee, 0xf8, 0x8c, 0x3b, 0x0f, 0x81, 0x99, 0xed, 0x7a, 0xdb, 0xb0, 0x75, 0xcb, 0x01, 0x09, 0x67,
|
|
||||||
0x62, 0x81, 0x2f, 0x25, 0xd8, 0xba, 0xbf, 0x14, 0xf4, 0x1f, 0xac, 0x17, 0x93, 0x35, 0x09, 0x98,
|
|
||||||
0x60, 0x21, 0xb5, 0x99, 0x2d, 0x9a, 0x22, 0x4f, 0xd2, 0xc7, 0xe9, 0x19, 0x7a, 0x0b, 0xdb, 0x45,
|
|
||||||
0xcb, 0x12, 0xcd, 0x22, 0xa5, 0x2d, 0xe1, 0xd2, 0x32, 0x3d, 0xa5, 0x22, 0x2d, 0xbf, 0x59, 0x2c,
|
|
||||||
0x3f, 0x1b, 0x62, 0x78, 0xb3, 0xe0, 0x5e, 0xec, 0xf2, 0x4e, 0xd3, 0x34, 0xef, 0x05, 0x40, 0x7e,
|
|
||||||
0x4b, 0xb4, 0x1f, 0x0f, 0xac, 0x78, 0x17, 0x0f, 0xac, 0x72, 0xbb, 0x7e, 0x88, 0xee, 0xb6, 0x03,
|
|
||||||
0x67, 0x90, 0x57, 0x95, 0x6a, 0xb9, 0x51, 0xf1, 0xbe, 0x97, 0x60, 0x29, 0x39, 0x41, 0x3b, 0x00,
|
|
||||||
0x3c, 0x22, 0x34, 0x08, 0x34, 0x33, 0xc9, 0xc8, 0x5b, 0xc1, 0x35, 0x1e, 0xbd, 0x4c, 0x02, 0xb1,
|
|
||||||
0xfb, 0x63, 0xed, 0x74, 0xe6, 0xb9, 0x75, 0x6c, 0xc6, 0x1b, 0x9d, 0xb4, 0xea, 0x8a, 0x49, 0x67,
|
|
||||||
0xc6, 0x1a, 0x6e, 0x14, 0x1a, 0x71, 0x1e, 0xc7, 0x51, 0x17, 0xd6, 0x1f, 0x30, 0x5d, 0x15, 0xaf,
|
|
||||||
0x05, 0x73, 0x0c, 0xf6, 0x3f, 0x6c, 0x3c, 0x64, 0xa4, 0x2a, 0x6e, 0x06, 0x73, 0x4c, 0xd3, 0xeb,
|
|
||||||
0xbe, 0x3f, 0x08, 0x95, 0x0a, 0x05, 0xf3, 0x43, 0x25, 0xa8, 0x0c, 0x7d, 0xa5, 0xc3, 0x4e, 0xdc,
|
|
||||||
0x0d, 0xf7, 0x23, 0x86, 0x9d, 0x39, 0x5f, 0x95, 0xe1, 0x92, 0xfb, 0x9a, 0x74, 0x7f, 0x04, 0x00,
|
|
||||||
0x00, 0xff, 0xff, 0x8e, 0xd0, 0x70, 0xb7, 0x73, 0x06, 0x00, 0x00,
|
|
||||||
}
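
This is the end of the generated file. For orientation, a minimal usage sketch of the generated API above: the oneof field is populated by choosing one of the wrapper structs, and the message can then be marshaled with the vendored golang/protobuf package. The import path follows the go_package option in the .proto listed below; the service name here is a placeholder.

```go
package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
)

func main() {
	// Select the initial_request branch of the load_balance_request_type oneof.
	req := &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: &lbpb.InitialLoadBalanceRequest{Name: "balancer.service.com"},
		},
	}
	b, err := proto.Marshal(req)
	if err != nil {
		log.Fatalf("marshal failed: %v", err)
	}
	// The generated getter returns nil if the other oneof branch had been set.
	log.Printf("marshaled %d bytes, initial request name: %q", len(b), req.GetInitialRequest().GetName())
}
```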
@ -1,155 +0,0 @@
// Copyright 2016 gRPC authors.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package grpc.lb.v1;
|
|
||||||
option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages";
|
|
||||||
|
|
||||||
message Duration {
|
|
||||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
|
||||||
// to +315,576,000,000 inclusive.
|
|
||||||
int64 seconds = 1;
|
|
||||||
|
|
||||||
// Signed fractions of a second at nanosecond resolution of the span
|
|
||||||
// of time. Durations less than one second are represented with a 0
|
|
||||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
|
||||||
// of one second or more, a non-zero value for the `nanos` field must be
|
|
||||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
|
||||||
// to +999,999,999 inclusive.
|
|
||||||
int32 nanos = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message Timestamp {
|
|
||||||
// Represents seconds of UTC time since Unix epoch
|
|
||||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
|
||||||
// 9999-12-31T23:59:59Z inclusive.
|
|
||||||
int64 seconds = 1;
|
|
||||||
|
|
||||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
|
||||||
// second values with fractions must still have non-negative nanos values
|
|
||||||
// that count forward in time. Must be from 0 to 999,999,999
|
|
||||||
// inclusive.
|
|
||||||
int32 nanos = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message LoadBalanceRequest {
|
|
||||||
oneof load_balance_request_type {
|
|
||||||
// This message should be sent on the first request to the load balancer.
|
|
||||||
InitialLoadBalanceRequest initial_request = 1;
|
|
||||||
|
|
||||||
// The client stats should be periodically reported to the load balancer
|
|
||||||
// based on the duration defined in the InitialLoadBalanceResponse.
|
|
||||||
ClientStats client_stats = 2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message InitialLoadBalanceRequest {
|
|
||||||
// Name of the load-balanced service (e.g., balancer.service.com).
// Its length should be less than 256 bytes.
string name = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains client level statistics that are useful to load balancing. Each
|
|
||||||
// count except the timestamp should be reset to zero after reporting the stats.
|
|
||||||
message ClientStats {
|
|
||||||
// The timestamp of generating the report.
|
|
||||||
Timestamp timestamp = 1;
|
|
||||||
|
|
||||||
// The total number of RPCs that started.
|
|
||||||
int64 num_calls_started = 2;
|
|
||||||
|
|
||||||
// The total number of RPCs that finished.
|
|
||||||
int64 num_calls_finished = 3;
|
|
||||||
|
|
||||||
// The total number of RPCs that were dropped by the client because of rate
|
|
||||||
// limiting.
|
|
||||||
int64 num_calls_finished_with_drop_for_rate_limiting = 4;
|
|
||||||
|
|
||||||
// The total number of RPCs that were dropped by the client because of load
|
|
||||||
// balancing.
|
|
||||||
int64 num_calls_finished_with_drop_for_load_balancing = 5;
|
|
||||||
|
|
||||||
// The total number of RPCs that failed to reach a server except dropped RPCs.
|
|
||||||
int64 num_calls_finished_with_client_failed_to_send = 6;
|
|
||||||
|
|
||||||
// The total number of RPCs that finished and are known to have been received
|
|
||||||
// by a server.
|
|
||||||
int64 num_calls_finished_known_received = 7;
|
|
||||||
}
|
|
||||||
|
|
||||||
message LoadBalanceResponse {
|
|
||||||
oneof load_balance_response_type {
|
|
||||||
// This message should be sent on the first response to the client.
|
|
||||||
InitialLoadBalanceResponse initial_response = 1;
|
|
||||||
|
|
||||||
// Contains the list of servers selected by the load balancer. The client
|
|
||||||
// should send requests to these servers in the specified order.
|
|
||||||
ServerList server_list = 2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message InitialLoadBalanceResponse {
|
|
||||||
// This is an application layer redirect that indicates the client should use
|
|
||||||
// the specified server for load balancing. When this field is non-empty in
|
|
||||||
// the response, the client should open a separate connection to the
|
|
||||||
// load_balancer_delegate and call the BalanceLoad method. Its length should
|
|
||||||
// be less than 64 bytes.
|
|
||||||
string load_balancer_delegate = 1;
|
|
||||||
|
|
||||||
// This interval defines how often the client should send the client stats
|
|
||||||
// to the load balancer. Stats should only be reported when the duration is
|
|
||||||
// positive.
|
|
||||||
Duration client_stats_report_interval = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ServerList {
|
|
||||||
// Contains a list of servers selected by the load balancer. The list will
|
|
||||||
// be updated when server resolutions change or as needed to balance load
|
|
||||||
// across more servers. The client should consume the server list in order
|
|
||||||
// unless instructed otherwise via the client_config.
|
|
||||||
repeated Server servers = 1;
|
|
||||||
|
|
||||||
// Was google.protobuf.Duration expiration_interval.
|
|
||||||
reserved 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains server information. When none of the [drop_for_*] fields are true,
|
|
||||||
// use the other fields. When drop_for_rate_limiting is true, ignore all other
|
|
||||||
// fields. Use drop_for_load_balancing only when it is true and
|
|
||||||
// drop_for_rate_limiting is false.
|
|
||||||
message Server {
|
|
||||||
// A resolved address for the server, serialized in network-byte-order. It may
|
|
||||||
// either be an IPv4 or IPv6 address.
|
|
||||||
bytes ip_address = 1;
|
|
||||||
|
|
||||||
// A resolved port number for the server.
|
|
||||||
int32 port = 2;
|
|
||||||
|
|
||||||
// An opaque but printable token given to the frontend for each pick. All
|
|
||||||
// frontend requests for that pick must include the token in its initial
|
|
||||||
// metadata. The token is used by the backend to verify the request and to
|
|
||||||
// allow the backend to report load to the gRPC LB system.
|
|
||||||
//
|
|
||||||
// Its length is variable but less than 50 bytes.
|
|
||||||
string load_balance_token = 3;
|
|
||||||
|
|
||||||
// Indicates whether this particular request should be dropped by the client
|
|
||||||
// for rate limiting.
|
|
||||||
bool drop_for_rate_limiting = 4;
|
|
||||||
|
|
||||||
// Indicates whether this particular request should be dropped by the client
|
|
||||||
// for load balancing.
|
|
||||||
bool drop_for_load_balancing = 5;
|
|
||||||
}
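
The Duration and Timestamp messages defined earlier in this file mirror the well-known protobuf types. Below is a minimal Go sketch of converting between them and the standard library, assuming the vendored package path from the go_package option above; the helper names are illustrative and are not the unexported convertDuration used by the grpclb balancer code later in this diff.

```go
package main

import (
	"fmt"
	"time"

	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
)

// toTimeDuration flattens an lbpb.Duration into a time.Duration.
func toTimeDuration(d *lbpb.Duration) time.Duration {
	if d == nil {
		return 0
	}
	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
}

// toLBTimestamp builds an lbpb.Timestamp from a time.Time, the same shape the
// load reporter uses for ClientStats.Timestamp.
func toLBTimestamp(t time.Time) *lbpb.Timestamp {
	return &lbpb.Timestamp{Seconds: t.Unix(), Nanos: int32(t.Nanosecond())}
}

func main() {
	interval := toTimeDuration(&lbpb.Duration{Seconds: 1, Nanos: 500000000})
	fmt.Println(interval) // 1.5s
	fmt.Println(toLBTimestamp(time.Now()).Seconds)
}
```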
@ -1,159 +0,0 @@
/*
|
|
||||||
*
|
|
||||||
* Copyright 2017 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package grpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/balancer"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
type rpcStats struct {
|
|
||||||
NumCallsStarted int64
|
|
||||||
NumCallsFinished int64
|
|
||||||
NumCallsFinishedWithDropForRateLimiting int64
|
|
||||||
NumCallsFinishedWithDropForLoadBalancing int64
|
|
||||||
NumCallsFinishedWithClientFailedToSend int64
|
|
||||||
NumCallsFinishedKnownReceived int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
|
|
||||||
func (s *rpcStats) toClientStats() *lbpb.ClientStats {
|
|
||||||
stats := &lbpb.ClientStats{
|
|
||||||
NumCallsStarted: atomic.SwapInt64(&s.NumCallsStarted, 0),
|
|
||||||
NumCallsFinished: atomic.SwapInt64(&s.NumCallsFinished, 0),
|
|
||||||
NumCallsFinishedWithDropForRateLimiting: atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0),
|
|
||||||
NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0),
|
|
||||||
NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0),
|
|
||||||
NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0),
|
|
||||||
}
|
|
||||||
return stats
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *rpcStats) dropForRateLimiting() {
|
|
||||||
atomic.AddInt64(&s.NumCallsStarted, 1)
|
|
||||||
atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1)
|
|
||||||
atomic.AddInt64(&s.NumCallsFinished, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *rpcStats) dropForLoadBalancing() {
|
|
||||||
atomic.AddInt64(&s.NumCallsStarted, 1)
|
|
||||||
atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1)
|
|
||||||
atomic.AddInt64(&s.NumCallsFinished, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *rpcStats) failedToSend() {
|
|
||||||
atomic.AddInt64(&s.NumCallsStarted, 1)
|
|
||||||
atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1)
|
|
||||||
atomic.AddInt64(&s.NumCallsFinished, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *rpcStats) knownReceived() {
|
|
||||||
atomic.AddInt64(&s.NumCallsStarted, 1)
|
|
||||||
atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1)
|
|
||||||
atomic.AddInt64(&s.NumCallsFinished, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
type errPicker struct {
|
|
||||||
// Pick always returns this err.
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
|
||||||
return nil, nil, p.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// rrPicker does roundrobin on subConns. It's typically used when there's no
// response from the remote balancer, and grpclb falls back to the resolved
// backends.
//
// It is guaranteed that len(subConns) > 0.
type rrPicker struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
|
|
||||||
subConnsNext int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
|
||||||
p.mu.Lock()
|
|
||||||
defer p.mu.Unlock()
|
|
||||||
sc := p.subConns[p.subConnsNext]
|
|
||||||
p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
|
|
||||||
return sc, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// lbPicker does two layers of picks:
//
// First layer: roundrobin on all servers in serverList, including drops and backends.
// - If it picks a drop, the RPC will fail as being dropped.
// - If it picks a backend, do a second layer pick to pick the real backend.
//
// Second layer: roundrobin on all READY backends.
//
// It's guaranteed that len(serverList) > 0.
type lbPicker struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
serverList []*lbpb.Server
|
|
||||||
serverListNext int
|
|
||||||
subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
|
|
||||||
subConnsNext int
|
|
||||||
|
|
||||||
stats *rpcStats
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
|
||||||
p.mu.Lock()
|
|
||||||
defer p.mu.Unlock()
|
|
||||||
|
|
||||||
// Layer one roundrobin on serverList.
|
|
||||||
s := p.serverList[p.serverListNext]
|
|
||||||
p.serverListNext = (p.serverListNext + 1) % len(p.serverList)
|
|
||||||
|
|
||||||
// If it's a drop, return an error and fail the RPC.
|
|
||||||
if s.DropForRateLimiting {
|
|
||||||
p.stats.dropForRateLimiting()
|
|
||||||
return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
|
|
||||||
}
|
|
||||||
if s.DropForLoadBalancing {
|
|
||||||
p.stats.dropForLoadBalancing()
|
|
||||||
return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
|
|
||||||
}
|
|
||||||
|
|
||||||
// If not a drop but there's no ready subConns.
|
|
||||||
if len(p.subConns) <= 0 {
|
|
||||||
return nil, nil, balancer.ErrNoSubConnAvailable
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return the next ready subConn in the list, also collect rpc stats.
|
|
||||||
sc := p.subConns[p.subConnsNext]
|
|
||||||
p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
|
|
||||||
done := func(info balancer.DoneInfo) {
|
|
||||||
if !info.BytesSent {
|
|
||||||
p.stats.failedToSend()
|
|
||||||
} else if info.BytesReceived {
|
|
||||||
p.stats.knownReceived()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return sc, done, nil
|
|
||||||
}
@ -1,266 +0,0 @@
/*
|
|
||||||
*
|
|
||||||
* Copyright 2017 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package grpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/balancer"
|
|
||||||
"google.golang.org/grpc/channelz"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/connectivity"
|
|
||||||
lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
"google.golang.org/grpc/resolver"
|
|
||||||
)
|
|
||||||
|
|
||||||
// processServerList updates the balancer's internal state, creates/removes
// SubConns, and regenerates the picker using the received serverList.
func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
|
|
||||||
grpclog.Infof("lbBalancer: processing server list: %+v", l)
|
|
||||||
lb.mu.Lock()
|
|
||||||
defer lb.mu.Unlock()
|
|
||||||
|
|
||||||
// Set serverListReceived to true so fallback will not take effect if it has
|
|
||||||
// not hit timeout.
|
|
||||||
lb.serverListReceived = true
|
|
||||||
|
|
||||||
// If the new server list == old server list, do nothing.
|
|
||||||
if reflect.DeepEqual(lb.fullServerList, l.Servers) {
|
|
||||||
grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lb.fullServerList = l.Servers
|
|
||||||
|
|
||||||
var backendAddrs []resolver.Address
|
|
||||||
for _, s := range l.Servers {
|
|
||||||
if s.DropForLoadBalancing || s.DropForRateLimiting {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken)
|
|
||||||
ip := net.IP(s.IpAddress)
|
|
||||||
ipStr := ip.String()
|
|
||||||
if ip.To4() == nil {
|
|
||||||
// Add square brackets to IPv6 addresses, otherwise net.Dial() and
// net.SplitHostPort() will return a "too many colons" error.
ipStr = fmt.Sprintf("[%s]", ipStr)
|
|
||||||
}
|
|
||||||
addr := resolver.Address{
|
|
||||||
Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
|
|
||||||
Metadata: &md,
|
|
||||||
}
|
|
||||||
|
|
||||||
backendAddrs = append(backendAddrs, addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call refreshSubConns to create/remove SubConns.
|
|
||||||
lb.refreshSubConns(backendAddrs)
|
|
||||||
// Regenerate and update the picker regardless of whether the backends changed
// (whether any SubConn was created/removed). Since the full serverList was
// different, there might be updates in drops or pick weights (a different
// number of duplicates). We need to update the picker with the full list.
//
// Now with the cache, even if a SubConn was created/removed, there might be no
// state changes.
lb.regeneratePicker()
|
|
||||||
lb.cc.UpdateBalancerState(lb.state, lb.picker)
|
|
||||||
}
|
|
||||||
|
|
||||||
// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
// indicating whether the backendAddrs are different from the cached
// backendAddrs (i.e. whether any SubConn was created or removed).
// Caller must hold lb.mu.
func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
|
|
||||||
lb.backendAddrs = nil
|
|
||||||
var backendsUpdated bool
|
|
||||||
// addrsSet is the set converted from backendAddrs; it's used for quick
// lookup of an address.
addrsSet := make(map[resolver.Address]struct{})
|
|
||||||
// Create new SubConns.
|
|
||||||
for _, addr := range backendAddrs {
|
|
||||||
addrWithoutMD := addr
|
|
||||||
addrWithoutMD.Metadata = nil
|
|
||||||
addrsSet[addrWithoutMD] = struct{}{}
|
|
||||||
lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD)
|
|
||||||
|
|
||||||
if _, ok := lb.subConns[addrWithoutMD]; !ok {
|
|
||||||
backendsUpdated = true
|
|
||||||
|
|
||||||
// Use addrWithMD to create the SubConn.
|
|
||||||
sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
|
|
||||||
if _, ok := lb.scStates[sc]; !ok {
|
|
||||||
// Only set state of new sc to IDLE. The state could already be
|
|
||||||
// READY for cached SubConns.
|
|
||||||
lb.scStates[sc] = connectivity.Idle
|
|
||||||
}
|
|
||||||
sc.Connect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for a, sc := range lb.subConns {
|
|
||||||
// a was removed by resolver.
|
|
||||||
if _, ok := addrsSet[a]; !ok {
|
|
||||||
backendsUpdated = true
|
|
||||||
|
|
||||||
lb.cc.RemoveSubConn(sc)
|
|
||||||
delete(lb.subConns, a)
|
|
||||||
// Keep the state of this sc in lb.scStates until sc's state becomes Shutdown.
// The entry will be deleted in HandleSubConnStateChange.
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return backendsUpdated
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
|
|
||||||
for {
|
|
||||||
reply, err := s.Recv()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("grpclb: failed to recv server list: %v", err)
|
|
||||||
}
|
|
||||||
if serverList := reply.GetServerList(); serverList != nil {
|
|
||||||
lb.processServerList(serverList)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
|
|
||||||
ticker := time.NewTicker(interval)
|
|
||||||
defer ticker.Stop()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
case <-s.Context().Done():
|
|
||||||
return
|
|
||||||
}
|
|
||||||
stats := lb.clientStats.toClientStats()
|
|
||||||
t := time.Now()
|
|
||||||
stats.Timestamp = &lbpb.Timestamp{
|
|
||||||
Seconds: t.Unix(),
|
|
||||||
Nanos: int32(t.Nanosecond()),
|
|
||||||
}
|
|
||||||
if err := s.Send(&lbpb.LoadBalanceRequest{
|
|
||||||
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
|
|
||||||
ClientStats: stats,
|
|
||||||
},
|
|
||||||
}); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lb *lbBalancer) callRemoteBalancer() error {
|
|
||||||
lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
stream, err := lbClient.BalanceLoad(ctx, FailFast(false))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// grpclb handshake on the stream.
|
|
||||||
initReq := &lbpb.LoadBalanceRequest{
|
|
||||||
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
|
|
||||||
InitialRequest: &lbpb.InitialLoadBalanceRequest{
|
|
||||||
Name: lb.target,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if err := stream.Send(initReq); err != nil {
|
|
||||||
return fmt.Errorf("grpclb: failed to send init request: %v", err)
|
|
||||||
}
|
|
||||||
reply, err := stream.Recv()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("grpclb: failed to recv init response: %v", err)
|
|
||||||
}
|
|
||||||
initResp := reply.GetInitialResponse()
|
|
||||||
if initResp == nil {
|
|
||||||
return fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
|
|
||||||
}
|
|
||||||
if initResp.LoadBalancerDelegate != "" {
|
|
||||||
return fmt.Errorf("grpclb: Delegation is not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
|
|
||||||
lb.sendLoadReport(stream, d)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return lb.readServerList(stream)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lb *lbBalancer) watchRemoteBalancer() {
|
|
||||||
for {
|
|
||||||
err := lb.callRemoteBalancer()
|
|
||||||
select {
|
|
||||||
case <-lb.doneCh:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Error(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
|
|
||||||
var dopts []DialOption
|
|
||||||
if creds := lb.opt.DialCreds; creds != nil {
|
|
||||||
if err := creds.OverrideServerName(remoteLBName); err == nil {
|
|
||||||
dopts = append(dopts, WithTransportCredentials(creds))
|
|
||||||
} else {
|
|
||||||
grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err)
|
|
||||||
dopts = append(dopts, WithInsecure())
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dopts = append(dopts, WithInsecure())
|
|
||||||
}
|
|
||||||
if lb.opt.Dialer != nil {
|
|
||||||
// WithDialer takes a different type of function, so we instead use a
|
|
||||||
// special DialOption here.
|
|
||||||
dopts = append(dopts, withContextDialer(lb.opt.Dialer))
|
|
||||||
}
|
|
||||||
// Explicitly set pickfirst as the balancer.
|
|
||||||
dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
|
|
||||||
dopts = append(dopts, withResolverBuilder(lb.manualResolver))
|
|
||||||
if channelz.IsOn() {
|
|
||||||
dopts = append(dopts, WithChannelzParentID(lb.opt.ChannelzParentID))
|
|
||||||
}
|
|
||||||
|
|
||||||
// DialContext using manualResolver.Scheme, which is a random scheme generated
// when grpclb is initialized. The target name is not important.
cc, err := DialContext(context.Background(), "grpclb:///grpclb.server", dopts...)
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Fatalf("failed to dial: %v", err)
|
|
||||||
}
|
|
||||||
lb.ccRemoteLB = cc
|
|
||||||
go lb.watchRemoteBalancer()
|
|
||||||
}
@ -1,214 +0,0 @@
/*
|
|
||||||
*
|
|
||||||
* Copyright 2016 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package grpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/balancer"
|
|
||||||
"google.golang.org/grpc/connectivity"
|
|
||||||
"google.golang.org/grpc/resolver"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The parent ClientConn should re-resolve when grpclb loses connection to the
// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
// ResolveNow, and eventually results in re-resolve happening in parent
// ClientConn's resolver (DNS for example).
//
// parent
// ClientConn
// +-----------------------------------------------------------------+
// | parent +---------------------------------+ |
// | DNS ClientConn | grpclb | |
// | resolver balancerWrapper | | |
// | + + | grpclb grpclb | |
// | | | | ManualResolver ClientConn | |
// | | | | + + | |
// | | | | | | Transient | |
// | | | | | | Failure | |
// | | | | | <--------- | | |
// | | | <--------------- | ResolveNow | | |
// | | <--------- | ResolveNow | | | | |
// | | ResolveNow | | | | | |
// | | | | | | | |
// | + + | + + | |
// | +---------------------------------+ |
// +-----------------------------------------------------------------+

// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
// resolver with a special ResolveNow() function.
//
// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
// so when the grpclb client loses contact with the remote balancers, the parent
// ClientConn's resolver will re-resolve.
type lbManualResolver struct {
|
|
||||||
scheme string
|
|
||||||
ccr resolver.ClientConn
|
|
||||||
|
|
||||||
ccb balancer.ClientConn
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
|
|
||||||
r.ccr = cc
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lbManualResolver) Scheme() string {
|
|
||||||
return r.scheme
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResolveNow calls resolveNow on the parent ClientConn.
|
|
||||||
func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) {
|
|
||||||
r.ccb.ResolveNow(o)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close is a noop for Resolver.
|
|
||||||
func (*lbManualResolver) Close() {}
|
|
||||||
|
|
||||||
// NewAddress calls cc.NewAddress.
|
|
||||||
func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
|
|
||||||
r.ccr.NewAddress(addrs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServiceConfig calls cc.NewServiceConfig.
|
|
||||||
func (r *lbManualResolver) NewServiceConfig(sc string) {
|
|
||||||
r.ccr.NewServiceConfig(sc)
|
|
||||||
}
|
|
||||||
|
|
||||||
const subConnCacheTime = time.Second * 10
|
|
||||||
|
|
||||||
// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache.
|
|
||||||
// SubConns will be kept in cache for subConnCacheTime before being removed.
|
|
||||||
//
|
|
||||||
// Its new and remove methods are updated to do cache first.
|
|
||||||
type lbCacheClientConn struct {
|
|
||||||
cc balancer.ClientConn
|
|
||||||
timeout time.Duration
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
// subConnCache only keeps subConns that are being deleted.
|
|
||||||
subConnCache map[resolver.Address]*subConnCacheEntry
|
|
||||||
subConnToAddr map[balancer.SubConn]resolver.Address
|
|
||||||
}
|
|
||||||
|
|
||||||
type subConnCacheEntry struct {
|
|
||||||
sc balancer.SubConn
|
|
||||||
|
|
||||||
cancel func()
|
|
||||||
abortDeleting bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn {
|
|
||||||
return &lbCacheClientConn{
|
|
||||||
cc: cc,
|
|
||||||
timeout: subConnCacheTime,
|
|
||||||
subConnCache: make(map[resolver.Address]*subConnCacheEntry),
|
|
||||||
subConnToAddr: make(map[balancer.SubConn]resolver.Address),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
|
||||||
if len(addrs) != 1 {
|
|
||||||
return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs))
|
|
||||||
}
|
|
||||||
addrWithoutMD := addrs[0]
|
|
||||||
addrWithoutMD.Metadata = nil
|
|
||||||
|
|
||||||
ccc.mu.Lock()
|
|
||||||
defer ccc.mu.Unlock()
|
|
||||||
if entry, ok := ccc.subConnCache[addrWithoutMD]; ok {
|
|
||||||
// If entry is in subConnCache, the SubConn was being deleted.
|
|
||||||
// cancel function will never be nil.
|
|
||||||
entry.cancel()
|
|
||||||
delete(ccc.subConnCache, addrWithoutMD)
|
|
||||||
return entry.sc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
scNew, err := ccc.cc.NewSubConn(addrs, opts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ccc.subConnToAddr[scNew] = addrWithoutMD
|
|
||||||
return scNew, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) {
|
|
||||||
ccc.mu.Lock()
|
|
||||||
defer ccc.mu.Unlock()
|
|
||||||
addr, ok := ccc.subConnToAddr[sc]
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if entry, ok := ccc.subConnCache[addr]; ok {
|
|
||||||
if entry.sc != sc {
|
|
||||||
// This could happen if NewSubConn was called multiple times for the
|
|
||||||
// same address, and those SubConns are all removed. We remove sc
|
|
||||||
// immediately here.
|
|
||||||
delete(ccc.subConnToAddr, sc)
|
|
||||||
ccc.cc.RemoveSubConn(sc)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
entry := &subConnCacheEntry{
|
|
||||||
sc: sc,
|
|
||||||
}
|
|
||||||
ccc.subConnCache[addr] = entry
|
|
||||||
|
|
||||||
timer := time.AfterFunc(ccc.timeout, func() {
|
|
||||||
ccc.mu.Lock()
|
|
||||||
if entry.abortDeleting {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ccc.cc.RemoveSubConn(sc)
|
|
||||||
delete(ccc.subConnToAddr, sc)
|
|
||||||
delete(ccc.subConnCache, addr)
|
|
||||||
ccc.mu.Unlock()
|
|
||||||
})
|
|
||||||
entry.cancel = func() {
|
|
||||||
if !timer.Stop() {
|
|
||||||
// If stop was not successful, the timer has fired (this can only
|
|
||||||
// happen in a race). But the deleting function is blocked on ccc.mu
|
|
||||||
// because the mutex was held by the caller of this function.
|
|
||||||
//
|
|
||||||
// Set abortDeleting to true to abort the deleting function. When
|
|
||||||
// the lock is released, the deleting function will acquire the
|
|
||||||
// lock, check the value of abortDeleting and return.
|
|
||||||
entry.abortDeleting = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ccc *lbCacheClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
|
|
||||||
ccc.cc.UpdateBalancerState(s, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ccc *lbCacheClientConn) close() {
|
|
||||||
ccc.mu.Lock()
|
|
||||||
// Only cancel all existing timers. There's no need to remove SubConns.
|
|
||||||
for _, entry := range ccc.subConnCache {
|
|
||||||
entry.cancel()
|
|
||||||
}
|
|
||||||
ccc.mu.Unlock()
|
|
||||||
}
|
|
|
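The lbCacheClientConn removed above keeps a SubConn in its cache for subConnCacheTime, schedules the real removal with time.AfterFunc, and cancels that timer if the same address is re-added first. A standalone sketch of that delay-then-remove pattern, with made-up types that are not part of gRPC:

```go
// Minimal sketch of the delayed-removal cache pattern, assuming generic keys
// and values; the identity re-check under the lock replaces the abortDeleting
// flag used in the original code.
package cache

import (
	"sync"
	"time"
)

type entry struct {
	value  interface{}
	cancel func()
}

type delayedCache struct {
	mu      sync.Mutex
	timeout time.Duration
	items   map[string]*entry
	drop    func(v interface{}) // called when an item really expires
}

// Remove schedules the item for deletion instead of dropping it immediately.
func (c *delayedCache) Remove(key string, v interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e := &entry{value: v}
	c.items[key] = e
	t := time.AfterFunc(c.timeout, func() {
		c.mu.Lock()
		defer c.mu.Unlock()
		if c.items[key] != e {
			return // revived or replaced in the meantime; nothing to drop
		}
		delete(c.items, key)
		if c.drop != nil {
			c.drop(e.value)
		}
	})
	e.cancel = func() { t.Stop() }
}

// Get revives a pending item if it is still cached, cancelling its removal.
func (c *delayedCache) Get(key string) (interface{}, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.items[key]; ok {
		e.cancel()
		delete(c.items, key)
		return e.value, true
	}
	return nil, false
}
```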
@@ -18,7 +18,7 @@
 // Package grpclog defines logging for grpc.
 //
-// All logs in transport package only go to verbose level 2.
+// All logs in transport and grpclb packages only go to verbose level 2.
 // All logs in other packages in grpc are logged in spite of the verbosity level.
 //
 // In the default logger,
@@ -0,0 +1,107 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package health
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||||
|
"google.golang.org/grpc/internal"
|
||||||
|
"google.golang.org/grpc/internal/backoff"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
const maxDelay = 120 * time.Second
|
||||||
|
|
||||||
|
var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
|
||||||
|
var backoffFunc = func(ctx context.Context, retries int) bool {
|
||||||
|
d := backoffStrategy.Backoff(retries)
|
||||||
|
timer := time.NewTimer(d)
|
||||||
|
select {
|
||||||
|
case <-timer.C:
|
||||||
|
return true
|
||||||
|
case <-ctx.Done():
|
||||||
|
timer.Stop()
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
internal.HealthCheckFunc = clientHealthCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error {
|
||||||
|
tryCnt := 0
|
||||||
|
|
||||||
|
retryConnection:
|
||||||
|
for {
|
||||||
|
// Backs off if the connection has failed in some way without receiving a message in the previous retry.
|
||||||
|
if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
tryCnt++
|
||||||
|
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
rawS, err := newStream()
|
||||||
|
if err != nil {
|
||||||
|
continue retryConnection
|
||||||
|
}
|
||||||
|
|
||||||
|
s, ok := rawS.(grpc.ClientStream)
|
||||||
|
// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
|
||||||
|
if !ok {
|
||||||
|
reportHealth(true)
|
||||||
|
return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF {
|
||||||
|
// Stream should have been closed, so we can safely continue to create a new stream.
|
||||||
|
continue retryConnection
|
||||||
|
}
|
||||||
|
s.CloseSend()
|
||||||
|
|
||||||
|
resp := new(healthpb.HealthCheckResponse)
|
||||||
|
for {
|
||||||
|
err = s.RecvMsg(resp)
|
||||||
|
|
||||||
|
// Reports healthy for the LBing purposes if health check is not implemented in the server.
|
||||||
|
if status.Code(err) == codes.Unimplemented {
|
||||||
|
reportHealth(true)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
|
||||||
|
if err != nil {
|
||||||
|
reportHealth(false)
|
||||||
|
continue retryConnection
|
||||||
|
}
|
||||||
|
|
||||||
|
// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
|
||||||
|
tryCnt = 0
|
||||||
|
reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
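clientHealthCheck above is essentially a retry loop: back off between stream attempts and abort the wait as soon as the context is done. A self-contained sketch of that loop shape, with a placeholder connect function and simple doubling instead of the gRPC backoff strategy:

```go
// Sketch only: the shape of a context-aware retry loop, not gRPC API.
package retry

import (
	"context"
	"time"
)

func retryWithBackoff(ctx context.Context, connect func(context.Context) error) error {
	backoff := time.Second
	const maxBackoff = 2 * time.Minute
	for attempt := 0; ; attempt++ {
		if attempt > 0 {
			// Wait out the backoff, but give up immediately if the context ends.
			t := time.NewTimer(backoff)
			select {
			case <-t.C:
			case <-ctx.Done():
				t.Stop()
				return ctx.Err()
			}
			if backoff *= 2; backoff > maxBackoff {
				backoff = maxBackoff
			}
		}
		if err := ctx.Err(); err != nil {
			return err
		}
		if err := connect(ctx); err != nil {
			continue // connection or stream broke; try again after the next backoff
		}
		return nil
	}
}
```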
@@ -1,5 +1,5 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc_health_v1/health.proto
+// source: grpc/health/v1/health.proto

 package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"

@ -26,31 +26,34 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
type HealthCheckResponse_ServingStatus int32
|
type HealthCheckResponse_ServingStatus int32
|
||||||
|
|
||||||
const (
|
const (
|
||||||
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
|
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
|
||||||
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
|
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
|
||||||
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
|
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
|
||||||
|
HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3
|
||||||
)
|
)
|
||||||
|
|
||||||
var HealthCheckResponse_ServingStatus_name = map[int32]string{
|
var HealthCheckResponse_ServingStatus_name = map[int32]string{
|
||||||
0: "UNKNOWN",
|
0: "UNKNOWN",
|
||||||
1: "SERVING",
|
1: "SERVING",
|
||||||
2: "NOT_SERVING",
|
2: "NOT_SERVING",
|
||||||
|
3: "SERVICE_UNKNOWN",
|
||||||
}
|
}
|
||||||
var HealthCheckResponse_ServingStatus_value = map[string]int32{
|
var HealthCheckResponse_ServingStatus_value = map[string]int32{
|
||||||
"UNKNOWN": 0,
|
"UNKNOWN": 0,
|
||||||
"SERVING": 1,
|
"SERVING": 1,
|
||||||
"NOT_SERVING": 2,
|
"NOT_SERVING": 2,
|
||||||
|
"SERVICE_UNKNOWN": 3,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x HealthCheckResponse_ServingStatus) String() string {
|
func (x HealthCheckResponse_ServingStatus) String() string {
|
||||||
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
|
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
|
||||||
}
|
}
|
||||||
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
|
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_health_8e5b8a3074428511, []int{1, 0}
|
return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
|
||||||
}
|
}
|
||||||
|
|
||||||
type HealthCheckRequest struct {
|
type HealthCheckRequest struct {
|
||||||
Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
|
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
XXX_sizecache int32 `json:"-"`
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
@ -60,7 +63,7 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
|
||||||
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
|
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
|
||||||
func (*HealthCheckRequest) ProtoMessage() {}
|
func (*HealthCheckRequest) ProtoMessage() {}
|
||||||
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
|
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_health_8e5b8a3074428511, []int{0}
|
return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
|
||||||
}
|
}
|
||||||
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
|
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
|
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
|
||||||
|
@ -88,7 +91,7 @@ func (m *HealthCheckRequest) GetService() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type HealthCheckResponse struct {
|
type HealthCheckResponse struct {
|
||||||
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
|
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||||
XXX_unrecognized []byte `json:"-"`
|
XXX_unrecognized []byte `json:"-"`
|
||||||
XXX_sizecache int32 `json:"-"`
|
XXX_sizecache int32 `json:"-"`
|
||||||
|
@ -98,7 +101,7 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
|
||||||
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
|
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
|
||||||
func (*HealthCheckResponse) ProtoMessage() {}
|
func (*HealthCheckResponse) ProtoMessage() {}
|
||||||
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
|
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_health_8e5b8a3074428511, []int{1}
|
return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
|
||||||
}
|
}
|
||||||
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
|
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
|
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
|
||||||
|
@ -139,10 +142,29 @@ var _ grpc.ClientConn
|
||||||
// is compatible with the grpc package it is being compiled against.
|
// is compatible with the grpc package it is being compiled against.
|
||||||
const _ = grpc.SupportPackageIsVersion4
|
const _ = grpc.SupportPackageIsVersion4
|
||||||
|
|
||||||
// Client API for Health service
|
// HealthClient is the client API for Health service.
|
||||||
|
//
|
||||||
|
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||||
type HealthClient interface {
|
type HealthClient interface {
|
||||||
|
// If the requested service is unknown, the call will fail with status
|
||||||
|
// NOT_FOUND.
|
||||||
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
|
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
|
||||||
|
// Performs a watch for the serving status of the requested service.
|
||||||
|
// The server will immediately send back a message indicating the current
|
||||||
|
// serving status. It will then subsequently send a new message whenever
|
||||||
|
// the service's serving status changes.
|
||||||
|
//
|
||||||
|
// If the requested service is unknown when the call is received, the
|
||||||
|
// server will send a message setting the serving status to
|
||||||
|
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
|
||||||
|
// future point, the serving status of the service becomes known, the
|
||||||
|
// server will send a new message with the service's serving status.
|
||||||
|
//
|
||||||
|
// If the call terminates with status UNIMPLEMENTED, then clients
|
||||||
|
// should assume this method is not supported and should not retry the
|
||||||
|
// call. If the call terminates with any other status (including OK),
|
||||||
|
// clients should retry the call with appropriate exponential backoff.
|
||||||
|
Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type healthClient struct {
|
type healthClient struct {
|
||||||
|
@ -155,17 +177,66 @@ func NewHealthClient(cc *grpc.ClientConn) HealthClient {
|
||||||
|
|
||||||
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
|
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
|
||||||
out := new(HealthCheckResponse)
|
out := new(HealthCheckResponse)
|
||||||
err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...)
|
err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Server API for Health service
|
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
|
||||||
|
stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
x := &healthWatchClient{stream}
|
||||||
|
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := x.ClientStream.CloseSend(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type Health_WatchClient interface {
|
||||||
|
Recv() (*HealthCheckResponse, error)
|
||||||
|
grpc.ClientStream
|
||||||
|
}
|
||||||
|
|
||||||
|
type healthWatchClient struct {
|
||||||
|
grpc.ClientStream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
|
||||||
|
m := new(HealthCheckResponse)
|
||||||
|
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HealthServer is the server API for Health service.
|
||||||
type HealthServer interface {
|
type HealthServer interface {
|
||||||
|
// If the requested service is unknown, the call will fail with status
|
||||||
|
// NOT_FOUND.
|
||||||
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
|
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
|
||||||
|
// Performs a watch for the serving status of the requested service.
|
||||||
|
// The server will immediately send back a message indicating the current
|
||||||
|
// serving status. It will then subsequently send a new message whenever
|
||||||
|
// the service's serving status changes.
|
||||||
|
//
|
||||||
|
// If the requested service is unknown when the call is received, the
|
||||||
|
// server will send a message setting the serving status to
|
||||||
|
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
|
||||||
|
// future point, the serving status of the service becomes known, the
|
||||||
|
// server will send a new message with the service's serving status.
|
||||||
|
//
|
||||||
|
// If the call terminates with status UNIMPLEMENTED, then clients
|
||||||
|
// should assume this method is not supported and should not retry the
|
||||||
|
// call. If the call terminates with any other status (including OK),
|
||||||
|
// clients should retry the call with appropriate exponential backoff.
|
||||||
|
Watch(*HealthCheckRequest, Health_WatchServer) error
|
||||||
}
|
}
|
||||||
|
|
||||||
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
|
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
|
||||||
|
@ -190,6 +261,27 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf
|
||||||
return interceptor(ctx, in, info, handler)
|
return interceptor(ctx, in, info, handler)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||||
|
m := new(HealthCheckRequest)
|
||||||
|
if err := stream.RecvMsg(m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
|
||||||
|
}
|
||||||
|
|
||||||
|
type Health_WatchServer interface {
|
||||||
|
Send(*HealthCheckResponse) error
|
||||||
|
grpc.ServerStream
|
||||||
|
}
|
||||||
|
|
||||||
|
type healthWatchServer struct {
|
||||||
|
grpc.ServerStream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
|
||||||
|
return x.ServerStream.SendMsg(m)
|
||||||
|
}
|
||||||
|
|
||||||
var _Health_serviceDesc = grpc.ServiceDesc{
|
var _Health_serviceDesc = grpc.ServiceDesc{
|
||||||
ServiceName: "grpc.health.v1.Health",
|
ServiceName: "grpc.health.v1.Health",
|
||||||
HandlerType: (*HealthServer)(nil),
|
HandlerType: (*HealthServer)(nil),
|
||||||
|
@ -199,29 +291,37 @@ var _Health_serviceDesc = grpc.ServiceDesc{
|
||||||
Handler: _Health_Check_Handler,
|
Handler: _Health_Check_Handler,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Streams: []grpc.StreamDesc{},
|
Streams: []grpc.StreamDesc{
|
||||||
Metadata: "grpc_health_v1/health.proto",
|
{
|
||||||
|
StreamName: "Watch",
|
||||||
|
Handler: _Health_Watch_Handler,
|
||||||
|
ServerStreams: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Metadata: "grpc/health/v1/health.proto",
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor_health_8e5b8a3074428511) }
|
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
|
||||||
|
|
||||||
var fileDescriptor_health_8e5b8a3074428511 = []byte{
|
var fileDescriptor_health_6b1a06aa67f91efd = []byte{
|
||||||
// 269 bytes of a gzipped FileDescriptorProto
|
// 297 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
|
||||||
0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a,
|
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
|
||||||
0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21,
|
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
|
||||||
0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48,
|
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
|
||||||
0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33,
|
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
|
||||||
0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55,
|
0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
|
||||||
0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f,
|
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
|
||||||
0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41,
|
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
|
||||||
0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3,
|
0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
|
||||||
0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85,
|
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
|
||||||
0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b,
|
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
|
||||||
0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44,
|
0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
|
||||||
0xb8, 0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28,
|
0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
|
||||||
0x70, 0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12,
|
0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
|
||||||
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x41, 0x1a, 0xa0, 0x71, 0xa0, 0x8f, 0x1a, 0x33, 0xab,
|
0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
|
||||||
0x98, 0xf8, 0xdc, 0x41, 0xa6, 0x41, 0x8c, 0xd0, 0x0b, 0x33, 0x4c, 0x62, 0x03, 0x47, 0x92, 0x31,
|
0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
|
||||||
0x20, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x70, 0xc4, 0xa7, 0xc3, 0x01, 0x00, 0x00,
|
0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
|
||||||
|
0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
|
||||||
|
0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
|
|
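The regenerated grpc_health_v1 package above adds the streaming Watch RPC next to Check. A hedged usage sketch of the generated client, with a placeholder address, a placeholder service name, and an insecure connection for brevity:

```go
// Sketch of consuming the regenerated health API shown above.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	cc, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
	client := healthpb.NewHealthClient(cc)

	// One-shot check; an empty service name means the server as a whole.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	resp, err := client.Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("check: %v", err)
	}
	log.Printf("status: %v", resp.Status)

	// Streaming watch of one service; the server pushes a message on every change.
	stream, err := client.Watch(context.Background(), &healthpb.HealthCheckRequest{Service: "my.package.MyService"})
	if err != nil {
		log.Fatalf("watch: %v", err)
	}
	for {
		update, err := stream.Recv()
		if err != nil {
			log.Fatalf("recv: %v", err)
		}
		log.Printf("serving status changed: %v", update.Status)
	}
}
```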
@@ -1,44 +0,0 @@
|
||||||
// Copyright 2015, gRPC Authors
|
|
||||||
// All rights reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// The canonical version of this proto can be found at
|
|
||||||
// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package grpc.health.v1;
|
|
||||||
|
|
||||||
option csharp_namespace = "Grpc.Health.V1";
|
|
||||||
option go_package = "google.golang.org/grpc/health/grpc_health_v1";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option java_outer_classname = "HealthProto";
|
|
||||||
option java_package = "io.grpc.health.v1";
|
|
||||||
|
|
||||||
message HealthCheckRequest {
|
|
||||||
string service = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message HealthCheckResponse {
|
|
||||||
enum ServingStatus {
|
|
||||||
UNKNOWN = 0;
|
|
||||||
SERVING = 1;
|
|
||||||
NOT_SERVING = 2;
|
|
||||||
}
|
|
||||||
ServingStatus status = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
service Health {
|
|
||||||
rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
|
|
||||||
}
|
|
|
@@ -1,72 +0,0 @@
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2017 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
//go:generate protoc --go_out=plugins=grpc,paths=source_relative:. grpc_health_v1/health.proto
|
|
||||||
|
|
||||||
// Package health provides some utility functions to health-check a server. The implementation
|
|
||||||
// is based on protobuf. Users need to write their own implementations if other IDLs are used.
|
|
||||||
package health
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Server implements `service Health`.
|
|
||||||
type Server struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
// statusMap stores the serving status of the services this Server monitors.
|
|
||||||
statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServer returns a new Server.
|
|
||||||
func NewServer() *Server {
|
|
||||||
return &Server{
|
|
||||||
statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check implements `service Health`.
|
|
||||||
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
if in.Service == "" {
|
|
||||||
// check the server overall health status.
|
|
||||||
return &healthpb.HealthCheckResponse{
|
|
||||||
Status: healthpb.HealthCheckResponse_SERVING,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
if status, ok := s.statusMap[in.Service]; ok {
|
|
||||||
return &healthpb.HealthCheckResponse{
|
|
||||||
Status: status,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return nil, status.Error(codes.NotFound, "unknown service")
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetServingStatus is called when need to reset the serving status of a service
|
|
||||||
// or insert a new service entry into the statusMap.
|
|
||||||
func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
|
|
||||||
s.mu.Lock()
|
|
||||||
s.statusMap[service] = status
|
|
||||||
s.mu.Unlock()
|
|
||||||
}
|
|
|
@@ -0,0 +1,165 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2017 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
//go:generate ./regenerate.sh
|
||||||
|
|
||||||
|
// Package health provides a service that exposes server's health and it must be
|
||||||
|
// imported to enable support for client-side health checks.
|
||||||
|
package health
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
|
||||||
|
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server implements `service Health`.
|
||||||
|
type Server struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
// If shutdown is true, it's expected all serving status is NOT_SERVING, and
|
||||||
|
// will stay in NOT_SERVING.
|
||||||
|
shutdown bool
|
||||||
|
// statusMap stores the serving status of the services this Server monitors.
|
||||||
|
statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
|
||||||
|
updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServer returns a new Server.
|
||||||
|
func NewServer() *Server {
|
||||||
|
return &Server{
|
||||||
|
statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
|
||||||
|
updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check implements `service Health`.
|
||||||
|
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
if servingStatus, ok := s.statusMap[in.Service]; ok {
|
||||||
|
return &healthpb.HealthCheckResponse{
|
||||||
|
Status: servingStatus,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return nil, status.Error(codes.NotFound, "unknown service")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Watch implements `service Health`.
|
||||||
|
func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
|
||||||
|
service := in.Service
|
||||||
|
// update channel is used for getting service status updates.
|
||||||
|
update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
|
||||||
|
s.mu.Lock()
|
||||||
|
// Puts the initial status to the channel.
|
||||||
|
if servingStatus, ok := s.statusMap[service]; ok {
|
||||||
|
update <- servingStatus
|
||||||
|
} else {
|
||||||
|
update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
|
||||||
|
}
|
||||||
|
|
||||||
|
// Registers the update channel to the correct place in the updates map.
|
||||||
|
if _, ok := s.updates[service]; !ok {
|
||||||
|
s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus)
|
||||||
|
}
|
||||||
|
s.updates[service][stream] = update
|
||||||
|
defer func() {
|
||||||
|
s.mu.Lock()
|
||||||
|
delete(s.updates[service], stream)
|
||||||
|
s.mu.Unlock()
|
||||||
|
}()
|
||||||
|
s.mu.Unlock()
|
||||||
|
|
||||||
|
var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
// Status updated. Sends the up-to-date status to the client.
|
||||||
|
case servingStatus := <-update:
|
||||||
|
if lastSentStatus == servingStatus {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
lastSentStatus = servingStatus
|
||||||
|
err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus})
|
||||||
|
if err != nil {
|
||||||
|
return status.Error(codes.Canceled, "Stream has ended.")
|
||||||
|
}
|
||||||
|
// Context done. Removes the update channel from the updates map.
|
||||||
|
case <-stream.Context().Done():
|
||||||
|
return status.Error(codes.Canceled, "Stream has ended.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetServingStatus is called to reset the serving status of a service
|
||||||
|
// or insert a new service entry into the statusMap.
|
||||||
|
func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
if s.shutdown {
|
||||||
|
grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.setServingStatusLocked(service, servingStatus)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
|
||||||
|
s.statusMap[service] = servingStatus
|
||||||
|
for _, update := range s.updates[service] {
|
||||||
|
// Clears previous updates, that are not sent to the client, from the channel.
|
||||||
|
// This can happen if the client is not reading and the server gets flow control limited.
|
||||||
|
select {
|
||||||
|
case <-update:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
// Puts the most recent update to the channel.
|
||||||
|
update <- servingStatus
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown sets all serving status to NOT_SERVING, and configures the server to
|
||||||
|
// ignore all future status changes.
|
||||||
|
//
|
||||||
|
// This changes serving status for all services. To set status for a particular
|
||||||
|
// service, call SetServingStatus().
|
||||||
|
func (s *Server) Shutdown() {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
s.shutdown = true
|
||||||
|
for service := range s.statusMap {
|
||||||
|
s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resume sets all serving status to SERVING, and configures the server to
|
||||||
|
// accept all future status changes.
|
||||||
|
//
|
||||||
|
// This changes serving status for all services. To set status for a particular
|
||||||
|
// service, call SetServingStatus().
|
||||||
|
func (s *Server) Resume() {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
s.shutdown = false
|
||||||
|
for service := range s.statusMap {
|
||||||
|
s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING)
|
||||||
|
}
|
||||||
|
}
|
|
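On the serving side, the new health server above is registered like any other generated service, and SetServingStatus feeds both Check and the Watch streams. A sketch with a placeholder listen address and service name:

```go
// Sketch of wiring the health server shown above into a gRPC server.
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()

	hs := health.NewServer() // overall ("") status starts as SERVING
	healthpb.RegisterHealthServer(s, hs)

	// Flip a per-service status; any open Watch streams receive the change.
	hs.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING)

	// During a drain, Shutdown marks everything NOT_SERVING; Resume undoes it.
	// hs.Shutdown()
	// hs.Resume()

	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```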
@@ -19,7 +19,7 @@
 package grpc

 import (
-	"golang.org/x/net/context"
+	"context"
 )

 // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
@@ -0,0 +1,78 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2017 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package backoff implements the backoff strategy for gRPC.
|
||||||
|
//
|
||||||
|
// This is kept in internal until the gRPC project decides whether or not to
|
||||||
|
// allow alternative backoff strategies.
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/internal/grpcrand"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Strategy defines the methodology for backing off after a grpc connection
|
||||||
|
// failure.
|
||||||
|
//
|
||||||
|
type Strategy interface {
|
||||||
|
// Backoff returns the amount of time to wait before the next retry given
|
||||||
|
// the number of consecutive failures.
|
||||||
|
Backoff(retries int) time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// baseDelay is the amount of time to wait before retrying after the first
|
||||||
|
// failure.
|
||||||
|
baseDelay = 1.0 * time.Second
|
||||||
|
// factor is applied to the backoff after each retry.
|
||||||
|
factor = 1.6
|
||||||
|
// jitter provides a range to randomize backoff delays.
|
||||||
|
jitter = 0.2
|
||||||
|
)
|
||||||
|
|
||||||
|
// Exponential implements exponential backoff algorithm as defined in
|
||||||
|
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||||
|
type Exponential struct {
|
||||||
|
// MaxDelay is the upper bound of backoff delay.
|
||||||
|
MaxDelay time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backoff returns the amount of time to wait before the next retry given the
|
||||||
|
// number of retries.
|
||||||
|
func (bc Exponential) Backoff(retries int) time.Duration {
|
||||||
|
if retries == 0 {
|
||||||
|
return baseDelay
|
||||||
|
}
|
||||||
|
backoff, max := float64(baseDelay), float64(bc.MaxDelay)
|
||||||
|
for backoff < max && retries > 0 {
|
||||||
|
backoff *= factor
|
||||||
|
retries--
|
||||||
|
}
|
||||||
|
if backoff > max {
|
||||||
|
backoff = max
|
||||||
|
}
|
||||||
|
// Randomize backoff delays so that if a cluster of requests start at
|
||||||
|
// the same time, they won't operate in lockstep.
|
||||||
|
backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
|
||||||
|
if backoff < 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return time.Duration(backoff)
|
||||||
|
}
|
|
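The Exponential strategy above starts at a 1s base, multiplies by 1.6 per retry, caps the result at MaxDelay, and only then applies the ±20% jitter. A tiny standalone illustration of the un-jittered delay curve (not gRPC API, just the same arithmetic):

```go
// Prints the approximate delay for the first few retries before jitter.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		base   = float64(time.Second)
		factor = 1.6
		max    = float64(120 * time.Second)
	)
	d := base
	for retries := 0; retries <= 5; retries++ {
		fmt.Printf("retry %d: ~%v\n", retries, time.Duration(d))
		if d *= factor; d > max {
			d = max
		}
	}
	// ~1s, ~1.6s, ~2.56s, ~4.1s, ~6.55s, ~10.5s (each then jittered by ±20%)
}
```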
@@ -0,0 +1,46 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2019 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package balancerload defines APIs to parse server loads in trailers. The
|
||||||
|
// parsed loads are sent to balancers in DoneInfo.
|
||||||
|
package balancerload
|
||||||
|
|
||||||
|
import (
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Parser converts loads from metadata into a concrete type.
|
||||||
|
type Parser interface {
|
||||||
|
// Parse parses loads from metadata.
|
||||||
|
Parse(md metadata.MD) interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var parser Parser
|
||||||
|
|
||||||
|
// SetParser sets the load parser.
|
||||||
|
//
|
||||||
|
// Not mutex-protected, should be called before any gRPC functions.
|
||||||
|
func SetParser(lr Parser) {
|
||||||
|
parser = lr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse calls parser.Parse().
|
||||||
|
func Parse(md metadata.MD) interface{} {
|
||||||
|
if parser == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return parser.Parse(md)
|
||||||
|
}
|
|
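balancerload is an internal package, so the following is only an illustration of the Parser shape defined above: a toy parser that reads a made-up "x-server-load" trailer key and hands the value to balancers via DoneInfo.

```go
// Illustration only; the metadata key and the package name are made up.
package balancerloadexample

import (
	"strconv"

	"google.golang.org/grpc/metadata"
)

type utilizationParser struct{}

// Parse reads a single float from the trailer metadata, or nil when absent.
func (utilizationParser) Parse(md metadata.MD) interface{} {
	vals := md.Get("x-server-load")
	if len(vals) == 0 {
		return nil
	}
	load, err := strconv.ParseFloat(vals[0], 64)
	if err != nil {
		return nil
	}
	return load
}
```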
@@ -0,0 +1,167 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package binarylog implements binary logging as defined in
|
||||||
|
// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
|
||||||
|
package binarylog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Logger is the global binary logger. It can be used to get binary logger for
|
||||||
|
// each method.
|
||||||
|
type Logger interface {
|
||||||
|
getMethodLogger(methodName string) *MethodLogger
|
||||||
|
}
|
||||||
|
|
||||||
|
// binLogger is the global binary logger for the binary. One of this should be
|
||||||
|
// built at init time from the configuration (environment variable or flags).
|
||||||
|
//
|
||||||
|
// It is used to get a methodLogger for each individual method.
|
||||||
|
var binLogger Logger
|
||||||
|
|
||||||
|
// SetLogger sets the binary logger.
|
||||||
|
//
|
||||||
|
// Only call this at init time.
|
||||||
|
func SetLogger(l Logger) {
|
||||||
|
binLogger = l
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMethodLogger returns the methodLogger for the given methodName.
|
||||||
|
//
|
||||||
|
// methodName should be in the format of "/service/method".
|
||||||
|
//
|
||||||
|
// Each methodLogger returned by this method is a new instance. This is to
|
||||||
|
// generate sequence id within the call.
|
||||||
|
func GetMethodLogger(methodName string) *MethodLogger {
|
||||||
|
if binLogger == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return binLogger.getMethodLogger(methodName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
const envStr = "GRPC_BINARY_LOG_FILTER"
|
||||||
|
configStr := os.Getenv(envStr)
|
||||||
|
binLogger = NewLoggerFromConfigString(configStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
type methodLoggerConfig struct {
|
||||||
|
// Max length of header and message.
|
||||||
|
hdr, msg uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type logger struct {
|
||||||
|
all *methodLoggerConfig
|
||||||
|
services map[string]*methodLoggerConfig
|
||||||
|
methods map[string]*methodLoggerConfig
|
||||||
|
|
||||||
|
blacklist map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEmptyLogger creates an empty logger. The map fields need to be filled in
|
||||||
|
// using the set* functions.
|
||||||
|
func newEmptyLogger() *logger {
|
||||||
|
return &logger{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set method logger for "*".
|
||||||
|
func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
|
||||||
|
if l.all != nil {
|
||||||
|
return fmt.Errorf("conflicting global rules found")
|
||||||
|
}
|
||||||
|
l.all = ml
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set method logger for "service/*".
|
||||||
|
//
|
||||||
|
// New methodLogger with same service overrides the old one.
|
||||||
|
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
|
||||||
|
if _, ok := l.services[service]; ok {
|
||||||
|
return fmt.Errorf("conflicting rules for service %v found", service)
|
||||||
|
}
|
||||||
|
if l.services == nil {
|
||||||
|
l.services = make(map[string]*methodLoggerConfig)
|
||||||
|
}
|
||||||
|
l.services[service] = ml
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set method logger for "service/method".
|
||||||
|
//
|
||||||
|
// New methodLogger with same method overrides the old one.
|
||||||
|
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
|
||||||
|
if _, ok := l.blacklist[method]; ok {
|
||||||
|
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||||
|
}
|
||||||
|
if _, ok := l.methods[method]; ok {
|
||||||
|
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||||
|
}
|
||||||
|
if l.methods == nil {
|
||||||
|
l.methods = make(map[string]*methodLoggerConfig)
|
||||||
|
}
|
||||||
|
l.methods[method] = ml
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set blacklist method for "-service/method".
|
||||||
|
func (l *logger) setBlacklist(method string) error {
|
||||||
|
if _, ok := l.blacklist[method]; ok {
|
||||||
|
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||||
|
}
|
||||||
|
if _, ok := l.methods[method]; ok {
|
||||||
|
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||||
|
}
|
||||||
|
if l.blacklist == nil {
|
||||||
|
l.blacklist = make(map[string]struct{})
|
||||||
|
}
|
||||||
|
l.blacklist[method] = struct{}{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMethodLogger returns the methodLogger for the given methodName.
|
||||||
|
//
|
||||||
|
// methodName should be in the format of "/service/method".
|
||||||
|
//
|
||||||
|
// Each methodLogger returned by this method is a new instance. This is to
|
||||||
|
// generate sequence id within the call.
|
||||||
|
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
|
||||||
|
s, m, err := parseMethodName(methodName)
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if ml, ok := l.methods[s+"/"+m]; ok {
|
||||||
|
return newMethodLogger(ml.hdr, ml.msg)
|
||||||
|
}
|
||||||
|
if _, ok := l.blacklist[s+"/"+m]; ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if ml, ok := l.services[s]; ok {
|
||||||
|
return newMethodLogger(ml.hdr, ml.msg)
|
||||||
|
}
|
||||||
|
if l.all == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return newMethodLogger(l.all.hdr, l.all.msg)
|
||||||
|
}
|
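A short illustration of the precedence getMethodLogger applies above (exact method rule, then blacklist, then service rule, then the "*" default). It only compiles inside this internal package, and the rules are made up for the example.

```go
// Illustration of rule precedence; not importable by applications.
package binarylog

func examplePrecedence() {
	l := newEmptyLogger()
	_ = l.setDefaultMethodLogger(&methodLoggerConfig{hdr: 256, msg: 256})                      // "*{h:256;m:256}"
	_ = l.setServiceMethodLogger("Foo", &methodLoggerConfig{hdr: ^uint64(0), msg: ^uint64(0)}) // "Foo/*", no limits
	_ = l.setBlacklist("Foo/Bar")                                                              // "-Foo/Bar"

	_ = l.getMethodLogger("/Foo/Baz") // service rule wins: headers and messages fully logged
	_ = l.getMethodLogger("/Foo/Bar") // blacklisted: nil, nothing logged
	_ = l.getMethodLogger("/Other/M") // falls back to the default "*" rule
}
```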
vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go (42 lines, generated, vendored, new file)
|
@@ -0,0 +1,42 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// This file contains exported variables/functions that are exported for testing
|
||||||
|
// only.
|
||||||
|
//
|
||||||
|
// An ideal way for this would be to put those in a *_test.go but in binarylog
|
||||||
|
// package. But this doesn't work with staticcheck with go module. Error was:
|
||||||
|
// "MdToMetadataProto not declared by package binarylog". This could be caused
|
||||||
|
// by the way staticcheck looks for files for a certain package, which doesn't
|
||||||
|
// support *_test.go files.
|
||||||
|
//
|
||||||
|
// Move those to binary_test.go when staticcheck is fixed.
|
||||||
|
|
||||||
|
package binarylog
|
||||||
|
|
||||||
|
var (
|
||||||
|
// AllLogger is a logger that logs all headers/messages for all RPCs. It's
|
||||||
|
// for testing only.
|
||||||
|
AllLogger = NewLoggerFromConfigString("*")
|
||||||
|
// MdToMetadataProto converts metadata to a binary logging proto message.
|
||||||
|
// It's for testing only.
|
||||||
|
MdToMetadataProto = mdToMetadataProto
|
||||||
|
// AddrToProto converts an address to a binary logging proto message. It's
|
||||||
|
// for testing only.
|
||||||
|
AddrToProto = addrToProto
|
||||||
|
)
|
|
@@ -0,0 +1,210 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package binarylog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewLoggerFromConfigString reads the string and builds a logger. It can be used
|
||||||
|
// to build a new logger and assign it to binarylog.Logger.
|
||||||
|
//
|
||||||
|
// Example filter config strings:
|
||||||
|
// - "" Nothing will be logged
|
||||||
|
// - "*" All headers and messages will be fully logged.
|
||||||
|
// - "*{h}" Only headers will be logged.
|
||||||
|
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
|
||||||
|
// - "Foo/*" Logs every method in service Foo
|
||||||
|
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
|
||||||
|
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
|
||||||
|
// /Foo/Bar, logs all headers and messages in every other method in service
|
||||||
|
// Foo.
|
||||||
|
//
|
||||||
|
// If two configs exist for one certain method or service, the one specified
|
||||||
|
// later overrides the previous config.
|
||||||
|
func NewLoggerFromConfigString(s string) Logger {
|
||||||
|
if s == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
l := newEmptyLogger()
|
||||||
|
methods := strings.Split(s, ",")
|
||||||
|
for _, method := range methods {
|
||||||
|
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
|
||||||
|
grpclog.Warningf("failed to parse binary log config: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
|
||||||
|
// it to the right map in the logger.
|
||||||
|
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
|
||||||
|
// "" is invalid.
|
||||||
|
if config == "" {
|
||||||
|
return errors.New("empty string is not a valid method binary logging config")
|
||||||
|
}
|
||||||
|
|
||||||
|
// "-service/method", blacklist, no * or {} allowed.
|
||||||
|
if config[0] == '-' {
|
||||||
|
s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid config: %q, %v", config, err)
|
||||||
|
}
|
||||||
|
if m == "*" {
|
||||||
|
return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
|
||||||
|
}
|
||||||
|
if suffix != "" {
|
||||||
|
return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
|
||||||
|
}
|
||||||
|
if err := l.setBlacklist(s + "/" + m); err != nil {
|
||||||
|
return fmt.Errorf("invalid config: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// "*{h:256;m:256}"
|
||||||
|
if config[0] == '*' {
|
||||||
|
hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid config: %q, %v", config, err)
|
||||||
|
}
|
||||||
|
if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
|
||||||
|
return fmt.Errorf("invalid config: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
s, m, suffix, err := parseMethodConfigAndSuffix(config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid config: %q, %v", config, err)
|
||||||
|
}
|
||||||
|
hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
|
||||||
|
}
|
||||||
|
if m == "*" {
|
||||||
|
if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
|
||||||
|
return fmt.Errorf("invalid config: %v", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
|
||||||
|
return fmt.Errorf("invalid config: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// TODO: this const is only used by env_config now. But could be useful for
|
||||||
|
// other config. Move to binarylog.go if necessary.
|
||||||
|
maxUInt = ^uint64(0)
|
||||||
|
|
||||||
|
// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
|
||||||
|
// expected output.
|
||||||
|
longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
|
||||||
|
|
||||||
|
// For suffix from above, "{h:123,m:123}". See test for expected output.
|
||||||
|
optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
|
||||||
|
headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$`
|
||||||
|
messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$`
|
||||||
|
headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr)
|
||||||
|
headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr)
|
||||||
|
messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr)
|
||||||
|
headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Turn "service/method{h;m}" into "service", "method", "{h;m}".
|
||||||
|
func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
|
||||||
|
// Regexp result:
|
||||||
|
//
|
||||||
|
// in: "p.s/m{h:123,m:123}",
|
||||||
|
// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
|
||||||
|
match := longMethodConfigRegexp.FindStringSubmatch(c)
|
||||||
|
if match == nil {
|
||||||
|
return "", "", "", fmt.Errorf("%q contains invalid substring", c)
|
||||||
|
}
|
||||||
|
service = match[1]
|
||||||
|
method = match[2]
|
||||||
|
suffix = match[3]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Turn "{h:123;m:345}" into 123, 345.
|
||||||
|
//
|
||||||
|
// Return maxUInt if length is unspecified.
|
||||||
|
func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
|
||||||
|
if c == "" {
|
||||||
|
return maxUInt, maxUInt, nil
|
||||||
|
}
|
||||||
|
// Header config only.
|
||||||
|
if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
|
||||||
|
if s := match[1]; s != "" {
|
||||||
|
hdrLenStr, err = strconv.ParseUint(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
|
||||||
|
}
|
||||||
|
return hdrLenStr, 0, nil
|
||||||
|
}
|
||||||
|
return maxUInt, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message config only.
|
||||||
|
if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
|
||||||
|
if s := match[1]; s != "" {
|
||||||
|
msgLenStr, err = strconv.ParseUint(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
|
||||||
|
}
|
||||||
|
return 0, msgLenStr, nil
|
||||||
|
}
|
||||||
|
return 0, maxUInt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Header and message config both.
|
||||||
|
if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
|
||||||
|
// Both hdr and msg are specified, but one or two of them might be empty.
|
||||||
|
hdrLenStr = maxUInt
|
||||||
|
msgLenStr = maxUInt
|
||||||
|
if s := match[1]; s != "" {
|
||||||
|
hdrLenStr, err = strconv.ParseUint(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if s := match[2]; s != "" {
|
||||||
|
msgLenStr, err = strconv.ParseUint(s, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return hdrLenStr, msgLenStr, nil
|
||||||
|
}
|
||||||
|
return 0, 0, fmt.Errorf("%q contains invalid substring", c)
|
||||||
|
}
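
To make the filter grammar above concrete, here is a minimal standalone sketch (not part of the vendored package; the names are illustrative) that runs the same regular expressions against a sample config entry:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same patterns as env_config.go above: "service/method" with an optional
// "{h:N;m:N}" suffix that caps header and message bytes.
var (
	methodRe = regexp.MustCompile(`^([\w./]+)/((?:\w+)|[*])(.+)?$`)
	hdrMsgRe = regexp.MustCompile(`^{h(?::(\d+))?;m(?::(\d+))?}$`)
)

func main() {
	cfg := "p.s/m{h:123;m:456}" // hypothetical filter entry

	m := methodRe.FindStringSubmatch(cfg)
	if m == nil {
		fmt.Println("invalid config")
		return
	}
	fmt.Println("service:", m[1]) // "p.s"
	fmt.Println("method:", m[2])  // "m"

	// The suffix, if present, is parsed again for the byte limits.
	if lim := hdrMsgRe.FindStringSubmatch(m[3]); lim != nil {
		fmt.Println("header limit:", lim[1])  // "123"
		fmt.Println("message limit:", lim[2]) // "456"
	}
}
```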
|
423 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go generated vendored Normal file
|
@ -0,0 +1,423 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package binarylog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/golang/protobuf/ptypes"
|
||||||
|
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
type callIDGenerator struct {
|
||||||
|
id uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *callIDGenerator) next() uint64 {
|
||||||
|
id := atomic.AddUint64(&g.id, 1)
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// reset is for testing only, and doesn't need to be thread safe.
|
||||||
|
func (g *callIDGenerator) reset() {
|
||||||
|
g.id = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var idGen callIDGenerator
|
||||||
|
|
||||||
|
// MethodLogger is the sub-logger for each method.
|
||||||
|
type MethodLogger struct {
|
||||||
|
headerMaxLen, messageMaxLen uint64
|
||||||
|
|
||||||
|
callID uint64
|
||||||
|
idWithinCallGen *callIDGenerator
|
||||||
|
|
||||||
|
sink Sink // TODO(blog): make this plugable.
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMethodLogger(h, m uint64) *MethodLogger {
|
||||||
|
return &MethodLogger{
|
||||||
|
headerMaxLen: h,
|
||||||
|
messageMaxLen: m,
|
||||||
|
|
||||||
|
callID: idGen.next(),
|
||||||
|
idWithinCallGen: &callIDGenerator{},
|
||||||
|
|
||||||
|
sink: defaultSink, // TODO(blog): make it plugable.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Log creates a proto binary log entry, and logs it to the sink.
|
||||||
|
func (ml *MethodLogger) Log(c LogEntryConfig) {
|
||||||
|
m := c.toProto()
|
||||||
|
timestamp, _ := ptypes.TimestampProto(time.Now())
|
||||||
|
m.Timestamp = timestamp
|
||||||
|
m.CallId = ml.callID
|
||||||
|
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
|
||||||
|
|
||||||
|
switch pay := m.Payload.(type) {
|
||||||
|
case *pb.GrpcLogEntry_ClientHeader:
|
||||||
|
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
|
||||||
|
case *pb.GrpcLogEntry_ServerHeader:
|
||||||
|
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
|
||||||
|
case *pb.GrpcLogEntry_Message:
|
||||||
|
m.PayloadTruncated = ml.truncateMessage(pay.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
ml.sink.Write(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||||||
|
if ml.headerMaxLen == maxUInt {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
bytesLimit = ml.headerMaxLen
|
||||||
|
index int
|
||||||
|
)
|
||||||
|
// At the end of the loop, index will be the first entry where the total
|
||||||
|
// size is greater than the limit:
|
||||||
|
//
|
||||||
|
// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
|
||||||
|
for ; index < len(mdPb.Entry); index++ {
|
||||||
|
entry := mdPb.Entry[index]
|
||||||
|
if entry.Key == "grpc-trace-bin" {
|
||||||
|
// "grpc-trace-bin" is a special key. It's kept in the log entry,
|
||||||
|
// but not counted towards the size limit.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
currentEntryLen := uint64(len(entry.Value))
|
||||||
|
if currentEntryLen > bytesLimit {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
bytesLimit -= currentEntryLen
|
||||||
|
}
|
||||||
|
truncated = index < len(mdPb.Entry)
|
||||||
|
mdPb.Entry = mdPb.Entry[:index]
|
||||||
|
return truncated
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
||||||
|
if ml.messageMaxLen == maxUInt {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
msgPb.Data = msgPb.Data[:ml.messageMaxLen]
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogEntryConfig represents the configuration for binary log entry.
|
||||||
|
type LogEntryConfig interface {
|
||||||
|
toProto() *pb.GrpcLogEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientHeader configs the binary log entry to be a ClientHeader entry.
|
||||||
|
type ClientHeader struct {
|
||||||
|
OnClientSide bool
|
||||||
|
Header metadata.MD
|
||||||
|
MethodName string
|
||||||
|
Authority string
|
||||||
|
Timeout time.Duration
|
||||||
|
// PeerAddr is required only when it's on server side.
|
||||||
|
PeerAddr net.Addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
||||||
|
// This function doesn't need to set all the fields (e.g. seq ID). The Log
|
||||||
|
// function will set the fields when necessary.
|
||||||
|
clientHeader := &pb.ClientHeader{
|
||||||
|
Metadata: mdToMetadataProto(c.Header),
|
||||||
|
MethodName: c.MethodName,
|
||||||
|
Authority: c.Authority,
|
||||||
|
}
|
||||||
|
if c.Timeout > 0 {
|
||||||
|
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
|
||||||
|
}
|
||||||
|
ret := &pb.GrpcLogEntry{
|
||||||
|
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||||
|
Payload: &pb.GrpcLogEntry_ClientHeader{
|
||||||
|
ClientHeader: clientHeader,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if c.OnClientSide {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||||
|
} else {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||||
|
}
|
||||||
|
if c.PeerAddr != nil {
|
||||||
|
ret.Peer = addrToProto(c.PeerAddr)
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerHeader configs the binary log entry to be a ServerHeader entry.
|
||||||
|
type ServerHeader struct {
|
||||||
|
OnClientSide bool
|
||||||
|
Header metadata.MD
|
||||||
|
// PeerAddr is required only when it's on client side.
|
||||||
|
PeerAddr net.Addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
|
||||||
|
ret := &pb.GrpcLogEntry{
|
||||||
|
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||||||
|
Payload: &pb.GrpcLogEntry_ServerHeader{
|
||||||
|
ServerHeader: &pb.ServerHeader{
|
||||||
|
Metadata: mdToMetadataProto(c.Header),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if c.OnClientSide {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||||
|
} else {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||||
|
}
|
||||||
|
if c.PeerAddr != nil {
|
||||||
|
ret.Peer = addrToProto(c.PeerAddr)
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientMessage configs the binary log entry to be a ClientMessage entry.
|
||||||
|
type ClientMessage struct {
|
||||||
|
OnClientSide bool
|
||||||
|
// Message can be a proto.Message or []byte. Other messages formats are not
|
||||||
|
// supported.
|
||||||
|
Message interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
||||||
|
var (
|
||||||
|
data []byte
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
if m, ok := c.Message.(proto.Message); ok {
|
||||||
|
data, err = proto.Marshal(m)
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
|
||||||
|
}
|
||||||
|
} else if b, ok := c.Message.([]byte); ok {
|
||||||
|
data = b
|
||||||
|
} else {
|
||||||
|
grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||||
|
}
|
||||||
|
ret := &pb.GrpcLogEntry{
|
||||||
|
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||||||
|
Payload: &pb.GrpcLogEntry_Message{
|
||||||
|
Message: &pb.Message{
|
||||||
|
Length: uint32(len(data)),
|
||||||
|
Data: data,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if c.OnClientSide {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||||
|
} else {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerMessage configs the binary log entry to be a ServerMessage entry.
|
||||||
|
type ServerMessage struct {
|
||||||
|
OnClientSide bool
|
||||||
|
// Message can be a proto.Message or []byte. Other messages formats are not
|
||||||
|
// supported.
|
||||||
|
Message interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
||||||
|
var (
|
||||||
|
data []byte
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
if m, ok := c.Message.(proto.Message); ok {
|
||||||
|
data, err = proto.Marshal(m)
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
|
||||||
|
}
|
||||||
|
} else if b, ok := c.Message.([]byte); ok {
|
||||||
|
data = b
|
||||||
|
} else {
|
||||||
|
grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||||
|
}
|
||||||
|
ret := &pb.GrpcLogEntry{
|
||||||
|
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||||||
|
Payload: &pb.GrpcLogEntry_Message{
|
||||||
|
Message: &pb.Message{
|
||||||
|
Length: uint32(len(data)),
|
||||||
|
Data: data,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if c.OnClientSide {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||||
|
} else {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
|
||||||
|
type ClientHalfClose struct {
|
||||||
|
OnClientSide bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
|
||||||
|
ret := &pb.GrpcLogEntry{
|
||||||
|
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||||||
|
Payload: nil, // No payload here.
|
||||||
|
}
|
||||||
|
if c.OnClientSide {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||||
|
} else {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
|
||||||
|
type ServerTrailer struct {
|
||||||
|
OnClientSide bool
|
||||||
|
Trailer metadata.MD
|
||||||
|
// Err is the status error.
|
||||||
|
Err error
|
||||||
|
// PeerAddr is required only when it's on client side and the RPC is trailer
|
||||||
|
// only.
|
||||||
|
PeerAddr net.Addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||||
|
st, ok := status.FromError(c.Err)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Info("binarylogging: error in trailer is not a status error")
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
detailsBytes []byte
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
stProto := st.Proto()
|
||||||
|
if stProto != nil && len(stProto.Details) != 0 {
|
||||||
|
detailsBytes, err = proto.Marshal(stProto)
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ret := &pb.GrpcLogEntry{
|
||||||
|
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||||||
|
Payload: &pb.GrpcLogEntry_Trailer{
|
||||||
|
Trailer: &pb.Trailer{
|
||||||
|
Metadata: mdToMetadataProto(c.Trailer),
|
||||||
|
StatusCode: uint32(st.Code()),
|
||||||
|
StatusMessage: st.Message(),
|
||||||
|
StatusDetails: detailsBytes,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if c.OnClientSide {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||||
|
} else {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||||
|
}
|
||||||
|
if c.PeerAddr != nil {
|
||||||
|
ret.Peer = addrToProto(c.PeerAddr)
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel configs the binary log entry to be a Cancel entry.
|
||||||
|
type Cancel struct {
|
||||||
|
OnClientSide bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cancel) toProto() *pb.GrpcLogEntry {
|
||||||
|
ret := &pb.GrpcLogEntry{
|
||||||
|
Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||||||
|
Payload: nil,
|
||||||
|
}
|
||||||
|
if c.OnClientSide {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||||
|
} else {
|
||||||
|
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// metadataKeyOmit returns whether the metadata entry with this key should be
|
||||||
|
// omitted.
|
||||||
|
func metadataKeyOmit(key string) bool {
|
||||||
|
switch key {
|
||||||
|
case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
|
||||||
|
return true
|
||||||
|
case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return strings.HasPrefix(key, "grpc-")
|
||||||
|
}
|
||||||
|
|
||||||
|
func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
||||||
|
ret := &pb.Metadata{}
|
||||||
|
for k, vv := range md {
|
||||||
|
if metadataKeyOmit(k) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, v := range vv {
|
||||||
|
ret.Entry = append(ret.Entry,
|
||||||
|
&pb.MetadataEntry{
|
||||||
|
Key: k,
|
||||||
|
Value: []byte(v),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func addrToProto(addr net.Addr) *pb.Address {
|
||||||
|
ret := &pb.Address{}
|
||||||
|
switch a := addr.(type) {
|
||||||
|
case *net.TCPAddr:
|
||||||
|
if a.IP.To4() != nil {
|
||||||
|
ret.Type = pb.Address_TYPE_IPV4
|
||||||
|
} else if a.IP.To16() != nil {
|
||||||
|
ret.Type = pb.Address_TYPE_IPV6
|
||||||
|
} else {
|
||||||
|
ret.Type = pb.Address_TYPE_UNKNOWN
|
||||||
|
// Do not set address and port fields.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ret.Address = a.IP.String()
|
||||||
|
ret.IpPort = uint32(a.Port)
|
||||||
|
case *net.UnixAddr:
|
||||||
|
ret.Type = pb.Address_TYPE_UNIX
|
||||||
|
ret.Address = a.String()
|
||||||
|
default:
|
||||||
|
ret.Type = pb.Address_TYPE_UNKNOWN
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
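
The metadata truncation above walks entries in order, always keeps grpc-trace-bin without charging it against the byte budget, and cuts the list at the first entry that no longer fits. A stripped-down model of that rule (the types here are simplified stand-ins, not the package's own):

```go
package main

import "fmt"

type entry struct {
	key   string
	value string
}

// truncate mirrors the rule in truncateMetadata above: iterate in order,
// skip "grpc-trace-bin" from the byte accounting, and stop at the first
// entry whose value would exceed the remaining budget.
func truncate(entries []entry, budget uint64) ([]entry, bool) {
	var i int
	for ; i < len(entries); i++ {
		if entries[i].key == "grpc-trace-bin" {
			continue // kept, but not counted
		}
		l := uint64(len(entries[i].value))
		if l > budget {
			break
		}
		budget -= l
	}
	return entries[:i], i < len(entries)
}

func main() {
	md := []entry{
		{"a", "12345"},
		{"grpc-trace-bin", "this-is-free"},
		{"b", "1234567890"},
	}
	kept, truncated := truncate(md, 8) // hypothetical 8-byte header budget
	fmt.Println(kept, truncated)       // keeps "a" and the trace entry, drops "b"
}
```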
|
|
@ -0,0 +1,162 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package binarylog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetDefaultSink sets the sink where binary logs will be written to.
|
||||||
|
//
|
||||||
|
// Not thread safe. Only set during initialization.
|
||||||
|
func SetDefaultSink(s Sink) {
|
||||||
|
if defaultSink != nil {
|
||||||
|
defaultSink.Close()
|
||||||
|
}
|
||||||
|
defaultSink = s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sink writes log entry into the binary log sink.
|
||||||
|
type Sink interface {
|
||||||
|
// Write will be called to write the log entry into the sink.
|
||||||
|
//
|
||||||
|
// It should be thread-safe so it can be called in parallel.
|
||||||
|
Write(*pb.GrpcLogEntry) error
|
||||||
|
// Close will be called when the Sink is replaced by a new Sink.
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
type noopSink struct{}
|
||||||
|
|
||||||
|
func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
|
||||||
|
func (ns *noopSink) Close() error { return nil }
|
||||||
|
|
||||||
|
// newWriterSink creates a binary log sink with the given writer.
//
// Write() marshals the proto message and writes it to the given writer. Each
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// No buffering is done; Close() doesn't try to close the writer.
|
||||||
|
func newWriterSink(w io.Writer) *writerSink {
|
||||||
|
return &writerSink{out: w}
|
||||||
|
}
|
||||||
|
|
||||||
|
type writerSink struct {
|
||||||
|
out io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
|
||||||
|
b, err := proto.Marshal(e)
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
|
||||||
|
}
|
||||||
|
hdr := make([]byte, 4)
|
||||||
|
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
|
||||||
|
if _, err := ws.out.Write(hdr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := ws.out.Write(b); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ws *writerSink) Close() error { return nil }
|
||||||
|
|
||||||
|
type bufWriteCloserSink struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
closer io.Closer
|
||||||
|
out *writerSink // out is built on buf.
|
||||||
|
buf *bufio.Writer // buf is kept for flush.
|
||||||
|
|
||||||
|
writeStartOnce sync.Once
|
||||||
|
writeTicker *time.Ticker
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
|
||||||
|
// Start the write loop when Write is called.
|
||||||
|
fs.writeStartOnce.Do(fs.startFlushGoroutine)
|
||||||
|
fs.mu.Lock()
|
||||||
|
if err := fs.out.Write(e); err != nil {
|
||||||
|
fs.mu.Unlock()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fs.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
bufFlushDuration = 60 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
func (fs *bufWriteCloserSink) startFlushGoroutine() {
|
||||||
|
fs.writeTicker = time.NewTicker(bufFlushDuration)
|
||||||
|
go func() {
|
||||||
|
for range fs.writeTicker.C {
|
||||||
|
fs.mu.Lock()
|
||||||
|
fs.buf.Flush()
|
||||||
|
fs.mu.Unlock()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *bufWriteCloserSink) Close() error {
|
||||||
|
if fs.writeTicker != nil {
|
||||||
|
fs.writeTicker.Stop()
|
||||||
|
}
|
||||||
|
fs.mu.Lock()
|
||||||
|
fs.buf.Flush()
|
||||||
|
fs.closer.Close()
|
||||||
|
fs.out.Close()
|
||||||
|
fs.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBufWriteCloserSink(o io.WriteCloser) Sink {
|
||||||
|
bufW := bufio.NewWriter(o)
|
||||||
|
return &bufWriteCloserSink{
|
||||||
|
closer: o,
|
||||||
|
out: newWriterSink(bufW),
|
||||||
|
buf: bufW,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTempFileSink creates a temp file and returns a Sink that writes to this
|
||||||
|
// file.
|
||||||
|
func NewTempFileSink() (Sink, error) {
|
||||||
|
tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create temp file: %v", err)
|
||||||
|
}
|
||||||
|
return newBufWriteCloserSink(tempFile), nil
|
||||||
|
}
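
writerSink above frames each marshaled entry with a 4-byte big-endian length prefix. Assuming that framing, a log produced by it can be split back into per-entry byte slices as in the sketch below (illustrative only; decoding each payload into a GrpcLogEntry proto is left to the caller):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readEntries splits a stream of length-prefixed records as written by the
// sink above: a 4-byte big-endian length, then that many payload bytes.
func readEntries(r io.Reader) ([][]byte, error) {
	var out [][]byte
	hdr := make([]byte, 4)
	for {
		if _, err := io.ReadFull(r, hdr); err != nil {
			if err == io.EOF {
				return out, nil // clean end of stream
			}
			return out, err
		}
		payload := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(r, payload); err != nil {
			return out, err
		}
		out = append(out, payload)
	}
}

func main() {
	// Build a fake log with two framed records.
	var buf bytes.Buffer
	for _, p := range [][]byte{[]byte("first"), []byte("second")} {
		var hdr [4]byte
		binary.BigEndian.PutUint32(hdr[:], uint32(len(p)))
		buf.Write(hdr[:])
		buf.Write(p)
	}
	entries, err := readEntries(&buf)
	fmt.Println(len(entries), err) // 2 <nil>
}
```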
|
|
@ -0,0 +1,41 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
package binarylog
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// parseMethodName splits service and method from the input. It expects format
|
||||||
|
// "/service/method".
|
||||||
|
//
|
||||||
|
// TODO: move to internal/grpcutil.
|
||||||
|
func parseMethodName(methodName string) (service, method string, _ error) {
|
||||||
|
if !strings.HasPrefix(methodName, "/") {
|
||||||
|
return "", "", errors.New("invalid method name: should start with /")
|
||||||
|
}
|
||||||
|
methodName = methodName[1:]
|
||||||
|
|
||||||
|
pos := strings.LastIndex(methodName, "/")
|
||||||
|
if pos < 0 {
|
||||||
|
return "", "", errors.New("invalid method name: suffix /method is missing")
|
||||||
|
}
|
||||||
|
return methodName[:pos], methodName[pos+1:], nil
|
||||||
|
}
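
For reference, the split performed by parseMethodName: an input of the form "/service/method" yields the service and method parts, and anything else is rejected. A tiny standalone equivalent (illustrative; the real helper is unexported and internal to the package):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// splitMethodName mirrors parseMethodName above: "/service/method" -> ("service", "method").
func splitMethodName(name string) (string, string, error) {
	if !strings.HasPrefix(name, "/") {
		return "", "", errors.New("invalid method name: should start with /")
	}
	name = name[1:]
	pos := strings.LastIndex(name, "/")
	if pos < 0 {
		return "", "", errors.New("invalid method name: suffix /method is missing")
	}
	return name[:pos], name[pos+1:], nil
}

func main() {
	s, m, err := splitMethodName("/grpc.testing.TestService/UnaryCall")
	fmt.Println(s, m, err) // grpc.testing.TestService UnaryCall <nil>
}
```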
|
|
@ -27,16 +27,22 @@ import (
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultMaxTraceEntry int32 = 30
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
db dbWrapper
|
db dbWrapper
|
||||||
idGen idGenerator
|
idGen idGenerator
|
||||||
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
||||||
EntryPerPage = 50
|
EntryPerPage = int64(50)
|
||||||
curState int32
|
curState int32
|
||||||
|
maxTraceEntry = defaultMaxTraceEntry
|
||||||
)
|
)
|
||||||
|
|
||||||
// TurnOn turns on channelz data collection.
|
// TurnOn turns on channelz data collection.
|
||||||
|
@ -52,6 +58,22 @@ func IsOn() bool {
|
||||||
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
|
||||||
|
// Setting it to 0 will disable channel tracing.
|
||||||
|
func SetMaxTraceEntry(i int32) {
|
||||||
|
atomic.StoreInt32(&maxTraceEntry, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default.
|
||||||
|
func ResetMaxTraceEntryToDefault() {
|
||||||
|
atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getMaxTraceEntry() int {
|
||||||
|
i := atomic.LoadInt32(&maxTraceEntry)
|
||||||
|
return int(i)
|
||||||
|
}
|
||||||
|
|
||||||
// dbWrapper wraps around a reference to internal channelz data storage, and
// dbWrapper wraps around a reference to internal channelz data storage, and
// provides synchronized functionality to set and get the reference.
// provides synchronized functionality to set and get the reference.
|
||||||
type dbWrapper struct {
|
type dbWrapper struct {
|
||||||
|
@ -91,20 +113,20 @@ func NewChannelzStorage() {
|
||||||
// boolean indicating whether there's more top channels to be queried for.
|
// boolean indicating whether there's more top channels to be queried for.
|
||||||
//
|
//
|
||||||
// The arg id specifies that only top channel with id at or above it will be included
|
// The arg id specifies that only top channel with id at or above it will be included
|
||||||
// in the result. The returned slice is up to a length of EntryPerPage, and is
|
// in the result. The returned slice is up to a length of the arg maxResults or
|
||||||
// sorted in ascending id order.
|
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||||
func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||||
return db.get().GetTopChannels(id)
|
return db.get().GetTopChannels(id, maxResults)
|
||||||
}
|
}
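
The new maxResults argument changes the paging contract described in the comment above: zero falls back to EntryPerPage, results are sorted by id starting at the requested id, and the second return value reports whether the listing is complete. A standalone sketch of that contract over plain int64 ids (names are illustrative, not the channelz implementation itself):

```go
package main

import (
	"fmt"
	"sort"
)

const entryPerPage = int64(50)

// page returns up to max ids that are >= start, in ascending order, plus a
// flag telling whether the end of the id space was reached.
func page(ids []int64, start, max int64) ([]int64, bool) {
	if max <= 0 {
		max = entryPerPage
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= start })

	var out []int64
	for _, v := range ids[idx:] {
		if int64(len(out)) == max {
			break
		}
		out = append(out, v)
	}
	end := idx+len(out) == len(ids)
	return out, end
}

func main() {
	ids := []int64{5, 1, 9, 3, 7}
	fmt.Println(page(ids, 3, 2)) // [3 5] false: more ids remain
	fmt.Println(page(ids, 8, 0)) // [9] true: listing complete
}
```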
|
||||||
|
|
||||||
// GetServers returns a slice of server's ServerMetric, along with a
|
// GetServers returns a slice of server's ServerMetric, along with a
|
||||||
// boolean indicating whether there's more servers to be queried for.
|
// boolean indicating whether there's more servers to be queried for.
|
||||||
//
|
//
|
||||||
// The arg id specifies that only server with id at or above it will be included
|
// The arg id specifies that only server with id at or above it will be included
|
||||||
// in the result. The returned slice is up to a length of EntryPerPage, and is
|
// in the result. The returned slice is up to a length of the arg maxResults or
|
||||||
// sorted in ascending id order.
|
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||||
func GetServers(id int64) ([]*ServerMetric, bool) {
|
func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
|
||||||
return db.get().GetServers(id)
|
return db.get().GetServers(id, maxResults)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetServerSockets returns a slice of server's (identified by id) normal socket's
|
// GetServerSockets returns a slice of server's (identified by id) normal socket's
|
||||||
|
@ -112,10 +134,10 @@ func GetServers(id int64) ([]*ServerMetric, bool) {
|
||||||
// be queried for.
|
// be queried for.
|
||||||
//
|
//
|
||||||
// The arg startID specifies that only sockets with id at or above it will be
|
// The arg startID specifies that only sockets with id at or above it will be
|
||||||
// included in the result. The returned slice is up to a length of EntryPerPage,
|
// included in the result. The returned slice is up to a length of the arg maxResults
|
||||||
// and is sorted in ascending id order.
|
// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||||
func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
|
func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
|
||||||
return db.get().GetServerSockets(id, startID)
|
return db.get().GetServerSockets(id, startID, maxResults)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetChannel returns the ChannelMetric for the channel (identified by id).
|
// GetChannel returns the ChannelMetric for the channel (identified by id).
|
||||||
|
@ -133,6 +155,11 @@ func GetSocket(id int64) *SocketMetric {
|
||||||
return db.get().GetSocket(id)
|
return db.get().GetSocket(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetServer returns the ServerMetric for the server (identified by id).
|
||||||
|
func GetServer(id int64) *ServerMetric {
|
||||||
|
return db.get().GetServer(id)
|
||||||
|
}
|
||||||
|
|
||||||
// RegisterChannel registers the given channel c in channelz database with ref
|
// RegisterChannel registers the given channel c in channelz database with ref
|
||||||
// as its reference name, and add it to the child list of its parent (identified
|
// as its reference name, and add it to the child list of its parent (identified
|
||||||
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
|
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
|
||||||
|
@ -146,6 +173,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
||||||
nestedChans: make(map[int64]string),
|
nestedChans: make(map[int64]string),
|
||||||
id: id,
|
id: id,
|
||||||
pid: pid,
|
pid: pid,
|
||||||
|
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||||
}
|
}
|
||||||
if pid == 0 {
|
if pid == 0 {
|
||||||
db.get().addChannel(id, cn, true, pid, ref)
|
db.get().addChannel(id, cn, true, pid, ref)
|
||||||
|
@ -170,6 +198,7 @@ func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||||
sockets: make(map[int64]string),
|
sockets: make(map[int64]string),
|
||||||
id: id,
|
id: id,
|
||||||
pid: pid,
|
pid: pid,
|
||||||
|
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||||
}
|
}
|
||||||
db.get().addSubChannel(id, sc, pid, ref)
|
db.get().addSubChannel(id, sc, pid, ref)
|
||||||
return id
|
return id
|
||||||
|
@ -226,6 +255,24 @@ func RemoveEntry(id int64) {
|
||||||
db.get().removeEntry(id)
|
db.get().removeEntry(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
|
||||||
|
// to the channel trace.
|
||||||
|
// The Parent field is optional. It is used for an event that will also be recorded in the
// entity's parent trace.
|
||||||
|
type TraceEventDesc struct {
|
||||||
|
Desc string
|
||||||
|
Severity Severity
|
||||||
|
Parent *TraceEventDesc
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
|
||||||
|
func AddTraceEvent(id int64, desc *TraceEventDesc) {
|
||||||
|
if getMaxTraceEntry() == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
db.get().traceEvent(id, desc)
|
||||||
|
}
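
traceEvent above appends the child's own description to the child trace and, when desc.Parent is set, a second event to the parent trace that carries the child's id and ref name. A stripped-down model of that fan-out (the types are simplified stand-ins for the channelz ones):

```go
package main

import (
	"fmt"
	"time"
)

type traceEvent struct {
	Desc      string
	Timestamp time.Time
	RefID     int64  // child id, only set on the parent's copy
	RefName   string // child ref name, only set on the parent's copy
}

type eventDesc struct {
	Desc   string
	Parent *eventDesc
}

// record appends the event to the child trace and, if a parent description is
// given, a cross-referencing event to the parent trace as well.
func record(child, parent *[]traceEvent, childID int64, childRef string, d eventDesc) {
	now := time.Now()
	*child = append(*child, traceEvent{Desc: d.Desc, Timestamp: now})
	if d.Parent != nil {
		*parent = append(*parent, traceEvent{
			Desc:      d.Parent.Desc,
			Timestamp: now,
			RefID:     childID,
			RefName:   childRef,
		})
	}
}

func main() {
	var chanTrace, parentTrace []traceEvent
	record(&chanTrace, &parentTrace, 4, "sub", eventDesc{
		Desc:   "Subchannel picks a new address to connect",
		Parent: &eventDesc{Desc: "Subchannel (id: 4) state change"},
	})
	fmt.Println(len(chanTrace), len(parentTrace)) // 1 1
}
```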
|
||||||
|
|
||||||
// channelMap is the storage data structure for channelz.
|
// channelMap is the storage data structure for channelz.
|
||||||
// Methods of channelMap can be divided into two categories with respect to locking.
// Methods of channelMap can be divided into two categories with respect to locking.
|
||||||
// 1. Methods acquire the global lock.
|
// 1. Methods acquire the global lock.
|
||||||
|
@ -251,6 +298,7 @@ func (c *channelMap) addServer(id int64, s *server) {
|
||||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
|
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
cn.cm = c
|
cn.cm = c
|
||||||
|
cn.trace.cm = c
|
||||||
c.channels[id] = cn
|
c.channels[id] = cn
|
||||||
if isTopChannel {
|
if isTopChannel {
|
||||||
c.topLevelChannels[id] = struct{}{}
|
c.topLevelChannels[id] = struct{}{}
|
||||||
|
@ -263,6 +311,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in
|
||||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
|
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
sc.cm = c
|
sc.cm = c
|
||||||
|
sc.trace.cm = c
|
||||||
c.subChannels[id] = sc
|
c.subChannels[id] = sc
|
||||||
c.findEntry(pid).addChild(id, sc)
|
c.findEntry(pid).addChild(id, sc)
|
||||||
c.mu.Unlock()
|
c.mu.Unlock()
|
||||||
|
@ -284,16 +333,25 @@ func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref
|
||||||
c.mu.Unlock()
|
c.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// removeEntry triggers the removal of an entry, which may not indeed delete the
|
// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
|
||||||
// entry, if it has to wait on the deletion of its children, or may lead to a chain
|
// wait on the deletion of its children and until no other entity's channel trace references it.
|
||||||
// of entry deletion. For example, deleting the last socket of a gracefully shutting
|
// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
|
||||||
// down server will lead to the server being also deleted.
|
// shutting down server will lead to the server being also deleted.
|
||||||
func (c *channelMap) removeEntry(id int64) {
|
func (c *channelMap) removeEntry(id int64) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
c.findEntry(id).triggerDelete()
|
c.findEntry(id).triggerDelete()
|
||||||
c.mu.Unlock()
|
c.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// c.mu must be held by the caller
|
||||||
|
func (c *channelMap) decrTraceRefCount(id int64) {
|
||||||
|
e := c.findEntry(id)
|
||||||
|
if v, ok := e.(tracedChannel); ok {
|
||||||
|
v.decrTraceRefCount()
|
||||||
|
e.deleteSelfIfReady()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// c.mu must be held by the caller.
|
// c.mu must be held by the caller.
|
||||||
func (c *channelMap) findEntry(id int64) entry {
|
func (c *channelMap) findEntry(id int64) entry {
|
||||||
var v entry
|
var v entry
|
||||||
|
@ -347,6 +405,39 @@ func (c *channelMap) deleteEntry(id int64) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
|
||||||
|
c.mu.Lock()
|
||||||
|
child := c.findEntry(id)
|
||||||
|
childTC, ok := child.(tracedChannel)
|
||||||
|
if !ok {
|
||||||
|
c.mu.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
|
||||||
|
if desc.Parent != nil {
|
||||||
|
parent := c.findEntry(child.getParentID())
|
||||||
|
var chanType RefChannelType
|
||||||
|
switch child.(type) {
|
||||||
|
case *channel:
|
||||||
|
chanType = RefChannel
|
||||||
|
case *subChannel:
|
||||||
|
chanType = RefSubChannel
|
||||||
|
}
|
||||||
|
if parentTC, ok := parent.(tracedChannel); ok {
|
||||||
|
parentTC.getChannelTrace().append(&TraceEvent{
|
||||||
|
Desc: desc.Parent.Desc,
|
||||||
|
Severity: desc.Parent.Severity,
|
||||||
|
Timestamp: time.Now(),
|
||||||
|
RefID: id,
|
||||||
|
RefName: childTC.getRefName(),
|
||||||
|
RefType: chanType,
|
||||||
|
})
|
||||||
|
childTC.incrTraceRefCount()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
type int64Slice []int64
|
type int64Slice []int64
|
||||||
|
|
||||||
func (s int64Slice) Len() int { return len(s) }
|
func (s int64Slice) Len() int { return len(s) }
|
||||||
|
@ -361,29 +452,32 @@ func copyMap(m map[int64]string) map[int64]string {
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func min(a, b int) int {
|
func min(a, b int64) int64 {
|
||||||
if a < b {
|
if a < b {
|
||||||
return a
|
return a
|
||||||
}
|
}
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||||
|
if maxResults <= 0 {
|
||||||
|
maxResults = EntryPerPage
|
||||||
|
}
|
||||||
c.mu.RLock()
|
c.mu.RLock()
|
||||||
l := len(c.topLevelChannels)
|
l := int64(len(c.topLevelChannels))
|
||||||
ids := make([]int64, 0, l)
|
ids := make([]int64, 0, l)
|
||||||
cns := make([]*channel, 0, min(l, EntryPerPage))
|
cns := make([]*channel, 0, min(l, maxResults))
|
||||||
|
|
||||||
for k := range c.topLevelChannels {
|
for k := range c.topLevelChannels {
|
||||||
ids = append(ids, k)
|
ids = append(ids, k)
|
||||||
}
|
}
|
||||||
sort.Sort(int64Slice(ids))
|
sort.Sort(int64Slice(ids))
|
||||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||||
count := 0
|
count := int64(0)
|
||||||
var end bool
|
var end bool
|
||||||
var t []*ChannelMetric
|
var t []*ChannelMetric
|
||||||
for i, v := range ids[idx:] {
|
for i, v := range ids[idx:] {
|
||||||
if count == EntryPerPage {
|
if count == maxResults {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if cn, ok := c.channels[v]; ok {
|
if cn, ok := c.channels[v]; ok {
|
||||||
|
@ -408,25 +502,29 @@ func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
|
||||||
t[i].ChannelData = cn.c.ChannelzMetric()
|
t[i].ChannelData = cn.c.ChannelzMetric()
|
||||||
t[i].ID = cn.id
|
t[i].ID = cn.id
|
||||||
t[i].RefName = cn.refName
|
t[i].RefName = cn.refName
|
||||||
|
t[i].Trace = cn.trace.dumpData()
|
||||||
}
|
}
|
||||||
return t, end
|
return t, end
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
|
func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
|
||||||
|
if maxResults <= 0 {
|
||||||
|
maxResults = EntryPerPage
|
||||||
|
}
|
||||||
c.mu.RLock()
|
c.mu.RLock()
|
||||||
l := len(c.servers)
|
l := int64(len(c.servers))
|
||||||
ids := make([]int64, 0, l)
|
ids := make([]int64, 0, l)
|
||||||
ss := make([]*server, 0, min(l, EntryPerPage))
|
ss := make([]*server, 0, min(l, maxResults))
|
||||||
for k := range c.servers {
|
for k := range c.servers {
|
||||||
ids = append(ids, k)
|
ids = append(ids, k)
|
||||||
}
|
}
|
||||||
sort.Sort(int64Slice(ids))
|
sort.Sort(int64Slice(ids))
|
||||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||||
count := 0
|
count := int64(0)
|
||||||
var end bool
|
var end bool
|
||||||
var s []*ServerMetric
|
var s []*ServerMetric
|
||||||
for i, v := range ids[idx:] {
|
for i, v := range ids[idx:] {
|
||||||
if count == EntryPerPage {
|
if count == maxResults {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if svr, ok := c.servers[v]; ok {
|
if svr, ok := c.servers[v]; ok {
|
||||||
|
@ -454,7 +552,10 @@ func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
|
||||||
return s, end
|
return s, end
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
|
func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
|
||||||
|
if maxResults <= 0 {
|
||||||
|
maxResults = EntryPerPage
|
||||||
|
}
|
||||||
var svr *server
|
var svr *server
|
||||||
var ok bool
|
var ok bool
|
||||||
c.mu.RLock()
|
c.mu.RLock()
|
||||||
|
@ -464,18 +565,18 @@ func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric,
|
||||||
return nil, true
|
return nil, true
|
||||||
}
|
}
|
||||||
svrskts := svr.sockets
|
svrskts := svr.sockets
|
||||||
l := len(svrskts)
|
l := int64(len(svrskts))
|
||||||
ids := make([]int64, 0, l)
|
ids := make([]int64, 0, l)
|
||||||
sks := make([]*normalSocket, 0, min(l, EntryPerPage))
|
sks := make([]*normalSocket, 0, min(l, maxResults))
|
||||||
for k := range svrskts {
|
for k := range svrskts {
|
||||||
ids = append(ids, k)
|
ids = append(ids, k)
|
||||||
}
|
}
|
||||||
sort.Sort((int64Slice(ids)))
|
sort.Sort(int64Slice(ids))
|
||||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
|
||||||
count := 0
|
count := int64(0)
|
||||||
var end bool
|
var end bool
|
||||||
for i, v := range ids[idx:] {
|
for i, v := range ids[idx:] {
|
||||||
if count == EntryPerPage {
|
if count == maxResults {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if ns, ok := c.normalSockets[v]; ok {
|
if ns, ok := c.normalSockets[v]; ok {
|
||||||
|
@ -514,10 +615,14 @@ func (c *channelMap) GetChannel(id int64) *ChannelMetric {
|
||||||
}
|
}
|
||||||
cm.NestedChans = copyMap(cn.nestedChans)
|
cm.NestedChans = copyMap(cn.nestedChans)
|
||||||
cm.SubChans = copyMap(cn.subChans)
|
cm.SubChans = copyMap(cn.subChans)
|
||||||
|
// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
|
||||||
|
// holding the lock to prevent potential data race.
|
||||||
|
chanCopy := cn.c
|
||||||
c.mu.RUnlock()
|
c.mu.RUnlock()
|
||||||
cm.ChannelData = cn.c.ChannelzMetric()
|
cm.ChannelData = chanCopy.ChannelzMetric()
|
||||||
cm.ID = cn.id
|
cm.ID = cn.id
|
||||||
cm.RefName = cn.refName
|
cm.RefName = cn.refName
|
||||||
|
cm.Trace = cn.trace.dumpData()
|
||||||
return cm
|
return cm
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -532,10 +637,14 @@ func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
cm.Sockets = copyMap(sc.sockets)
|
cm.Sockets = copyMap(sc.sockets)
|
||||||
|
// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
|
||||||
|
// holding the lock to prevent potential data race.
|
||||||
|
chanCopy := sc.c
|
||||||
c.mu.RUnlock()
|
c.mu.RUnlock()
|
||||||
cm.ChannelData = sc.c.ChannelzMetric()
|
cm.ChannelData = chanCopy.ChannelzMetric()
|
||||||
cm.ID = sc.id
|
cm.ID = sc.id
|
||||||
cm.RefName = sc.refName
|
cm.RefName = sc.refName
|
||||||
|
cm.Trace = sc.trace.dumpData()
|
||||||
return cm
|
return cm
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -560,6 +669,23 @@ func (c *channelMap) GetSocket(id int64) *SocketMetric {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *channelMap) GetServer(id int64) *ServerMetric {
|
||||||
|
sm := &ServerMetric{}
|
||||||
|
var svr *server
|
||||||
|
var ok bool
|
||||||
|
c.mu.RLock()
|
||||||
|
if svr, ok = c.servers[id]; !ok {
|
||||||
|
c.mu.RUnlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
sm.ListenSockets = copyMap(svr.listenSockets)
|
||||||
|
c.mu.RUnlock()
|
||||||
|
sm.ID = svr.id
|
||||||
|
sm.RefName = svr.refName
|
||||||
|
sm.ServerData = svr.s.ChannelzMetric()
|
||||||
|
return sm
|
||||||
|
}
|
||||||
|
|
||||||
type idGenerator struct {
|
type idGenerator struct {
|
||||||
id int64
|
id int64
|
||||||
}
|
}
|
|
@ -20,9 +20,12 @@ package channelz
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net"
|
"net"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -39,6 +42,8 @@ type entry interface {
|
||||||
// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether child
// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether child
|
||||||
// list is now empty. If both conditions are met, then delete self from database.
|
// list is now empty. If both conditions are met, then delete self from database.
|
||||||
deleteSelfIfReady()
|
deleteSelfIfReady()
|
||||||
|
// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
|
||||||
|
getParentID() int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// dummyEntry is a fake entry to handle entry not found case.
|
// dummyEntry is a fake entry to handle entry not found case.
|
||||||
|
@ -72,6 +77,10 @@ func (*dummyEntry) deleteSelfIfReady() {
|
||||||
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (*dummyEntry) getParentID() int64 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
||||||
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
||||||
// child list, etc.
|
// child list, etc.
|
||||||
|
@ -94,6 +103,8 @@ type ChannelMetric struct {
|
||||||
// Note current grpc implementation doesn't allow channel having sockets directly,
|
// Note current grpc implementation doesn't allow channel having sockets directly,
|
||||||
// therefore, this field is unused.
// therefore, this field is unused.
|
||||||
Sockets map[int64]string
|
Sockets map[int64]string
|
||||||
|
// Trace contains the most recent traced events.
|
||||||
|
Trace *ChannelTrace
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
||||||
|
@ -120,6 +131,8 @@ type SubChannelMetric struct {
|
||||||
// Sockets tracks the socket type children of this subchannel in the format of a map
|
// Sockets tracks the socket type children of this subchannel in the format of a map
|
||||||
// from socket channelz id to corresponding reference string.
|
// from socket channelz id to corresponding reference string.
|
||||||
Sockets map[int64]string
|
Sockets map[int64]string
|
||||||
|
// Trace contains the most recent traced events.
|
||||||
|
Trace *ChannelTrace
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
||||||
|
@ -137,7 +150,35 @@ type ChannelInternalMetric struct {
|
||||||
CallsFailed int64
|
CallsFailed int64
|
||||||
// The last time a call was started on the channel.
|
// The last time a call was started on the channel.
|
||||||
LastCallStartedTimestamp time.Time
|
LastCallStartedTimestamp time.Time
|
||||||
//TODO: trace
|
}
|
||||||
|
|
||||||
|
// ChannelTrace stores traced events on a channel/subchannel and related info.
|
||||||
|
type ChannelTrace struct {
|
||||||
|
// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
|
||||||
|
EventNum int64
|
||||||
|
// CreationTime is the creation time of the trace.
|
||||||
|
CreationTime time.Time
|
||||||
|
// Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the
|
||||||
|
// oldest one)
|
||||||
|
Events []*TraceEvent
|
||||||
|
}
|
||||||
|
|
||||||
|
// TraceEvent represent a single trace event
|
||||||
|
type TraceEvent struct {
|
||||||
|
// Desc is a simple description of the trace event.
|
||||||
|
Desc string
|
||||||
|
// Severity states the severity of this trace event.
|
||||||
|
Severity Severity
|
||||||
|
// Timestamp is the event time.
|
||||||
|
Timestamp time.Time
|
||||||
|
// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
|
||||||
|
// involved in this event.
|
||||||
|
// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
|
||||||
|
RefID int64
|
||||||
|
// RefName is the reference name for the entity that gets referenced in the event.
|
||||||
|
RefName string
|
||||||
|
// RefType indicates the referenced entity type, i.e Channel or SubChannel.
|
||||||
|
RefType RefChannelType
|
||||||
}
|
}
|
||||||
|
|
||||||
// Channel is the interface that should be satisfied in order to be tracked by
|
// Channel is the interface that should be satisfied in order to be tracked by
|
||||||
|
@ -146,6 +187,12 @@ type Channel interface {
|
||||||
ChannelzMetric() *ChannelInternalMetric
|
ChannelzMetric() *ChannelInternalMetric
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type dummyChannel struct{}
|
||||||
|
|
||||||
+func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
+    return &ChannelInternalMetric{}
+}
+
 type channel struct {
     refName string
     c       Channel

@ -155,6 +202,10 @@ type channel struct {
     id  int64
     pid int64
     cm  *channelMap
+    trace *channelTrace
+    // traceRefCount is the number of trace events that reference this channel.
+    // Non-zero traceRefCount means the trace of this channel cannot be deleted.
+    traceRefCount int32
 }

 func (c *channel) addChild(id int64, e entry) {

@ -179,25 +230,96 @@ func (c *channel) triggerDelete() {
     c.deleteSelfIfReady()
 }

-func (c *channel) deleteSelfIfReady() {
+func (c *channel) getParentID() int64 {
+    return c.pid
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the
+// corresponding grpc object has been invoked, and the channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from tree.
+func (c *channel) deleteSelfFromTree() (deleted bool) {
     if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
-        return
+        return false
     }
-    c.cm.deleteEntry(c.id)
     // not top channel
     if c.pid != 0 {
         c.cm.findEntry(c.pid).deleteChild(c.id)
     }
+    return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
+// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
+// channel, and its memory will be garbage collected.
+//
+// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
+// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from map.
+func (c *channel) deleteSelfFromMap() (delete bool) {
+    if c.getTraceRefCount() != 0 {
+        c.c = &dummyChannel{}
+        return false
+    }
+    return true
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+//    parent's child list.
+// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+//    will return entry not found error.
+func (c *channel) deleteSelfIfReady() {
+    if !c.deleteSelfFromTree() {
+        return
+    }
+    if !c.deleteSelfFromMap() {
+        return
+    }
+    c.cm.deleteEntry(c.id)
+    c.trace.clear()
+}
+
+func (c *channel) getChannelTrace() *channelTrace {
+    return c.trace
+}
+
+func (c *channel) incrTraceRefCount() {
+    atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *channel) decrTraceRefCount() {
+    atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *channel) getTraceRefCount() int {
+    i := atomic.LoadInt32(&c.traceRefCount)
+    return int(i)
+}
+
+func (c *channel) getRefName() string {
+    return c.refName
 }

 type subChannel struct {
     refName     string
     c           Channel
     closeCalled bool
     sockets     map[int64]string
     id          int64
     pid         int64
     cm          *channelMap
+    trace         *channelTrace
+    traceRefCount int32
 }

 func (sc *subChannel) addChild(id int64, e entry) {

@ -218,12 +340,82 @@ func (sc *subChannel) triggerDelete() {
     sc.deleteSelfIfReady()
 }

-func (sc *subChannel) deleteSelfIfReady() {
+func (sc *subChannel) getParentID() int64 {
+    return sc.pid
+}
+
+// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
+// means deleting the subchannel reference from its parent's child list.
+//
+// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
+// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from tree.
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
     if !sc.closeCalled || len(sc.sockets) != 0 {
+        return false
+    }
+    sc.cm.findEntry(sc.pid).deleteChild(sc.id)
+    return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
+// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
+// the subchannel, and its memory will be garbage collected.
+//
+// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
+// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from map.
+func (sc *subChannel) deleteSelfFromMap() (delete bool) {
+    if sc.getTraceRefCount() != 0 {
+        // free the grpc struct (i.e. addrConn)
+        sc.c = &dummyChannel{}
+        return false
+    }
+    return true
+}
+
+// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
+//    its parent's child list.
+// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
+//    by id will return entry not found error.
+func (sc *subChannel) deleteSelfIfReady() {
+    if !sc.deleteSelfFromTree() {
+        return
+    }
+    if !sc.deleteSelfFromMap() {
         return
     }
     sc.cm.deleteEntry(sc.id)
-    sc.cm.findEntry(sc.pid).deleteChild(sc.id)
+    sc.trace.clear()
+}
+
+func (sc *subChannel) getChannelTrace() *channelTrace {
+    return sc.trace
+}
+
+func (sc *subChannel) incrTraceRefCount() {
+    atomic.AddInt32(&sc.traceRefCount, 1)
+}
+
+func (sc *subChannel) decrTraceRefCount() {
+    atomic.AddInt32(&sc.traceRefCount, -1)
+}
+
+func (sc *subChannel) getTraceRefCount() int {
+    i := atomic.LoadInt32(&sc.traceRefCount)
+    return int(i)
+}
+
+func (sc *subChannel) getRefName() string {
+    return sc.refName
 }

 // SocketMetric defines the info channelz provides for a specific Socket, which

@ -281,9 +473,9 @@ type SocketInternalMetric struct {
     RemoteAddr net.Addr
     // Optional, represents the name of the remote endpoint, if different than
     // the original target name.
     RemoteName string
-    //TODO: socket options
-    //TODO: Security
+    SocketOptions *SocketOptionData
+    Security      credentials.ChannelzSecurityValue
 }

 // Socket is the interface that should be satisfied in order to be tracked by

@ -317,6 +509,10 @@ func (ls *listenSocket) deleteSelfIfReady() {
     grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
 }

+func (ls *listenSocket) getParentID() int64 {
+    return ls.pid
+}
+
 type normalSocket struct {
     refName string
     s       Socket

@ -342,6 +538,10 @@ func (ns *normalSocket) deleteSelfIfReady() {
     grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
 }

+func (ns *normalSocket) getParentID() int64 {
+    return ns.pid
+}
+
 // ServerMetric defines the info channelz provides for a specific Server, which
 // includes ServerInternalMetric and channelz-specific data, such as channelz id,
 // child list, etc.

@ -369,7 +569,6 @@ type ServerInternalMetric struct {
     CallsFailed int64
     // The last time a call was started on the server.
     LastCallStartedTimestamp time.Time
-    //TODO: trace
 }

 // Server is the interface to be satisfied in order to be tracked by channelz as

@ -416,3 +615,88 @@ func (s *server) deleteSelfIfReady() {
     }
     s.cm.deleteEntry(s.id)
 }
+
+func (s *server) getParentID() int64 {
+    return 0
+}
+
+type tracedChannel interface {
+    getChannelTrace() *channelTrace
+    incrTraceRefCount()
+    decrTraceRefCount()
+    getRefName() string
+}
+
+type channelTrace struct {
+    cm          *channelMap
+    createdTime time.Time
+    eventCount  int64
+    mu          sync.Mutex
+    events      []*TraceEvent
+}
+
+func (c *channelTrace) append(e *TraceEvent) {
+    c.mu.Lock()
+    if len(c.events) == getMaxTraceEntry() {
+        del := c.events[0]
+        c.events = c.events[1:]
+        if del.RefID != 0 {
+            // start recursive cleanup in a goroutine to not block the call originated from grpc.
+            go func() {
+                // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
+                c.cm.mu.Lock()
+                c.cm.decrTraceRefCount(del.RefID)
+                c.cm.mu.Unlock()
+            }()
+        }
+    }
+    e.Timestamp = time.Now()
+    c.events = append(c.events, e)
+    c.eventCount++
+    c.mu.Unlock()
+}
+
+func (c *channelTrace) clear() {
+    c.mu.Lock()
+    for _, e := range c.events {
+        if e.RefID != 0 {
+            // caller should have already held the c.cm.mu lock.
+            c.cm.decrTraceRefCount(e.RefID)
+        }
+    }
+    c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+    // CtUNKNOWN indicates unknown severity of a trace event.
+    CtUNKNOWN Severity = iota
+    // CtINFO indicates info level severity of a trace event.
+    CtINFO
+    // CtWarning indicates warning level severity of a trace event.
+    CtWarning
+    // CtError indicates error level severity of a trace event.
+    CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
+type RefChannelType int
+
+const (
+    // RefChannel indicates the referenced entity is a Channel.
+    RefChannel RefChannelType = iota
+    // RefSubChannel indicates the referenced entity is a SubChannel.
+    RefSubChannel
+)
+
+func (c *channelTrace) dumpData() *ChannelTrace {
+    c.mu.Lock()
+    ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
+    ct.Events = c.events[:len(c.events)]
+    c.mu.Unlock()
+    return ct
+}
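The teardown protocol documented above is deliberate: an entity leaves the parent/child tree only after its own Close has been invoked and it has no children left, and it leaves the id map only once no trace event references it any more. A minimal, self-contained sketch of that two-step pattern follows; the types and helpers are hypothetical, not the channelz implementation itself.

```go
package registry

import "sync/atomic"

// entry is a hypothetical registry node following the same teardown rules as channelz:
// it may only leave the tree once Close was called and it has no children, and it may
// only leave the flat id map once no trace event references it any more.
type entry struct {
	id, parent  int64
	closeCalled bool
	children    map[int64]*entry
	traceRefs   int32
	byID        map[int64]*entry // the flat lookup map shared by all entries
}

func (e *entry) deleteSelfFromTree() bool {
	if !e.closeCalled || len(e.children) != 0 {
		return false
	}
	if p, ok := e.byID[e.parent]; ok {
		delete(p.children, e.id)
	}
	return true
}

func (e *entry) deleteSelfIfReady() {
	if !e.deleteSelfFromTree() {
		return // still needed as a parent; retried when the last child goes away
	}
	if atomic.LoadInt32(&e.traceRefs) != 0 {
		return // a trace event still points here; keep the id queryable
	}
	delete(e.byID, e.id)
}
```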
@ -0,0 +1,53 @@
// +build !appengine

/* Copyright 2018 gRPC authors. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). */

package channelz

import (
    "syscall"

    "golang.org/x/sys/unix"
)

// SocketOptionData defines the struct to hold socket option data, and related
// getter function to obtain info from fd.
type SocketOptionData struct {
    Linger      *unix.Linger
    RecvTimeout *unix.Timeval
    SendTimeout *unix.Timeval
    TCPInfo     *unix.TCPInfo
}

// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
func (s *SocketOptionData) Getsockopt(fd uintptr) {
    if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
        s.Linger = v
    }
    if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
        s.RecvTimeout = v
    }
    if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
        s.SendTimeout = v
    }
    if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
        s.TCPInfo = v
    }
}
@ -0,0 +1,44 @@
// +build !linux appengine

/* Copyright 2018 gRPC authors. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). */

package channelz

import (
    "sync"

    "google.golang.org/grpc/grpclog"
)

var once sync.Once

// SocketOptionData defines the struct to hold socket option data, and related
// getter function to obtain info from fd.
// Windows OS doesn't support Socket Option
type SocketOptionData struct {
}

// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// Windows OS doesn't support Socket Option
func (s *SocketOptionData) Getsockopt(fd uintptr) {
    once.Do(func() {
        grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
    })
}
@ -1,8 +1,8 @@
-// +build go1.6,!go1.8
+// +build linux,!appengine

 /*
  *
- * Copyright 2017 gRPC authors.
+ * Copyright 2018 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@ -18,17 +18,22 @@
  *
  */

-package naming
+package channelz

 import (
-    "net"
-
-    "golang.org/x/net/context"
+    "syscall"
 )

-var (
-    lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
-    lookupSRV  = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
-        return net.LookupSRV(service, proto, name)
-    }
-)
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket interface{}) *SocketOptionData {
+    c, ok := socket.(syscall.Conn)
+    if !ok {
+        return nil
+    }
+    data := &SocketOptionData{}
+    if rawConn, err := c.SyscallConn(); err == nil {
+        rawConn.Control(data.Getsockopt)
+        return data
+    }
+    return nil
+}
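A hedged usage sketch for the linux variant above: any connection that implements syscall.Conn (for example *net.TCPConn) can be passed to GetSocketOption, while the non-linux/appengine stub further down simply returns nil. Because the package sits under grpc's internal/ tree, the import only resolves from inside the grpc module; the snippet is illustrative only.

```go
package transport // assumed to be some package inside the grpc module

import (
	"net"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

// logSocketOptions shows how a caller might collect channelz socket options
// from an established TCP connection. On non-linux builds GetSocketOption
// returns nil and the function only logs that nothing was collected.
func logSocketOptions(conn net.Conn) {
	data := channelz.GetSocketOption(conn) // conn must implement syscall.Conn, e.g. *net.TCPConn
	if data == nil {
		grpclog.Infoln("no socket option data available for this connection or platform")
		return
	}
	grpclog.Infof("channelz socket options: %+v", data)
}
```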
@ -1,3 +1,5 @@
+// +build !linux appengine
+
 /*
  *
  * Copyright 2018 gRPC authors.

@ -16,22 +18,9 @@
  *
  */

-package grpc
+package channelz

-import (
-    "os"
-    "strings"
-)
-
-const (
-    envConfigPrefix        = "GRPC_GO_"
-    envConfigStickinessStr = envConfigPrefix + "STICKINESS"
-)
-
-var (
-    envConfigStickinessOn bool
-)
-
-func init() {
-    envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on")
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(c interface{}) *SocketOptionData {
+    return nil
 }
@ -0,0 +1,64 @@
/* Copyright 2018 gRPC authors. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). */

// Package envconfig contains grpc settings configured by environment variables.
package envconfig

import (
    "os"
    "strings"
)

const (
    prefix              = "GRPC_GO_"
    retryStr            = prefix + "RETRY"
    requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
)

// RequireHandshakeSetting describes the settings for handshaking.
type RequireHandshakeSetting int

const (
    // RequireHandshakeOn indicates to wait for handshake before considering a
    // connection ready/successful.
    RequireHandshakeOn RequireHandshakeSetting = iota
    // RequireHandshakeOff indicates to not wait for handshake before
    // considering a connection ready/successful.
    RequireHandshakeOff
)

var (
    // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
    Retry = strings.EqualFold(os.Getenv(retryStr), "on")
    // RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
    // environment variable.
    //
    // Will be removed after the 1.18 release.
    RequireHandshake = RequireHandshakeOn
)

func init() {
    switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
    case "on":
        fallthrough
    default:
        RequireHandshake = RequireHandshakeOn
    case "off":
        RequireHandshake = RequireHandshakeOff
    }
}
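The two knobs above are plain environment variables read once at init. The standalone program below only mirrors the parsing rules from the file (case-insensitive "on" for retry; anything other than "off" keeps handshaking required); it is an illustration, not part of gRPC.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Mirrors envconfig: GRPC_GO_RETRY must be (case-insensitively) "on".
	retry := strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on")

	// Mirrors envconfig: anything other than "off" leaves handshaking required.
	requireHandshake := strings.ToLower(os.Getenv("GRPC_GO_REQUIRE_HANDSHAKE")) != "off"

	fmt.Printf("retry enabled: %v, handshake required: %v\n", retry, requireHandshake)
}
```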
@ -0,0 +1,56 @@
/* Copyright 2018 gRPC authors. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). */

// Package grpcrand implements math/rand functions in a concurrent-safe way
// with a global random source, independent of math/rand's global source.
package grpcrand

import (
    "math/rand"
    "sync"
    "time"
)

var (
    r  = rand.New(rand.NewSource(time.Now().UnixNano()))
    mu sync.Mutex
)

// Int63n implements rand.Int63n on the grpcrand global source.
func Int63n(n int64) int64 {
    mu.Lock()
    res := r.Int63n(n)
    mu.Unlock()
    return res
}

// Intn implements rand.Intn on the grpcrand global source.
func Intn(n int) int {
    mu.Lock()
    res := r.Intn(n)
    mu.Unlock()
    return res
}

// Float64 implements rand.Float64 on the grpcrand global source.
func Float64() float64 {
    mu.Lock()
    res := r.Float64()
    mu.Unlock()
    return res
}
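The point of the package above is that a single rand.Rand is not safe for concurrent use, so every accessor takes the package mutex. A small hypothetical caller, assuming the internal import is reachable from inside the grpc module:

```go
package main

import (
	"fmt"
	"sync"

	"google.golang.org/grpc/internal/grpcrand"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Safe to call from multiple goroutines; a bare math/rand.Rand is not.
			fmt.Println(grpcrand.Intn(1000))
		}()
	}
	wg.Wait()
}
```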
@ -0,0 +1,61 @@
/* Copyright 2018 gRPC authors. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). */

// Package grpcsync implements additional synchronization primitives built upon
// the sync package.
package grpcsync

import (
    "sync"
    "sync/atomic"
)

// Event represents a one-time event that may occur in the future.
type Event struct {
    fired int32
    c     chan struct{}
    o     sync.Once
}

// Fire causes e to complete.  It is safe to call multiple times, and
// concurrently.  It returns true iff this call to Fire caused the signaling
// channel returned by Done to close.
func (e *Event) Fire() bool {
    ret := false
    e.o.Do(func() {
        atomic.StoreInt32(&e.fired, 1)
        close(e.c)
        ret = true
    })
    return ret
}

// Done returns a channel that will be closed when Fire is called.
func (e *Event) Done() <-chan struct{} {
    return e.c
}

// HasFired returns true if Fire has been called.
func (e *Event) HasFired() bool {
    return atomic.LoadInt32(&e.fired) == 1
}

// NewEvent returns a new, ready-to-use Event.
func NewEvent() *Event {
    return &Event{c: make(chan struct{})}
}
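A short usage sketch for the Event primitive above, showing that Fire is idempotent and that Done observes exactly one close; it assumes the internal import is reachable from inside the grpc module.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	done := grpcsync.NewEvent()

	go func() {
		time.Sleep(10 * time.Millisecond)
		if done.Fire() { // only the first Fire returns true
			fmt.Println("fired")
		}
	}()

	<-done.Done() // closed exactly once, however many times Fire is called
	fmt.Println("has fired:", done.HasFired())
}
```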
@ -15,13 +15,40 @@
  *
  */

-// Package internal contains gRPC-internal code for testing, to avoid polluting
-// the godoc of the top-level grpc package.
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package.  It must not import any grpc
+// symbols to avoid circular dependencies.
 package internal

-// TestingUseHandlerImpl enables the http.Handler-based server implementation.
-// It must be called before Serve and requires TLS credentials.
-//
-// The provided grpcServer must be of type *grpc.Server. It is untyped
-// for circular dependency reasons.
-var TestingUseHandlerImpl func(grpcServer interface{})
+import (
+    "context"
+    "time"
+)
+
+var (
+    // WithResolverBuilder is exported by dialoptions.go
+    WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
+    // WithHealthCheckFunc is not exported by dialoptions.go
+    WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
+    // HealthCheckFunc is used to provide client-side LB channel health checking
+    HealthCheckFunc HealthChecker
+    // BalancerUnregister is exported by package balancer to unregister a balancer.
+    BalancerUnregister func(name string)
+    // KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
+    // default, but tests may wish to set it lower for convenience.
+    KeepaliveMinPingTime = 10 * time.Second
+)
+
+// HealthChecker defines the signature of the client-side LB channel health checking function.
+type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
+
+const (
+    // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+    CredsBundleModeFallback = "fallback"
+    // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+    // mode.
+    CredsBundleModeBalancer = "balancer"
+    // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+    // that supports backend returned by grpclb balancer.
+    CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
@ -0,0 +1,114 @@
// +build !appengine

/* Copyright 2018 gRPC authors. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). */

// Package syscall provides functionalities that grpc uses to get low-level operating system
// stats/info.
package syscall

import (
    "fmt"
    "net"
    "syscall"
    "time"

    "golang.org/x/sys/unix"
    "google.golang.org/grpc/grpclog"
)

// GetCPUTime returns the how much CPU time has passed since the start of this process.
func GetCPUTime() int64 {
    var ts unix.Timespec
    if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
        grpclog.Fatal(err)
    }
    return ts.Nano()
}

// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
type Rusage syscall.Rusage

// GetRusage returns the resource usage of current process.
func GetRusage() (rusage *Rusage) {
    rusage = new(Rusage)
    syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
    return
}

// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
    f := (*syscall.Rusage)(first)
    l := (*syscall.Rusage)(latest)
    var (
        utimeDiffs  = l.Utime.Sec - f.Utime.Sec
        utimeDiffus = l.Utime.Usec - f.Utime.Usec
        stimeDiffs  = l.Stime.Sec - f.Stime.Sec
        stimeDiffus = l.Stime.Usec - f.Stime.Usec
    )

    uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
    sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6

    return uTimeElapsed, sTimeElapsed
}

// SetTCPUserTimeout sets the TCP user timeout on a connection's socket
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
    tcpconn, ok := conn.(*net.TCPConn)
    if !ok {
        // not a TCP connection. exit early
        return nil
    }
    rawConn, err := tcpconn.SyscallConn()
    if err != nil {
        return fmt.Errorf("error getting raw connection: %v", err)
    }
    err = rawConn.Control(func(fd uintptr) {
        err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
    })
    if err != nil {
        return fmt.Errorf("error setting option on socket: %v", err)
    }

    return nil
}

// GetTCPUserTimeout gets the TCP user timeout on a connection's socket
func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
    tcpconn, ok := conn.(*net.TCPConn)
    if !ok {
        err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
        return
    }
    rawConn, err := tcpconn.SyscallConn()
    if err != nil {
        err = fmt.Errorf("error getting raw connection: %v", err)
        return
    }
    err = rawConn.Control(func(fd uintptr) {
        opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
    })
    if err != nil {
        err = fmt.Errorf("error getting option on socket: %v", err)
        return
    }

    return
}
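A hedged usage sketch for the TCP_USER_TIMEOUT helpers above. Only *net.TCPConn is handled, the effect is linux-only, and the dialed address is a placeholder; as with the other internal packages, this only compiles from inside the grpc module.

```go
package main

import (
	"fmt"
	"net"
	"time"

	"google.golang.org/grpc/internal/syscall"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80") // placeholder address
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	// Only *net.TCPConn is supported; other conn types are silently ignored.
	if err := syscall.SetTCPUserTimeout(conn, 20*time.Second); err != nil {
		fmt.Println("set TCP_USER_TIMEOUT:", err)
		return
	}
	if opt, err := syscall.GetTCPUserTimeout(conn); err == nil {
		fmt.Println("TCP_USER_TIMEOUT (ms):", opt)
	}
}
```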
vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go (generated vendored file, 73 lines added)
@ -0,0 +1,73 @@
// +build !linux appengine

/* Copyright 2018 gRPC authors. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). */

package syscall

import (
    "net"
    "sync"
    "time"

    "google.golang.org/grpc/grpclog"
)

var once sync.Once

func log() {
    once.Do(func() {
        grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
    })
}

// GetCPUTime returns the how much CPU time has passed since the start of this process.
// It always returns 0 under non-linux or appengine environment.
func GetCPUTime() int64 {
    log()
    return 0
}

// Rusage is an empty struct under non-linux or appengine environment.
type Rusage struct{}

// GetRusage is a no-op function under non-linux or appengine environment.
func GetRusage() (rusage *Rusage) {
    log()
    return nil
}

// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It a no-op function for non-linux or appengine environment.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
    log()
    return 0, 0
}

// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
    log()
    return nil
}

// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
// a negative return value indicates the operation is not supported
func GetTCPUserTimeout(conn net.Conn) (int, error) {
    log()
    return -1, nil
}
@ -24,9 +24,10 @@ import (
 )

 const (
-    // bdpLimit is the maximum value the flow control windows
-    // will be increased to.
-    bdpLimit = (1 << 20) * 4
+    // bdpLimit is the maximum value the flow control windows will be increased
+    // to.  TCP typically limits this to 4MB, but some systems go up to 16MB.
+    // Since this is only a limit, it is safe to make it optimistic.
+    bdpLimit = (1 << 20) * 16
     // alpha is a constant factor used to keep a moving average
     // of RTTs.
     alpha = 0.9
@ -28,6 +28,10 @@ import (
     "golang.org/x/net/http2/hpack"
 )

+var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
+    e.SetMaxDynamicTableSizeLimit(v)
+}
+
 type itemNode struct {
     it   interface{}
     next *itemNode

@ -80,6 +84,13 @@ func (il *itemList) isEmpty() bool {
 // the control buffer of transport. They represent different aspects of
 // control tasks, e.g., flow control, settings, streaming resetting, etc.

+// registerStream is used to register an incoming stream with loopy writer.
+type registerStream struct {
+    streamID uint32
+    wq       *writeQuota
+}
+
+// headerFrame is also used to register stream on the client-side.
 type headerFrame struct {
     streamID uint32
     hf       []hpack.HeaderField

@ -93,7 +104,6 @@ type headerFrame struct {
 type cleanupStream struct {
     streamID uint32
-    idPtr    *uint32
     rst      bool
     rstCode  http2.ErrCode
     onWrite  func()

@ -127,9 +137,6 @@ type outgoingSettings struct {
     ss []http2.Setting
 }

-type settingsAck struct {
-}
-
 type incomingGoAway struct {
 }

@ -218,6 +225,12 @@ func (l *outStreamList) dequeue() *outStream {
     return b
 }

+// controlBuffer is a way to pass information to loopy.
+// Information is passed as specific struct types called control frames.
+// A control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state.
+// It shouldn't be confused with an HTTP2 frame, although some of the control frames
+// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
 type controlBuffer struct {
     ch   chan struct{}
     done <-chan struct{}

@ -268,6 +281,21 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{
     return true, nil
 }

+// Note argument f should never be nil.
+func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
+    c.mu.Lock()
+    if c.err != nil {
+        c.mu.Unlock()
+        return false, c.err
+    }
+    if !f(it) { // f wasn't successful
+        c.mu.Unlock()
+        return false, nil
+    }
+    c.mu.Unlock()
+    return true, nil
+}
+
 func (c *controlBuffer) get(block bool) (interface{}, error) {
     for {
         c.mu.Lock()

@ -324,13 +352,29 @@ const (
     serverSide
 )

+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames. Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames
+// it gets added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resemebling to a round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
 type loopyWriter struct {
     side      side
     cbuf      *controlBuffer
     sendQuota uint32
     oiws      uint32 // outbound initial window size.
-    estdStreams   map[uint32]*outStream // Established streams.
-    activeStreams *outStreamList        // Streams that are sending data.
+    // estdStreams is map of all established streams that are not cleaned-up yet.
+    // On client-side, this is all streams whose headers were sent out.
+    // On server-side, this is all streams whose headers were received.
+    estdStreams map[uint32]*outStream // Established streams.
+    // activeStreams is a linked-list of all streams that have data to send and some
+    // stream-level flow control quota.
+    // Each of these streams internally have a list of data items(and perhaps trailers
+    // on the server-side) to be sent out.
+    activeStreams *outStreamList
     framer *framer
     hBuf   *bytes.Buffer  // The buffer for HPACK encoding.
     hEnc   *hpack.Encoder // HPACK encoder.

@ -361,44 +405,62 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
 const minBatchSize = 1000

 // run should be run in a separate goroutine.
-func (l *loopyWriter) run() {
-    var (
-        it      interface{}
-        err     error
-        isEmpty bool
-    )
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, or/and
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of run loop, other than processing the incoming control
+// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
+// This results in writing of HTTP2 frames into an underlying write buffer.
+// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
+// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
+// if the batch size is too low to give stream goroutines a chance to fill it up.
+func (l *loopyWriter) run() (err error) {
     defer func() {
-        errorf("transport: loopyWriter.run returning. Err: %v", err)
+        if err == ErrConnClosing {
+            // Don't log ErrConnClosing as error since it happens
+            // 1. When the connection is closed by some other known issue.
+            // 2. User closed the connection.
+            // 3. A graceful close of connection.
+            infof("transport: loopyWriter.run returning. %v", err)
+            err = nil
+        }
     }()
     for {
-        it, err = l.cbuf.get(true)
+        it, err := l.cbuf.get(true)
         if err != nil {
-            return
+            return err
         }
         if err = l.handle(it); err != nil {
-            return
+            return err
         }
         if _, err = l.processData(); err != nil {
-            return
+            return err
         }
         gosched := true
     hasdata:
         for {
-            it, err = l.cbuf.get(false)
+            it, err := l.cbuf.get(false)
             if err != nil {
-                return
+                return err
             }
             if it != nil {
                 if err = l.handle(it); err != nil {
-                    return
+                    return err
                 }
                 if _, err = l.processData(); err != nil {
-                    return
+                    return err
                 }
                 continue hasdata
             }
-            if isEmpty, err = l.processData(); err != nil {
-                return
+            isEmpty, err := l.processData()
+            if err != nil {
+                return err
             }
             if !isEmpty {
                 continue hasdata

@ -450,30 +512,39 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
     return l.framer.fr.WriteSettingsAck()
 }

+func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
+    str := &outStream{
+        id:    h.streamID,
+        state: empty,
+        itl:   &itemList{},
+        wq:    h.wq,
+    }
+    l.estdStreams[h.streamID] = str
+    return nil
+}
+
 func (l *loopyWriter) headerHandler(h *headerFrame) error {
     if l.side == serverSide {
-        if h.endStream { // Case 1.A: Server wants to close stream.
-            // Make sure it's not a trailers only response.
-            if str, ok := l.estdStreams[h.streamID]; ok {
-                if str.state != empty { // either active or waiting on stream quota.
-                    // add it str's list of items.
-                    str.itl.enqueue(h)
-                    return nil
-                }
-            }
-            if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
-                return err
-            }
-            return l.cleanupStreamHandler(h.cleanup)
+        str, ok := l.estdStreams[h.streamID]
+        if !ok {
+            warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+            return nil
         }
-        // Case 1.B: Server is responding back with headers.
-        str := &outStream{
-            state: empty,
-            itl:   &itemList{},
-            wq:    h.wq,
+        // Case 1.A: Server is responding back with headers.
+        if !h.endStream {
+            return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
         }
-        l.estdStreams[h.streamID] = str
-        return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
+        // else:  Case 1.B: Server wants to close stream.
+
+        if str.state != empty { // either active or waiting on stream quota.
+            // add it str's list of items.
+            str.itl.enqueue(h)
+            return nil
+        }
+        if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
+            return err
+        }
+        return l.cleanupStreamHandler(h.cleanup)
     }
     // Case 2: Client wants to originate stream.
     str := &outStream{

@ -632,6 +703,8 @@ func (l *loopyWriter) handle(i interface{}) error {
         return l.outgoingSettingsHandler(i)
     case *headerFrame:
         return l.headerHandler(i)
+    case *registerStream:
+        return l.registerStreamHandler(i)
     case *cleanupStream:
         return l.cleanupStreamHandler(i)
     case *incomingGoAway:

@ -664,26 +737,37 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error {
                     }
                 }
             }
+        case http2.SettingHeaderTableSize:
+            updateHeaderTblSize(l.hEnc, s.Val)
         }
     }
     return nil
 }

+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data and then puts it at the end of activeStreams if there's still more data
+// to be sent and stream has some stream-level flow control.
 func (l *loopyWriter) processData() (bool, error) {
     if l.sendQuota == 0 {
         return true, nil
     }
-    str := l.activeStreams.dequeue()
+    str := l.activeStreams.dequeue() // Remove the first stream.
     if str == nil {
         return true, nil
     }
-    dataItem := str.itl.peek().(*dataFrame)
-    if len(dataItem.h) == 0 && len(dataItem.d) == 0 {
+    dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
+    // A data item is represented by a dataFrame, since it later translates into
+    // multiple HTTP2 data frames.
+    // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
+    // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
+    // maximum possilbe HTTP2 frame size.
+
+    if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
         // Client sends out empty data frame with endStream = true
         if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
             return false, err
         }
-        str.itl.dequeue()
+        str.itl.dequeue() // remove the empty data item from stream
         if str.itl.isEmpty() {
             str.state = empty
         } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.

@ -712,21 +796,20 @@ func (l *loopyWriter) processData() (bool, error) {
     if len(buf) < size {
         size = len(buf)
     }
-    if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 {
+    if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
         str.state = waitingOnStreamQuota
         return false, nil
     } else if strQuota < size {
         size = strQuota
     }

-    if l.sendQuota < uint32(size) {
+    if l.sendQuota < uint32(size) { // connection-level flow control.
         size = int(l.sendQuota)
     }
     // Now that outgoing flow controls are checked we can replenish str's write quota
     str.wq.replenish(size)
     var endStream bool
-    // This last data message on this stream and all
-    // of it can be written in this go.
+    // If this is the last data message on this stream and all of it can be written in this iteration.
     if dataItem.endStream && size == len(buf) {
         // buf contains either data or it contains header but data is empty.
         if idx == 1 || len(dataItem.d) == 0 {
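The comments added above describe loopy's scheduling model: a FIFO of active streams, each turn writing at most one 16KB chunk for one stream, bounded by stream- and connection-level flow control. The toy program below models just that round-robin shape with made-up types; it is a simplified sketch, not the transport code.

```go
package main

import "fmt"

const maxChunk = 16 * 1024 // loopy caps each write at 16KB per stream turn

type stream struct {
	id      int
	pending []byte
}

// schedule drains the streams round-robin, bounded by a connection-level quota,
// mirroring the shape of loopyWriter.processData without any HTTP/2 details.
func schedule(active []*stream, connQuota int) {
	for len(active) > 0 && connQuota > 0 {
		str := active[0]
		active = active[1:] // dequeue the first stream

		size := maxChunk
		if size > len(str.pending) {
			size = len(str.pending)
		}
		if size > connQuota {
			size = connQuota
		}
		fmt.Printf("stream %d: write %d bytes\n", str.id, size)
		str.pending = str.pending[size:]
		connQuota -= size

		if len(str.pending) > 0 {
			active = append(active, str) // still has data: back of the queue
		}
	}
}

func main() {
	schedule([]*stream{
		{id: 1, pending: make([]byte, 40*1024)},
		{id: 3, pending: make([]byte, 10*1024)},
	}, 64*1024)
}
```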
@ -0,0 +1,49 @@
/* Copyright 2018 gRPC authors. Licensed under the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). */

package transport

import (
    "math"
    "time"
)

const (
    // The default value of flow control window size in HTTP2 spec.
    defaultWindowSize = 65535
    // The initial window size for flow control.
    initialWindowSize             = defaultWindowSize // for an RPC
    infinity                      = time.Duration(math.MaxInt64)
    defaultClientKeepaliveTime    = infinity
    defaultClientKeepaliveTimeout = 20 * time.Second
    defaultMaxStreamsClient       = 100
    defaultMaxConnectionIdle      = infinity
    defaultMaxConnectionAge       = infinity
    defaultMaxConnectionAgeGrace  = infinity
    defaultServerKeepaliveTime    = 2 * time.Hour
    defaultServerKeepaliveTimeout = 20 * time.Second
    defaultKeepalivePolicyMinTime = 5 * time.Minute
    // max window limit set by HTTP2 Specs.
    maxWindowSize = math.MaxInt32
    // defaultWriteQuota is the default value for number of data
    // bytes that each stream can schedule before some of it being
    // flushed out.
    defaultWriteQuota              = 64 * 1024
    defaultClientMaxHeaderListSize = uint32(16 << 20)
    defaultServerMaxHeaderListSize = uint32(16 << 20)
)
@ -23,30 +23,6 @@ import (
     "math"
     "sync"
     "sync/atomic"
-    "time"
-)
-
-const (
-    // The default value of flow control window size in HTTP2 spec.
-    defaultWindowSize = 65535
-    // The initial window size for flow control.
-    initialWindowSize             = defaultWindowSize // for an RPC
-    infinity                      = time.Duration(math.MaxInt64)
-    defaultClientKeepaliveTime    = infinity
-    defaultClientKeepaliveTimeout = 20 * time.Second
-    defaultMaxStreamsClient       = 100
-    defaultMaxConnectionIdle      = infinity
-    defaultMaxConnectionAge       = infinity
-    defaultMaxConnectionAgeGrace  = infinity
-    defaultServerKeepaliveTime    = 2 * time.Hour
-    defaultServerKeepaliveTimeout = 20 * time.Second
-    defaultKeepalivePolicyMinTime = 5 * time.Minute
-    // max window limit set by HTTP2 Specs.
-    maxWindowSize = math.MaxInt32
-    // defaultWriteQuota is the default value for number of data
-    // bytes that each stream can schedule before some of it being
-    // flushed out.
-    defaultWriteQuota = 64 * 1024
 )

 // writeQuota is a soft limit on the amount of data a stream can

@ -58,14 +34,20 @@ type writeQuota struct {
     ch chan struct{}
     // done is triggered in error case.
     done <-chan struct{}
+    // replenish is called by loopyWriter to give quota back to.
+    // It is implemented as a field so that it can be updated
+    // by tests.
+    replenish func(n int)
 }

 func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
-    return &writeQuota{
+    w := &writeQuota{
         quota: sz,
         ch:    make(chan struct{}, 1),
         done:  done,
     }
+    w.replenish = w.realReplenish
+    return w
 }

 func (w *writeQuota) get(sz int32) error {

@ -83,7 +65,7 @@ func (w *writeQuota) get(sz int32) error {
     }
 }

-func (w *writeQuota) replenish(n int) {
+func (w *writeQuota) realReplenish(n int) {
     sz := int32(n)
     a := atomic.AddInt32(&w.quota, sz)
     b := a - sz
@ -24,6 +24,7 @@
|
||||||
package transport
|
package transport
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
@ -34,7 +35,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"golang.org/x/net/context"
|
|
||||||
"golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
|
@ -63,9 +63,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||||
if _, ok := w.(http.Flusher); !ok {
|
if _, ok := w.(http.Flusher); !ok {
|
||||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
||||||
}
|
}
|
||||||
if _, ok := w.(http.CloseNotifier); !ok {
|
|
||||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
|
|
||||||
}
|
|
||||||
|
|
||||||
st := &serverHandlerTransport{
|
st := &serverHandlerTransport{
|
||||||
rw: w,
|
rw: w,
|
||||||
|
@ -80,7 +77,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||||
to, err := decodeTimeout(v)
|
to, err := decodeTimeout(v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err)
|
return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
|
||||||
}
|
}
|
||||||
st.timeoutSet = true
|
st.timeoutSet = true
|
||||||
st.timeout = to
|
st.timeout = to
|
||||||
|
@ -98,7 +95,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||||
for _, v := range vv {
|
for _, v := range vv {
|
||||||
v, err := decodeMetadataHeader(k, v)
|
v, err := decodeMetadataHeader(k, v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err)
|
return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
|
||||||
}
|
}
|
||||||
metakv = append(metakv, k, v)
|
metakv = append(metakv, k, v)
|
||||||
}
|
}
|
||||||
|
@ -176,17 +173,11 @@ func (a strAddr) String() string { return string(a) }
|
||||||
|
|
||||||
// do runs fn in the ServeHTTP goroutine.
|
// do runs fn in the ServeHTTP goroutine.
|
||||||
func (ht *serverHandlerTransport) do(fn func()) error {
|
func (ht *serverHandlerTransport) do(fn func()) error {
|
||||||
// Avoid a panic writing to closed channel. Imperfect but maybe good enough.
|
|
||||||
select {
|
select {
|
||||||
case <-ht.closedCh:
|
case <-ht.closedCh:
|
||||||
return ErrConnClosing
|
return ErrConnClosing
|
||||||
default:
|
case ht.writes <- fn:
|
||||||
select {
|
return nil
|
||||||
case ht.writes <- fn:
|
|
||||||
return nil
|
|
||||||
case <-ht.closedCh:
|
|
||||||
return ErrConnClosing
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
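This rewritten `do` is the heart of the fix referenced in the commit message (grpc/grpc-go#2695, the moby/moby#39053 panic): the writes channel is never closed, so a concurrent send cannot race against a `close` and panic; shutdown is signalled only through a separate done channel. A minimal sketch of that pattern, with illustrative names that are not gRPC's:

```go
package main

import (
	"errors"
	"fmt"
)

var errClosing = errors.New("transport is closing")

type worker struct {
	writes   chan func()
	closedCh chan struct{}
}

// do hands fn to the run loop without ever closing `writes`:
// shutdown is signalled only via closedCh, so a concurrent send
// can never hit a closed channel and panic.
func (w *worker) do(fn func()) error {
	select {
	case <-w.closedCh:
		return errClosing
	case w.writes <- fn:
		return nil
	}
}

func (w *worker) run() {
	for {
		select {
		case fn := <-w.writes:
			fn()
		case <-w.closedCh:
			return
		}
	}
}

func main() {
	w := &worker{writes: make(chan func()), closedCh: make(chan struct{})}
	go w.run()
	done := make(chan struct{})
	fmt.Println(w.do(func() { close(done) })) // <nil>
	<-done
	close(w.closedCh)
}
```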
@@ -237,9 +228,8 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 if ht.stats != nil {
 ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
 }
-ht.Close()
-close(ht.writes)
 }
+ht.Close()
 return err
 }

@@ -274,9 +264,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts
 ht.writeCommonHeaders(s)
 ht.rw.Write(hdr)
 ht.rw.Write(data)
-if !opts.Delay {
-ht.rw.(http.Flusher).Flush()
-}
+ht.rw.(http.Flusher).Flush()
 })
 }

@@ -309,7 +297,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
 // With this transport type there will be exactly 1 stream: this HTTP request.

-ctx := contextFromRequest(ht.req)
+ctx := ht.req.Context()
 var cancel context.CancelFunc
 if ht.timeoutSet {
 ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -317,22 +305,16 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
 ctx, cancel = context.WithCancel(ctx)
 }

-// requestOver is closed when either the request's context is done
-// or the status has been written via WriteStatus.
+// requestOver is closed when the status has been written via WriteStatus.
 requestOver := make(chan struct{})

-// clientGone receives a single value if peer is gone, either
-// because the underlying connection is dead or because the
-// peer sends an http2 RST_STREAM.
-clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
 go func() {
 select {
 case <-requestOver:
-return
 case <-ht.closedCh:
-case <-clientGone:
+case <-ht.req.Context().Done():
 }
 cancel()
+ht.Close()
 }()

 req := ht.req
@@ -409,10 +391,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
 func (ht *serverHandlerTransport) runStream() {
 for {
 select {
-case fn, ok := <-ht.writes:
-if !ok {
-return
-}
+case fn := <-ht.writes:
 fn()
 case <-ht.closedCh:
 return
@@ -434,18 +413,18 @@ func (ht *serverHandlerTransport) Drain() {
 // * io.EOF
 // * io.ErrUnexpectedEOF
 // * of type transport.ConnectionError
-// * of type transport.StreamError
+// * an error from the status package
 func mapRecvMsgError(err error) error {
 if err == io.EOF || err == io.ErrUnexpectedEOF {
 return err
 }
 if se, ok := err.(http2.StreamError); ok {
 if code, ok := http2ErrConvTab[se.Code]; ok {
-return StreamError{
-Code: code,
-Desc: se.Error(),
-}
+return status.Error(code, se.Error())
 }
 }
+if strings.Contains(err.Error(), "body closed by handler") {
+return status.Error(codes.Canceled, err.Error())
+}
 return connectionErrorf(true, err, err.Error())
 }
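A recurring theme in these hunks is replacing the transport-private `StreamError` type with errors built from the public `status` package. A small sketch of that mapping pattern, using only the public `google.golang.org/grpc/status` and `codes` APIs (the `toStatus` helper name is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"io"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatus mirrors the idea in the hunk above: pass io.EOF variants
// through untouched, wrap everything else in a *status.Status-backed error.
func toStatus(err error) error {
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		return err
	}
	return status.Error(codes.Canceled, err.Error())
}

func main() {
	err := toStatus(errors.New("body closed by handler"))
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message()) // Canceled body closed by handler
	}
}
```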
@@ -19,21 +19,24 @@
 package transport

 import (
+"context"
+"fmt"
 "io"
 "math"
 "net"
+"strconv"
 "strings"
 "sync"
 "sync/atomic"
 "time"

-"golang.org/x/net/context"
 "golang.org/x/net/http2"
 "golang.org/x/net/http2/hpack"

-"google.golang.org/grpc/channelz"
 "google.golang.org/grpc/codes"
 "google.golang.org/grpc/credentials"
+"google.golang.org/grpc/internal/channelz"
+"google.golang.org/grpc/internal/syscall"
 "google.golang.org/grpc/keepalive"
 "google.golang.org/grpc/metadata"
 "google.golang.org/grpc/peer"
@@ -72,22 +75,26 @@ type http2Client struct {

 isSecure bool

-creds []credentials.PerRPCCredentials
+perRPCCreds []credentials.PerRPCCredentials

 // Boolean to keep track of reading activity on transport.
 // 1 is true and 0 is false.
 activity uint32 // Accessed atomically.
 kp keepalive.ClientParameters
+keepaliveEnabled bool

 statsHandler stats.Handler

 initialWindowSize int32

+// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+maxSendHeaderListSize *uint32
+
 bdpEst *bdpEstimator
-// onSuccess is a callback that client transport calls upon
+// onPrefaceReceipt is a callback that client transport calls upon
 // receiving server preface to signal that a succefull HTTP2
 // connection was established.
-onSuccess func()
+onPrefaceReceipt func()

 maxConcurrentStreams uint32
 streamQuota int64
@@ -106,26 +113,17 @@ type http2Client struct {

 // Fields below are for channelz metric collection.
 channelzID int64 // channelz unique identification number
-czmu sync.RWMutex
-kpCount int64
-// The number of streams that have started, including already finished ones.
-streamsStarted int64
-// The number of streams that have ended successfully by receiving EoS bit set
-// frame from server.
-streamsSucceeded int64
-streamsFailed int64
-lastStreamCreated time.Time
-msgSent int64
-msgRecv int64
-lastMsgSent time.Time
-lastMsgRecv time.Time
+czData *channelzData
+onGoAway func(GoAwayReason)
+onClose func()
 }

 func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
 if fn != nil {
 return fn(ctx, addr)
 }
-return dialContext(ctx, "tcp", addr)
+return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
 }

 func isTemporary(err error) bool {
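The new `keepaliveEnabled` field is driven by `keepalive.ClientParameters`, which on the public surface is set through a dial option. A hedged sketch of how a client would enable it (the target address `localhost:50051` is hypothetical, and `WithInsecure` is used only to keep the example plaintext):

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Hypothetical target; any gRPC server address works here.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second, // ping after 30s of inactivity
			Timeout:             20 * time.Second, // wait 20s for the ping ack
			PermitWithoutStream: true,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```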
@@ -147,7 +145,7 @@ func isTemporary(err error) bool {
 // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
 scheme := "http"
 ctx, cancel := context.WithCancel(ctx)
 defer func() {
@@ -169,18 +167,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 conn.Close()
 }
 }(conn)
-var (
-isSecure bool
-authInfo credentials.AuthInfo
-)
-if creds := opts.TransportCredentials; creds != nil {
-scheme = "https"
-conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
-if err != nil {
-return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
-}
-isSecure = true
-}
 kp := opts.KeepaliveParams
 // Validate keepalive parameters.
 if kp.Time == 0 {
@@ -189,19 +175,47 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 if kp.Timeout == 0 {
 kp.Timeout = defaultClientKeepaliveTimeout
 }
+keepaliveEnabled := false
+if kp.Time != infinity {
+if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+}
+keepaliveEnabled = true
+}
+var (
+isSecure bool
+authInfo credentials.AuthInfo
+)
+transportCreds := opts.TransportCredentials
+perRPCCreds := opts.PerRPCCredentials
+
+if b := opts.CredsBundle; b != nil {
+if t := b.TransportCredentials(); t != nil {
+transportCreds = t
+}
+if t := b.PerRPCCredentials(); t != nil {
+perRPCCreds = append(perRPCCreds, t)
+}
+}
+if transportCreds != nil {
+scheme = "https"
+conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
+if err != nil {
+return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
+}
+isSecure = true
+}
 dynamicWindow := true
 icwz := int32(initialWindowSize)
 if opts.InitialConnWindowSize >= defaultWindowSize {
 icwz = opts.InitialConnWindowSize
 dynamicWindow = false
 }
-writeBufSize := defaultWriteBufSize
-if opts.WriteBufferSize > 0 {
-writeBufSize = opts.WriteBufferSize
-}
-readBufSize := defaultReadBufSize
-if opts.ReadBufferSize > 0 {
-readBufSize = opts.ReadBufferSize
+writeBufSize := opts.WriteBufferSize
+readBufSize := opts.ReadBufferSize
+maxHeaderListSize := defaultClientMaxHeaderListSize
+if opts.MaxHeaderListSize != nil {
+maxHeaderListSize = *opts.MaxHeaderListSize
 }
 t := &http2Client{
 ctx: ctx,
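The new code calls gRPC's internal `syscall.SetTCPUserTimeout` whenever keepalive is active, so that unacknowledged data fails the connection instead of hanging. The sketch below shows the same idea using `golang.org/x/sys/unix` directly rather than gRPC's internal package; it is Linux-only and the helper name is an assumption, not gRPC's API.

```go
//go:build linux

package main

import (
	"log"
	"net"
	"time"

	"golang.org/x/sys/unix"
)

// setTCPUserTimeout applies TCP_USER_TIMEOUT so unacknowledged data fails
// the connection after d instead of lingering for the kernel default.
func setTCPUserTimeout(conn net.Conn, d time.Duration) error {
	tc, ok := conn.(*net.TCPConn)
	if !ok {
		return nil // not TCP; nothing to do
	}
	raw, err := tc.SyscallConn()
	if err != nil {
		return err
	}
	var sockErr error
	if err := raw.Control(func(fd uintptr) {
		sockErr = unix.SetsockoptInt(int(fd), unix.IPPROTO_TCP,
			unix.TCP_USER_TIMEOUT, int(d/time.Millisecond))
	}); err != nil {
		return err
	}
	return sockErr
}

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println(setTCPUserTimeout(conn, 20*time.Second))
}
```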
@@ -217,20 +231,24 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 writerDone: make(chan struct{}),
 goAway: make(chan struct{}),
 awakenKeepalive: make(chan struct{}, 1),
-framer: newFramer(conn, writeBufSize, readBufSize),
+framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
 fc: &trInFlow{limit: uint32(icwz)},
 scheme: scheme,
 activeStreams: make(map[uint32]*Stream),
 isSecure: isSecure,
-creds: opts.PerRPCCredentials,
+perRPCCreds: perRPCCreds,
 kp: kp,
 statsHandler: opts.StatsHandler,
 initialWindowSize: initialWindowSize,
-onSuccess: onSuccess,
+onPrefaceReceipt: onPrefaceReceipt,
 nextID: 1,
 maxConcurrentStreams: defaultMaxStreamsClient,
 streamQuota: defaultMaxStreamsClient,
 streamsQuotaAvailable: make(chan struct{}, 1),
+czData: new(channelzData),
+onGoAway: onGoAway,
+onClose: onClose,
+keepaliveEnabled: keepaliveEnabled,
 }
 t.controlBuf = newControlBuffer(t.ctxDone)
 if opts.InitialWindowSize >= defaultWindowSize {
@@ -257,12 +275,16 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 t.statsHandler.HandleConn(t.ctx, connBegin)
 }
 if channelz.IsOn() {
-t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "")
+t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+}
+if t.keepaliveEnabled {
+go t.keepalive()
 }
 // Start the reader goroutine for incoming message. Each transport has
 // a dedicated goroutine which reads HTTP2 frame from network. Then it
 // dispatches the frame to the corresponding stream entity.
 go t.reader()

 // Send connection preface to server.
 n, err := t.conn.Write(clientPreface)
 if err != nil {
@@ -273,14 +295,21 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 t.Close()
 return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
 }
+var ss []http2.Setting
+
 if t.initialWindowSize != defaultWindowSize {
-err = t.framer.fr.WriteSettings(http2.Setting{
+ss = append(ss, http2.Setting{
 ID: http2.SettingInitialWindowSize,
 Val: uint32(t.initialWindowSize),
 })
-} else {
-err = t.framer.fr.WriteSettings()
 }
+if opts.MaxHeaderListSize != nil {
+ss = append(ss, http2.Setting{
+ID: http2.SettingMaxHeaderListSize,
+Val: *opts.MaxHeaderListSize,
+})
+}
+err = t.framer.fr.WriteSettings(ss...)
 if err != nil {
 t.Close()
 return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
@@ -292,16 +321,23 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
 return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
 }
 }
-t.framer.writer.Flush()
+if err := t.framer.writer.Flush(); err != nil {
+return nil, err
+}
 go func() {
 t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
-t.loopy.run()
-t.conn.Close()
+err := t.loopy.run()
+if err != nil {
+errorf("transport: loopyWriter.run returning. Err: %v", err)
+}
+// If it's a connection error, let reader goroutine handle it
+// since there might be data in the buffers.
+if _, ok := err.(net.Error); !ok {
+t.conn.Close()
+}
 close(t.writerDone)
 }()
-if t.kp.Time != infinity {
-go t.keepalive()
-}
 return t, nil
 }

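The client now collects its SETTINGS (initial window size, max header list size) into a slice and writes them in a single frame. A small sketch of that pattern with the public `golang.org/x/net/http2` framer; the buffer target and the concrete values are illustrative only.

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, nil) // write-only framer for this sketch

	// Collect settings first, then emit one SETTINGS frame, mirroring the
	// `ss = append(ss, ...)` pattern in the hunk above.
	var ss []http2.Setting
	ss = append(ss, http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20})
	ss = append(ss, http2.Setting{ID: http2.SettingMaxHeaderListSize, Val: 16 << 20})

	if err := fr.WriteSettings(ss...); err != nil {
		fmt.Println("write settings:", err)
		return
	}
	fmt.Printf("SETTINGS frame is %d bytes on the wire\n", buf.Len())
}
```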
@@ -328,6 +364,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 ctx: s.ctx,
 ctxDone: s.ctx.Done(),
 recv: s.buf,
+closeStream: func(err error) {
+t.CloseStream(s, err)
+},
 },
 windowHandler: func(n int) {
 t.updateWindow(s, uint32(n))
@@ -370,6 +409,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
 headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
 headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
 headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
+if callHdr.PreviousAttempts > 0 {
+headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
+}

 if callHdr.SendCompress != "" {
 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
@@ -377,7 +419,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
 if dl, ok := ctx.Deadline(); ok {
 // Send out timeout regardless its value. The server can detect timeout context by itself.
 // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
-timeout := dl.Sub(time.Now())
+timeout := time.Until(dl)
 headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
 }
 for k, v := range authData {
@@ -433,7 +475,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)

 func (t *http2Client) createAudience(callHdr *CallHdr) string {
 // Create an audience string only if needed.
-if len(t.creds) == 0 && callHdr.Creds == nil {
+if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
 return ""
 }
 // Construct URI required to get auth request metadata.
@@ -448,14 +490,14 @@ func (t *http2Client) createAudience(callHdr *CallHdr) string {

 func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
 authData := map[string]string{}
-for _, c := range t.creds {
+for _, c := range t.perRPCCreds {
 data, err := c.GetRequestMetadata(ctx, audience)
 if err != nil {
 if _, ok := status.FromError(err); ok {
 return nil, err
 }

-return nil, streamErrorf(codes.Unauthenticated, "transport: %v", err)
+return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
 }
 for k, v := range data {
 // Capital header names are illegal in HTTP/2.
@@ -473,11 +515,11 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
 // options, then both sets of credentials will be applied.
 if callCreds := callHdr.Creds; callCreds != nil {
 if !t.isSecure && callCreds.RequireTransportSecurity() {
-return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
 }
 data, err := callCreds.GetRequestMetadata(ctx, audience)
 if err != nil {
-return nil, streamErrorf(codes.Internal, "transport: %v", err)
+return nil, status.Errorf(codes.Internal, "transport: %v", err)
 }
 for k, v := range data {
 // Capital header names are illegal in HTTP/2
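The `perRPCCreds` the transport iterates over here are supplied by user code implementing `credentials.PerRPCCredentials`. A minimal sketch of such an implementation (the `tokenCreds` type and its bearer token are made up for illustration):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
)

// tokenCreds is a minimal per-RPC credential of the kind getTrAuthData
// loops over above.
type tokenCreds struct{ token string }

// Compile-time check that the interface is satisfied.
var _ credentials.PerRPCCredentials = tokenCreds{}

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	// Header names must be lower-case: capital names are illegal in HTTP/2.
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (c tokenCreds) RequireTransportSecurity() bool { return true }

func main() {
	md, _ := tokenCreds{token: "abc123"}.GetRequestMetadata(context.Background(),
		"https://example.com/pkg.Service")
	fmt.Println(md)
}
```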
@@ -529,15 +571,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 }
 t.activeStreams[id] = s
 if channelz.IsOn() {
-t.czmu.Lock()
-t.streamsStarted++
-t.lastStreamCreated = time.Now()
-t.czmu.Unlock()
+atomic.AddInt64(&t.czData.streamsStarted, 1)
+atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
 }
 var sendPing bool
 // If the number of active streams change from 0 to 1, then check if keepalive
 // has gone dormant. If so, wake it up.
-if len(t.activeStreams) == 1 {
+if len(t.activeStreams) == 1 && t.keepaliveEnabled {
 select {
 case t.awakenKeepalive <- struct{}{}:
 sendPing = true
@@ -581,14 +621,40 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
 }
 return true
 }
+var hdrListSizeErr error
+checkForHeaderListSize := func(it interface{}) bool {
+if t.maxSendHeaderListSize == nil {
+return true
+}
+hdrFrame := it.(*headerFrame)
+var sz int64
+for _, f := range hdrFrame.hf {
+if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
+return false
+}
+}
+return true
+}
 for {
-success, err := t.controlBuf.executeAndPut(checkForStreamQuota, hdr)
+success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
+if !checkForStreamQuota(it) {
+return false
+}
+if !checkForHeaderListSize(it) {
+return false
+}
+return true
+}, hdr)
 if err != nil {
 return nil, err
 }
 if success {
 break
 }
+if hdrListSizeErr != nil {
+return nil, hdrListSizeErr
+}
 firstTry = false
 select {
 case <-ch:
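The new `checkForHeaderListSize` closure uses the HPACK accounting rule (each entry costs name length + value length + 32 octets, per RFC 7541) via `hpack.HeaderField.Size()`. A standalone sketch of the same check; the header fields and the 8 KiB limit are illustrative.

```go
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	hf := []hpack.HeaderField{
		{Name: ":method", Value: "POST"},
		{Name: ":path", Value: "/pkg.Service/Call"},
		{Name: "content-type", Value: "application/grpc"},
	}

	// Same accounting as the hunk above: Size() returns
	// len(name)+len(value)+32 for each entry.
	const limit = 8 * 1024
	var sz int64
	for _, f := range hf {
		if sz += int64(f.Size()); sz > limit {
			fmt.Println("header list too large for peer")
			return
		}
	}
	fmt.Println("header list size:", sz)
}
```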
@@ -624,13 +690,15 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
 rst = true
 rstCode = http2.ErrCodeCancel
 }
-t.closeStream(s, err, rst, rstCode, nil, nil, false)
+t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
 }

 func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
 // Set stream status to done.
 if s.swapState(streamDone) == streamDone {
-// If it was already done, return.
+// If it was already done, return. If multiple closeStream calls
+// happen simultaneously, wait for the first to finish.
+<-s.done
 return
 }
 // status and trailers can be updated here without any synchronization because the stream goroutine will
@@ -644,10 +712,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
 // This will unblock reads eventually.
 s.write(recvMsg{err: err})
 }
-// This will unblock write.
-close(s.done)
 // If headerChan isn't closed, then close it.
 if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+s.noHeaders = true
 close(s.headerChan)
 }
 cleanup := &cleanupStream{
@@ -659,13 +726,11 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
 }
 t.mu.Unlock()
 if channelz.IsOn() {
-t.czmu.Lock()
 if eosReceived {
-t.streamsSucceeded++
+atomic.AddInt64(&t.czData.streamsSucceeded, 1)
 } else {
-t.streamsFailed++
+atomic.AddInt64(&t.czData.streamsFailed, 1)
 }
-t.czmu.Unlock()
 }
 },
 rst: rst,
@@ -682,11 +747,17 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
 return true
 }
 t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
+// This will unblock write.
+close(s.done)
 }

 // Close kicks off the shutdown process of the transport. This should be called
 // only once on a transport. Once it is called, the transport should not be
 // accessed any more.
+//
+// This method blocks until the addrConn that initiated this transport is
+// re-connected. This happens because t.onClose() begins reconnect logic at the
+// addrConn level and blocks until the addrConn is successfully connected.
 func (t *http2Client) Close() error {
 t.mu.Lock()
 // Make sure we only Close once.
@@ -706,7 +777,7 @@ func (t *http2Client) Close() error {
 }
 // Notify all active streams.
 for _, s := range streams {
-t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, nil, nil, false)
+t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
 }
 if t.statsHandler != nil {
 connEnd := &stats.ConnEnd{
@@ -714,6 +785,7 @@ func (t *http2Client) Close() error {
 }
 t.statsHandler.HandleConn(t.ctx, connEnd)
 }
+t.onClose()
 return err
 }

@@ -735,6 +807,7 @@ func (t *http2Client) GracefulClose() error {
 if active == 0 {
 return t.Close()
 }
+t.controlBuf.put(&incomingGoAway{})
 return nil
 }

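Several hunks in this file trade the `czmu` read-write mutex for lock-free `sync/atomic` counters stored in `czData`. A self-contained sketch of that shift, using only the standard library (the `metrics` type is illustrative, not gRPC's `channelzData`):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// metrics replaces a RWMutex-guarded struct with lock-free counters,
// the same shift the czmu -> czData changes above make.
type metrics struct {
	msgSent         int64
	lastMsgSentTime int64 // unix nanoseconds
}

func (m *metrics) incrMsgSent() {
	atomic.AddInt64(&m.msgSent, 1)
	atomic.StoreInt64(&m.lastMsgSentTime, time.Now().UnixNano())
}

func (m *metrics) snapshot() (int64, time.Time) {
	return atomic.LoadInt64(&m.msgSent),
		time.Unix(0, atomic.LoadInt64(&m.lastMsgSentTime))
}

func main() {
	var m metrics
	m.incrMsgSent()
	n, at := m.snapshot()
	fmt.Println(n, at.Format(time.RFC3339))
}
```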
@@ -899,6 +972,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
 warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
 statusCode = codes.Unknown
 }
+if statusCode == codes.Canceled {
+// Our deadline was already exceeded, and that was likely the cause of
+// this cancelation. Alter the status code accordingly.
+if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
+statusCode = codes.DeadlineExceeded
+}
+}
 t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
 }

@@ -908,13 +988,20 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
 }
 var maxStreams *uint32
 var ss []http2.Setting
+var updateFuncs []func()
 f.ForeachSetting(func(s http2.Setting) error {
-if s.ID == http2.SettingMaxConcurrentStreams {
+switch s.ID {
+case http2.SettingMaxConcurrentStreams:
 maxStreams = new(uint32)
 *maxStreams = s.Val
-return nil
+case http2.SettingMaxHeaderListSize:
+updateFuncs = append(updateFuncs, func() {
+t.maxSendHeaderListSize = new(uint32)
+*t.maxSendHeaderListSize = s.Val
+})
+default:
+ss = append(ss, s)
 }
-ss = append(ss, s)
 return nil
 })
 if isFirst && maxStreams == nil {
@@ -924,21 +1011,24 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
 sf := &incomingSettings{
 ss: ss,
 }
-if maxStreams == nil {
-t.controlBuf.put(sf)
-return
+if maxStreams != nil {
+updateStreamQuota := func() {
+delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
+t.maxConcurrentStreams = *maxStreams
+t.streamQuota += delta
+if delta > 0 && t.waitingStreams > 0 {
+close(t.streamsQuotaAvailable) // wake all of them up.
+t.streamsQuotaAvailable = make(chan struct{}, 1)
+}
+}
+updateFuncs = append(updateFuncs, updateStreamQuota)
 }
-updateStreamQuota := func(interface{}) bool {
-delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
-t.maxConcurrentStreams = *maxStreams
-t.streamQuota += delta
-if delta > 0 && t.waitingStreams > 0 {
-close(t.streamsQuotaAvailable) // wake all of them up.
-t.streamsQuotaAvailable = make(chan struct{}, 1)
+t.controlBuf.executeAndPut(func(interface{}) bool {
+for _, f := range updateFuncs {
+f()
 }
 return true
-}
-t.controlBuf.executeAndPut(updateStreamQuota, sf)
+}, sf)
 }

 func (t *http2Client) handlePing(f *http2.PingFrame) {
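The RST_STREAM handler above prefers reporting `DeadlineExceeded` over `Canceled` when the stream's deadline has already passed, since an expired deadline is the more informative cause. A small sketch of deriving that decision from a context; the helper name is made up and this is the general idea, not the vendored code.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
)

// codeForCancel reports DeadlineExceeded instead of Canceled when the
// context's deadline has already passed.
func codeForCancel(ctx context.Context) codes.Code {
	if d, ok := ctx.Deadline(); ok && !d.After(time.Now()) {
		return codes.DeadlineExceeded
	}
	return codes.Canceled
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(5 * time.Millisecond)
	fmt.Println(codeForCancel(ctx)) // DeadlineExceeded
}
```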
@@ -992,6 +1082,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 close(t.goAway)
 t.state = draining
 t.controlBuf.put(&incomingGoAway{})
+
+// This has to be a new goroutine because we're still using the current goroutine to read in the transport.
+t.onGoAway(t.goAwayReason)
 }
 // All streams with IDs greater than the GoAwayId
 // and smaller than the previous GoAway ID should be killed.
@@ -1047,15 +1140,27 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 if !ok {
 return
 }
+endStream := frame.StreamEnded()
 atomic.StoreUint32(&s.bytesReceived, 1)
-var state decodeState
-if err := state.decodeResponseHeader(frame); err != nil {
-t.closeStream(s, err, true, http2.ErrCodeProtocol, nil, nil, false)
-// Something wrong. Stops reading even when there is remaining.
+initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0
+
+if !initialHeader && !endStream {
+// As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear
+// at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set.
+st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
+t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
+return
+}
+
+state := &decodeState{}
+// Initialize isGRPC value to be !initialHeader, since if a gRPC ResponseHeader has been received
+// which indicates peer speaking gRPC, we are in gRPC mode.
+state.data.isGRPC = !initialHeader
+if err := state.decodeHeader(frame); err != nil {
+t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
 return
 }

-endStream := frame.StreamEnded()
 var isHeader bool
 defer func() {
 if t.statsHandler != nil {
@@ -1074,25 +1179,30 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 }
 }
 }()

 // If headers haven't been received yet.
-if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+if initialHeader {
 if !endStream {
-// Headers frame is not actually a trailers-only frame.
+// Headers frame is ResponseHeader.
 isHeader = true
 // These values can be set without any synchronization because
 // stream goroutine will read it only after seeing a closed
 // headerChan which we'll close after setting this.
-s.recvCompress = state.encoding
-if len(state.mdata) > 0 {
-s.header = state.mdata
+s.recvCompress = state.data.encoding
+if len(state.data.mdata) > 0 {
+s.header = state.data.mdata
 }
+close(s.headerChan)
+return
 }
+// Headers frame is Trailers-only.
+s.noHeaders = true
 close(s.headerChan)
 }
-if !endStream {
-return
-}
-t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true)
+// if client received END_STREAM from server while stream was still active, send RST_STREAM
+rst := s.getState() == streamActive
+t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
 }

 // reader runs as a separate goroutine in charge of reading data from network
@@ -1106,22 +1216,27 @@ func (t *http2Client) reader() {
 // Check the validity of server preface.
 frame, err := t.framer.fr.ReadFrame()
 if err != nil {
-t.Close()
+t.Close() // this kicks off resetTransport, so must be last before return
 return
 }
-atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
+if t.keepaliveEnabled {
+atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+}
 sf, ok := frame.(*http2.SettingsFrame)
 if !ok {
-t.Close()
+t.Close() // this kicks off resetTransport, so must be last before return
 return
 }
-t.onSuccess()
+t.onPrefaceReceipt()
 t.handleSettings(sf, true)

 // loop to keep reading incoming messages on this transport.
 for {
 frame, err := t.framer.fr.ReadFrame()
-atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+if t.keepaliveEnabled {
+atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+}
 if err != nil {
 // Abort an active stream if the http2.Framer returns a
 // http2.StreamError. This can happen only if the server's response
@@ -1132,7 +1247,9 @@ func (t *http2Client) reader() {
 t.mu.Unlock()
 if s != nil {
 // use error detail to provide better err message
-t.closeStream(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()), true, http2.ErrCodeProtocol, nil, nil, false)
+code := http2ErrConvTab[se.Code]
+msg := t.framer.fr.ErrorDetail().Error()
+t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
 }
 continue
 } else {
@@ -1189,9 +1306,7 @@ func (t *http2Client) keepalive() {
 } else {
 t.mu.Unlock()
 if channelz.IsOn() {
-t.czmu.Lock()
-t.kpCount++
-t.czmu.Unlock()
+atomic.AddInt64(&t.czData.kpCount, 1)
 }
 // Send ping.
 t.controlBuf.put(p)
@@ -1231,41 +1346,39 @@ func (t *http2Client) GoAway() <-chan struct{} {
 }

 func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
-t.czmu.RLock()
 s := channelz.SocketInternalMetric{
-StreamsStarted: t.streamsStarted,
-StreamsSucceeded: t.streamsSucceeded,
-StreamsFailed: t.streamsFailed,
-MessagesSent: t.msgSent,
-MessagesReceived: t.msgRecv,
-KeepAlivesSent: t.kpCount,
-LastLocalStreamCreatedTimestamp: t.lastStreamCreated,
-LastMessageSentTimestamp: t.lastMsgSent,
-LastMessageReceivedTimestamp: t.lastMsgRecv,
+StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
+StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
+StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
+MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
+MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
+KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
+LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
 LocalFlowControlWindow: int64(t.fc.getSize()),
-//socket options
+SocketOptions: channelz.GetSocketOption(t.conn),
 LocalAddr: t.localAddr,
 RemoteAddr: t.remoteAddr,
-// Security
 // RemoteName :
 }
-t.czmu.RUnlock()
+if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+s.Security = au.GetSecurityValue()
+}
 s.RemoteFlowControlWindow = t.getOutFlowWindow()
 return &s
 }

+func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
+
 func (t *http2Client) IncrMsgSent() {
-t.czmu.Lock()
-t.msgSent++
-t.lastMsgSent = time.Now()
-t.czmu.Unlock()
+atomic.AddInt64(&t.czData.msgSent, 1)
+atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
 }

 func (t *http2Client) IncrMsgRecv() {
-t.czmu.Lock()
-t.msgRecv++
-t.lastMsgRecv = time.Now()
-t.czmu.Unlock()
+atomic.AddInt64(&t.czData.msgRecv, 1)
+atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
 }

 func (t *http2Client) getOutFlowWindow() int64 {
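The reader goroutine above now clears the connection's read deadline with `SetReadDeadline(time.Time{})` once the server preface arrives, so a deadline only bounds the handshake, not the steady state. A standard-library sketch of that pattern; the target host and timeout values are illustrative.

```go
package main

import (
	"log"
	"net"
	"time"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Provoke a response so the handshake read has something to return.
	conn.Write([]byte("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n"))

	// Bound only the first read, then clear the deadline once data arrives --
	// the same SetReadDeadline(time.Time{}) reset the reader performs after
	// the server preface.
	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
	buf := make([]byte, 1)
	if _, err := conn.Read(buf); err != nil {
		log.Println("handshake read:", err)
	}
	conn.SetReadDeadline(time.Time{}) // no deadline for the steady state
}
```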
@@ -20,11 +20,11 @@ package transport

 import (
 "bytes"
+"context"
 "errors"
 "fmt"
 "io"
 "math"
-"math/rand"
 "net"
 "strconv"
 "sync"
@@ -32,13 +32,14 @@ import (
 "time"

 "github.com/golang/protobuf/proto"
-"golang.org/x/net/context"
 "golang.org/x/net/http2"
 "golang.org/x/net/http2/hpack"

-"google.golang.org/grpc/channelz"
 "google.golang.org/grpc/codes"
 "google.golang.org/grpc/credentials"
+"google.golang.org/grpc/grpclog"
+"google.golang.org/grpc/internal/channelz"
+"google.golang.org/grpc/internal/grpcrand"
 "google.golang.org/grpc/keepalive"
 "google.golang.org/grpc/metadata"
 "google.golang.org/grpc/peer"
@@ -47,9 +48,14 @@ import (
 "google.golang.org/grpc/tap"
 )

-// ErrIllegalHeaderWrite indicates that setting header is illegal because of
-// the stream's state.
-var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+var (
+// ErrIllegalHeaderWrite indicates that setting header is illegal because of
+// the stream's state.
+ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
+// than the limit set by peer.
+ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
+)

 // http2Server implements the ServerTransport interface with HTTP2.
 type http2Server struct {
@@ -88,9 +94,10 @@ type http2Server struct {
 // Flag to signify that number of ping strikes should be reset to 0.
 // This is set whenever data or header frames are sent.
 // 1 means yes.
 resetPingStrikes uint32 // Accessed atomically.
 initialWindowSize int32
 bdpEst *bdpEstimator
+maxSendHeaderListSize *uint32

 mu sync.Mutex // guard the following

@@ -111,33 +118,19 @@ type http2Server struct {

 // Fields below are for channelz metric collection.
 channelzID int64 // channelz unique identification number
-czmu sync.RWMutex
-kpCount int64
-// The number of streams that have started, including already finished ones.
-streamsStarted int64
-// The number of streams that have ended successfully by sending frame with
-// EoS bit set.
-streamsSucceeded int64
-streamsFailed int64
-lastStreamCreated time.Time
-msgSent int64
-msgRecv int64
-lastMsgSent time.Time
-lastMsgRecv time.Time
+czData *channelzData
 }

 // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
 // returned if something goes wrong.
 func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
-writeBufSize := defaultWriteBufSize
-if config.WriteBufferSize > 0 {
-writeBufSize = config.WriteBufferSize
+writeBufSize := config.WriteBufferSize
+readBufSize := config.ReadBufferSize
+maxHeaderListSize := defaultServerMaxHeaderListSize
+if config.MaxHeaderListSize != nil {
+maxHeaderListSize = *config.MaxHeaderListSize
 }
-readBufSize := defaultReadBufSize
-if config.ReadBufferSize > 0 {
-readBufSize = config.ReadBufferSize
-}
-framer := newFramer(conn, writeBufSize, readBufSize)
+framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
 // Send initial settings as connection preface to client.
 var isettings []http2.Setting
 // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
@@ -167,6 +160,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 ID: http2.SettingInitialWindowSize,
 Val: uint32(iwz)})
 }
+if config.MaxHeaderListSize != nil {
+isettings = append(isettings, http2.Setting{
+ID: http2.SettingMaxHeaderListSize,
+Val: *config.MaxHeaderListSize,
+})
+}
 if err := framer.fr.WriteSettings(isettings...); err != nil {
 return nil, connectionErrorf(false, err, "transport: %v", err)
 }
@@ -220,6 +219,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 idle: time.Now(),
 kep: kep,
 initialWindowSize: iwz,
+czData: new(channelzData),
 }
 t.controlBuf = newControlBuffer(t.ctxDone)
 if dynamicWindow {
@@ -237,7 +237,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 t.stats.HandleConn(t.ctx, connBegin)
 }
 if channelz.IsOn() {
-t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "")
+t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
 }
 t.framer.writer.Flush()

@@ -273,7 +273,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 go func() {
 t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
 t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
-t.loopy.run()
+if err := t.loopy.run(); err != nil {
+errorf("transport: loopyWriter.run returning. Err: %v", err)
+}
 t.conn.Close()
 close(t.writerDone)
 }()
@@ -282,21 +284,21 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
 }

 // operateHeader takes action on the decoded headers.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
 streamID := frame.Header().StreamID
-var state decodeState
-for _, hf := range frame.Fields {
-if err := state.processHeaderField(hf); err != nil {
-if se, ok := err.(StreamError); ok {
-t.controlBuf.put(&cleanupStream{
-streamID: streamID,
-rst: true,
-rstCode: statusCodeConvTab[se.Code],
-onWrite: func() {},
-})
-}
-return
+state := &decodeState{
+serverSide: true,
+}
+if err := state.decodeHeader(frame); err != nil {
+if se, ok := status.FromError(err); ok {
+t.controlBuf.put(&cleanupStream{
+streamID: streamID,
+rst: true,
+rstCode: statusCodeConvTab[se.Code()],
+onWrite: func() {},
+})
 }
+return false
 }

 buf := newRecvBuffer()
@@ -305,16 +307,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 st: t,
 buf: buf,
 fc: &inFlow{limit: uint32(t.initialWindowSize)},
-recvCompress: state.encoding,
-method: state.method,
-contentSubtype: state.contentSubtype,
+recvCompress: state.data.encoding,
+method: state.data.method,
+contentSubtype: state.data.contentSubtype,
 }
 if frame.StreamEnded() {
 // s is just created by the caller. No lock needed.
 s.state = streamReadDone
 }
-if state.timeoutSet {
-s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
+if state.data.timeoutSet {
+s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
 } else {
 s.ctx, s.cancel = context.WithCancel(t.ctx)
 }
@@ -327,19 +329,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 }
 s.ctx = peer.NewContext(s.ctx, pr)
 // Attach the received metadata to the context.
-if len(state.mdata) > 0 {
-s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
+if len(state.data.mdata) > 0 {
+s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
 }
-if state.statsTags != nil {
-s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags)
+if state.data.statsTags != nil {
+s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
 }
-if state.statsTrace != nil {
-s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace)
+if state.data.statsTrace != nil {
+s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
 }
 if t.inTapHandle != nil {
 var err error
 info := &tap.Info{
-FullMethodName: state.method,
+FullMethodName: state.data.method,
 }
 s.ctx, err = t.inTapHandle(s.ctx, info)
 if err != nil {
@@ -350,13 +352,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 rstCode: http2.ErrCodeRefusedStream,
 onWrite: func() {},
 })
-return
+return false
 }
 }
 t.mu.Lock()
 if t.state != reachable {
 t.mu.Unlock()
-return
+return false
 }
 if uint32(len(t.activeStreams)) >= t.maxStreams {
 t.mu.Unlock()
@@ -366,7 +368,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 rstCode: http2.ErrCodeRefusedStream,
 onWrite: func() {},
 })
-return
+return false
 }
 if streamID%2 != 1 || streamID <= t.maxStreamID {
|
||||||
t.mu.Unlock()
|
t.mu.Unlock()
|
||||||
|
@ -381,10 +383,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||||
}
|
}
|
||||||
t.mu.Unlock()
|
t.mu.Unlock()
|
||||||
if channelz.IsOn() {
|
if channelz.IsOn() {
|
||||||
t.czmu.Lock()
|
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
||||||
t.streamsStarted++
|
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
|
||||||
t.lastStreamCreated = time.Now()
|
|
||||||
t.czmu.Unlock()
|
|
||||||
}
|
}
|
||||||
s.requestRead = func(n int) {
|
s.requestRead = func(n int) {
|
||||||
t.adjustWindow(s, uint32(n))
|
t.adjustWindow(s, uint32(n))
|
||||||
|
@ -413,8 +413,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||||
t.updateWindow(s, uint32(n))
|
t.updateWindow(s, uint32(n))
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
// Register the stream with loopy.
|
||||||
|
t.controlBuf.put(®isterStream{
|
||||||
|
streamID: s.id,
|
||||||
|
wq: s.wq,
|
||||||
|
})
|
||||||
handle(s)
|
handle(s)
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleStreams receives incoming streams using the given handler. This is
|
// HandleStreams receives incoming streams using the given handler. This is
|
||||||
|
@ -432,7 +437,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||||
s := t.activeStreams[se.StreamID]
|
s := t.activeStreams[se.StreamID]
|
||||||
t.mu.Unlock()
|
t.mu.Unlock()
|
||||||
if s != nil {
|
if s != nil {
|
||||||
t.closeStream(s, true, se.Code, nil, false)
|
t.closeStream(s, true, se.Code, false)
|
||||||
} else {
|
} else {
|
||||||
t.controlBuf.put(&cleanupStream{
|
t.controlBuf.put(&cleanupStream{
|
||||||
streamID: se.StreamID,
|
streamID: se.StreamID,
|
||||||
|
@ -574,7 +579,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
|
||||||
}
|
}
|
||||||
if size > 0 {
|
if size > 0 {
|
||||||
if err := s.fc.onData(size); err != nil {
|
if err := s.fc.onData(size); err != nil {
|
||||||
t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false)
|
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if f.Header().Flags.Has(http2.FlagDataPadded) {
|
if f.Header().Flags.Has(http2.FlagDataPadded) {
|
||||||
|
@ -599,11 +604,18 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
|
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||||
s, ok := t.getStream(f)
|
// If the stream is not deleted from the transport's active streams map, then do a regular close stream.
|
||||||
if !ok {
|
if s, ok := t.getStream(f); ok {
|
||||||
|
t.closeStream(s, false, 0, false)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
t.closeStream(s, false, 0, nil, false)
|
// If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map.
|
||||||
|
t.controlBuf.put(&cleanupStream{
|
||||||
|
streamID: f.Header().StreamID,
|
||||||
|
rst: false,
|
||||||
|
rstCode: 0,
|
||||||
|
onWrite: func() {},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
|
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
|
||||||
|
@ -611,11 +623,25 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
var ss []http2.Setting
|
var ss []http2.Setting
|
||||||
|
var updateFuncs []func()
|
||||||
f.ForeachSetting(func(s http2.Setting) error {
|
f.ForeachSetting(func(s http2.Setting) error {
|
||||||
ss = append(ss, s)
|
switch s.ID {
|
||||||
|
case http2.SettingMaxHeaderListSize:
|
||||||
|
updateFuncs = append(updateFuncs, func() {
|
||||||
|
t.maxSendHeaderListSize = new(uint32)
|
||||||
|
*t.maxSendHeaderListSize = s.Val
|
||||||
|
})
|
||||||
|
default:
|
||||||
|
ss = append(ss, s)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
t.controlBuf.put(&incomingSettings{
|
t.controlBuf.executeAndPut(func(interface{}) bool {
|
||||||
|
for _, f := range updateFuncs {
|
||||||
|
f()
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}, &incomingSettings{
|
||||||
ss: ss,
|
ss: ss,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@@ -695,6 +721,21 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD)
 	return headerFields
 }

+func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			return false
+		}
+	}
+	return true
+}
+
 // WriteHeader sends the header metedata md back to the client.
 func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 	if s.updateHeaderSent() || s.getState() == streamDone {
@@ -708,12 +749,15 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 			s.header = md
 		}
 	}
-	t.writeHeaderLocked(s)
+	if err := t.writeHeaderLocked(s); err != nil {
+		s.hdrMu.Unlock()
+		return err
+	}
 	s.hdrMu.Unlock()
 	return nil
 }

-func (t *http2Server) writeHeaderLocked(s *Stream) {
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
 	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
@@ -723,21 +767,28 @@ func (t *http2Server) writeHeaderLocked(s *Stream) {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
 	}
 	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
-	t.controlBuf.put(&headerFrame{
+	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
 		streamID:  s.id,
 		hf:        headerFields,
 		endStream: false,
 		onWrite: func() {
 			atomic.StoreUint32(&t.resetPingStrikes, 1)
 		},
-		wq: s.wq,
 	})
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
 	if t.stats != nil {
 		// Note: WireLength is not set in outHeader.
 		// TODO(mmukhi): Revisit this later, if needed.
 		outHeader := &stats.OutHeader{}
 		t.stats.HandleRPC(s.Context(), outHeader)
 	}
+	return nil
 }

 // WriteStatus sends stream status to the client and terminates the stream.
@@ -754,7 +805,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
 	if !s.updateHeaderSent() { // No headers have been sent.
 		if len(s.header) > 0 { // Send a separate header frame.
-			t.writeHeaderLocked(s)
+			if err := t.writeHeaderLocked(s); err != nil {
+				s.hdrMu.Unlock()
+				return err
+			}
 		} else { // Send a trailer only response.
 			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
 			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
@@ -767,10 +821,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 		stBytes, err := proto.Marshal(p)
 		if err != nil {
 			// TODO: return error instead, when callers are able to handle it.
-			panic(err)
+			grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
 		}
-
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
 	}

 	// Attach the trailer metadata.
@@ -784,7 +838,17 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 		},
 	}
 	s.hdrMu.Unlock()
-	t.closeStream(s, false, 0, trailingHeader, true)
+	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
 	if t.stats != nil {
 		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
 	}
@@ -796,8 +860,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
 	if !s.isHeaderSent() { // Headers haven't been written yet.
 		if err := t.WriteHeader(s, nil); err != nil {
+			if _, ok := err.(ConnectionError); ok {
+				return err
+			}
 			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
-			return streamErrorf(codes.Internal, "transport: %v", err)
+			return status.Errorf(codes.Internal, "transport: %v", err)
 		}
 	} else {
 		// Writing headers checks for this condition.
@@ -911,9 +978,7 @@ func (t *http2Server) keepalive() {
 		}
 		pingSent = true
 		if channelz.IsOn() {
-			t.czmu.Lock()
-			t.kpCount++
-			t.czmu.Unlock()
+			atomic.AddInt64(&t.czData.kpCount, 1)
 		}
 		t.controlBuf.put(p)
 		keepalive.Reset(t.kp.Timeout)
@@ -953,47 +1018,65 @@ func (t *http2Server) Close() error {
 	return err
 }

-// closeStream clears the footprint of a stream when the stream is not needed
-// any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
-	if s.swapState(streamDone) == streamDone {
-		// If the stream was already done, return.
-		return
-	}
-	// In case stream sending and receiving are invoked in separate
-	// goroutines (e.g., bi-directional streaming), cancel needs to be
-	// called to interrupt the potential blocking on other goroutines.
-	s.cancel()
-	cleanup := &cleanupStream{
-		streamID: s.id,
-		rst:      rst,
-		rstCode:  rstCode,
-		onWrite: func() {
-			t.mu.Lock()
-			if t.activeStreams != nil {
-				delete(t.activeStreams, s.id)
-				if len(t.activeStreams) == 0 {
-					t.idle = time.Now()
-				}
-			}
-			t.mu.Unlock()
-			if channelz.IsOn() {
-				t.czmu.Lock()
-				if eosReceived {
-					t.streamsSucceeded++
-				} else {
-					t.streamsFailed++
-				}
-				t.czmu.Unlock()
-			}
-		},
-	}
-	if hdr != nil {
-		hdr.cleanup = cleanup
-		t.controlBuf.put(hdr)
-	} else {
-		t.controlBuf.put(cleanup)
-	}
+// deleteStream deletes the stream s from transport's active streams.
+func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) {
+	oldState = s.swapState(streamDone)
+	if oldState == streamDone {
+		// If the stream was already done, return.
+		return oldState
+	}
+
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
+	t.mu.Lock()
+	if _, ok := t.activeStreams[s.id]; ok {
+		delete(t.activeStreams, s.id)
+		if len(t.activeStreams) == 0 {
+			t.idle = time.Now()
+		}
+	}
+	t.mu.Unlock()
+
+	if channelz.IsOn() {
+		if eosReceived {
+			atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+		} else {
+			atomic.AddInt64(&t.czData.streamsFailed, 1)
+		}
+	}
+
+	return oldState
+}
+
+// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
+func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+	oldState := t.deleteStream(s, eosReceived)
+	// If the stream is already closed, then don't put trailing header to controlbuf.
+	if oldState == streamDone {
+		return
+	}
+
+	hdr.cleanup = &cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite:  func() {},
+	}
+	t.controlBuf.put(hdr)
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed any more.
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+	t.deleteStream(s, eosReceived)
+	t.controlBuf.put(&cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite:  func() {},
+	})
 }

 func (t *http2Server) RemoteAddr() net.Addr {
@@ -1072,45 +1155,41 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
 }

 func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
-	t.czmu.RLock()
 	s := channelz.SocketInternalMetric{
-		StreamsStarted:                   t.streamsStarted,
-		StreamsSucceeded:                 t.streamsSucceeded,
-		StreamsFailed:                    t.streamsFailed,
-		MessagesSent:                     t.msgSent,
-		MessagesReceived:                 t.msgRecv,
-		KeepAlivesSent:                   t.kpCount,
-		LastRemoteStreamCreatedTimestamp: t.lastStreamCreated,
-		LastMessageSentTimestamp:         t.lastMsgSent,
-		LastMessageReceivedTimestamp:     t.lastMsgRecv,
+		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
+		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
+		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
+		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
+		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
+		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
+		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
 		LocalFlowControlWindow:           int64(t.fc.getSize()),
-		//socket options
+		SocketOptions:                    channelz.GetSocketOption(t.conn),
 		LocalAddr:                        t.localAddr,
 		RemoteAddr:                       t.remoteAddr,
-		// Security
 		// RemoteName :
 	}
-	t.czmu.RUnlock()
+	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+		s.Security = au.GetSecurityValue()
+	}
 	s.RemoteFlowControlWindow = t.getOutFlowWindow()
 	return &s
 }

 func (t *http2Server) IncrMsgSent() {
-	t.czmu.Lock()
-	t.msgSent++
-	t.lastMsgSent = time.Now()
-	t.czmu.Unlock()
+	atomic.AddInt64(&t.czData.msgSent, 1)
+	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
 }

 func (t *http2Server) IncrMsgRecv() {
-	t.czmu.Lock()
-	t.msgRecv++
-	t.lastMsgRecv = time.Now()
-	t.czmu.Unlock()
+	atomic.AddInt64(&t.czData.msgRecv, 1)
+	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
 }

 func (t *http2Server) getOutFlowWindow() int64 {
-	resp := make(chan uint32)
+	resp := make(chan uint32, 1)
 	timer := time.NewTimer(time.Second)
 	defer timer.Stop()
 	t.controlBuf.put(&outFlowControlSizeRequest{resp})
@@ -1124,14 +1203,12 @@ func (t *http2Server) getOutFlowWindow() int64 {
 	}
 }

-var rgen = rand.New(rand.NewSource(time.Now().UnixNano()))
-
 func getJitter(v time.Duration) time.Duration {
 	if v == infinity {
 		return 0
 	}
 	// Generate a jitter between +/- 10% of the value.
 	r := int64(v / 10)
-	j := rgen.Int63n(2*r) - r
+	j := grpcrand.Int63n(2*r) - r
 	return time.Duration(j)
 }
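For context on the max-header-list-size plumbing in the server transport changes above: the sketch below is illustrative only and is not part of the vendored diff. It assumes the option names gRPC-Go exposes around v1.20 (`grpc.MaxHeaderListSize` and `grpc.WithMaxHeaderListSize`); the sizes and addresses are made-up example values.

```
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	// Cap the header list size the server accepts; the transport advertises it
	// via the SETTINGS exchange added in the diff above.
	srv := grpc.NewServer(grpc.MaxHeaderListSize(1 << 20)) // 1 MiB, illustrative

	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	defer srv.Stop()
	go srv.Serve(lis)

	// Clients advertise their own limit with the matching dial option.
	conn, err := grpc.Dial(lis.Addr().String(),
		grpc.WithInsecure(),
		grpc.WithMaxHeaderListSize(1<<20),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	conn.Close()
}
```

When the peer's advertised limit would be exceeded, the new server code fails the write with ErrHeaderListSizeLimitViolation instead of sending an oversized HEADERS frame.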
@ -23,11 +23,14 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
|
@ -42,9 +45,6 @@ const (
|
||||||
http2MaxFrameLen = 16384 // 16KB frame
|
http2MaxFrameLen = 16384 // 16KB frame
|
||||||
// http://http2.github.io/http2-spec/#SettingValues
|
// http://http2.github.io/http2-spec/#SettingValues
|
||||||
http2InitHeaderTableSize = 4096
|
http2InitHeaderTableSize = 4096
|
||||||
// http2IOBufSize specifies the buffer size for sending frames.
|
|
||||||
defaultWriteBufSize = 32 * 1024
|
|
||||||
defaultReadBufSize = 32 * 1024
|
|
||||||
// baseContentType is the base content-type for gRPC. This is a valid
|
// baseContentType is the base content-type for gRPC. This is a valid
|
||||||
// content-type on it's own, but can also include a content-subtype such as
|
// content-type on it's own, but can also include a content-subtype such as
|
||||||
// "proto" as a suffix after "+" or ";". See
|
// "proto" as a suffix after "+" or ";". See
|
||||||
|
@ -78,7 +78,8 @@ var (
|
||||||
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
|
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
|
||||||
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
|
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
|
||||||
}
|
}
|
||||||
httpStatusConvTab = map[int]codes.Code{
|
// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
|
||||||
|
HTTPStatusConvTab = map[int]codes.Code{
|
||||||
// 400 Bad Request - INTERNAL.
|
// 400 Bad Request - INTERNAL.
|
||||||
http.StatusBadRequest: codes.Internal,
|
http.StatusBadRequest: codes.Internal,
|
||||||
// 401 Unauthorized - UNAUTHENTICATED.
|
// 401 Unauthorized - UNAUTHENTICATED.
|
||||||
|
@ -98,9 +99,7 @@ var (
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// Records the states during HPACK decoding. Must be reset once the
|
type parsedHeaderData struct {
|
||||||
// decoding of the entire headers are finished.
|
|
||||||
type decodeState struct {
|
|
||||||
encoding string
|
encoding string
|
||||||
// statusGen caches the stream status received from the trailer the server
|
// statusGen caches the stream status received from the trailer the server
|
||||||
// sent. Client side only. Do not access directly. After all trailers are
|
// sent. Client side only. Do not access directly. After all trailers are
|
||||||
|
@ -120,6 +119,30 @@ type decodeState struct {
|
||||||
statsTags []byte
|
statsTags []byte
|
||||||
statsTrace []byte
|
statsTrace []byte
|
||||||
contentSubtype string
|
contentSubtype string
|
||||||
|
|
||||||
|
// isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
|
||||||
|
//
|
||||||
|
// We are in gRPC mode (peer speaking gRPC) if:
|
||||||
|
// * We are client side and have already received a HEADER frame that indicates gRPC peer.
|
||||||
|
// * The header contains valid a content-type, i.e. a string starts with "application/grpc"
|
||||||
|
// And we should handle error specific to gRPC.
|
||||||
|
//
|
||||||
|
// Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we
|
||||||
|
// are in HTTP fallback mode, and should handle error specific to HTTP.
|
||||||
|
isGRPC bool
|
||||||
|
grpcErr error
|
||||||
|
httpErr error
|
||||||
|
contentTypeErr string
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeState configures decoding criteria and records the decoded data.
|
||||||
|
type decodeState struct {
|
||||||
|
// whether decoding on server side or not
|
||||||
|
serverSide bool
|
||||||
|
|
||||||
|
// Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
|
||||||
|
// frame once decodeHeader function has been invoked and returned.
|
||||||
|
data parsedHeaderData
|
||||||
}
|
}
|
||||||
|
|
||||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||||
|
@ -138,6 +161,9 @@ func isReservedHeader(hdr string) bool {
|
||||||
"grpc-status",
|
"grpc-status",
|
||||||
"grpc-timeout",
|
"grpc-timeout",
|
||||||
"grpc-status-details-bin",
|
"grpc-status-details-bin",
|
||||||
|
// Intentionally exclude grpc-previous-rpc-attempts and
|
||||||
|
// grpc-retry-pushback-ms, which are "reserved", but their API
|
||||||
|
// intentionally works via metadata.
|
||||||
"te":
|
"te":
|
||||||
return true
|
return true
|
||||||
default:
|
default:
|
||||||
|
@ -145,8 +171,8 @@ func isReservedHeader(hdr string) bool {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// isWhitelistedHeader checks whether hdr should be propagated
|
// isWhitelistedHeader checks whether hdr should be propagated into metadata
|
||||||
// into metadata visible to users.
|
// visible to users, even though it is classified as "reserved", above.
|
||||||
func isWhitelistedHeader(hdr string) bool {
|
func isWhitelistedHeader(hdr string) bool {
|
||||||
switch hdr {
|
switch hdr {
|
||||||
case ":authority", "user-agent":
|
case ":authority", "user-agent":
|
||||||
|
@ -197,11 +223,11 @@ func contentType(contentSubtype string) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *decodeState) status() *status.Status {
|
func (d *decodeState) status() *status.Status {
|
||||||
if d.statusGen == nil {
|
if d.data.statusGen == nil {
|
||||||
// No status-details were provided; generate status using code/msg.
|
// No status-details were provided; generate status using code/msg.
|
||||||
d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
|
d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
|
||||||
}
|
}
|
||||||
return d.statusGen
|
return d.data.statusGen
|
||||||
}
|
}
|
||||||
|
|
||||||
const binHdrSuffix = "-bin"
|
const binHdrSuffix = "-bin"
|
||||||
|
@ -233,111 +259,152 @@ func decodeMetadataHeader(k, v string) (string, error) {
|
||||||
return v, nil
|
return v, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error {
|
func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
|
||||||
for _, hf := range frame.Fields {
|
// frame.Truncated is set to true when framer detects that the current header
|
||||||
if err := d.processHeaderField(hf); err != nil {
|
// list size hits MaxHeaderListSize limit.
|
||||||
return err
|
if frame.Truncated {
|
||||||
}
|
return status.Error(codes.Internal, "peer header list size exceeded limit")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If grpc status exists, no need to check further.
|
for _, hf := range frame.Fields {
|
||||||
if d.rawStatusCode != nil || d.statusGen != nil {
|
d.processHeaderField(hf)
|
||||||
|
}
|
||||||
|
|
||||||
|
if d.data.isGRPC {
|
||||||
|
if d.data.grpcErr != nil {
|
||||||
|
return d.data.grpcErr
|
||||||
|
}
|
||||||
|
if d.serverSide {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if d.data.rawStatusCode == nil && d.data.statusGen == nil {
|
||||||
|
// gRPC status doesn't exist.
|
||||||
|
// Set rawStatusCode to be unknown and return nil error.
|
||||||
|
// So that, if the stream has ended this Unknown status
|
||||||
|
// will be propagated to the user.
|
||||||
|
// Otherwise, it will be ignored. In which case, status from
|
||||||
|
// a later trailer, that has StreamEnded flag set, is propagated.
|
||||||
|
code := int(codes.Unknown)
|
||||||
|
d.data.rawStatusCode = &code
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// If grpc status doesn't exist and http status doesn't exist,
|
// HTTP fallback mode
|
||||||
// then it's a malformed header.
|
if d.data.httpErr != nil {
|
||||||
if d.httpStatus == nil {
|
return d.data.httpErr
|
||||||
return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if *(d.httpStatus) != http.StatusOK {
|
var (
|
||||||
code, ok := httpStatusConvTab[*(d.httpStatus)]
|
code = codes.Internal // when header does not include HTTP status, return INTERNAL
|
||||||
|
ok bool
|
||||||
|
)
|
||||||
|
|
||||||
|
if d.data.httpStatus != nil {
|
||||||
|
code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
|
||||||
if !ok {
|
if !ok {
|
||||||
code = codes.Unknown
|
code = codes.Unknown
|
||||||
}
|
}
|
||||||
return streamErrorf(code, http.StatusText(*(d.httpStatus)))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// gRPC status doesn't exist and http status is OK.
|
return status.Error(code, d.constructHTTPErrMsg())
|
||||||
// Set rawStatusCode to be unknown and return nil error.
|
}
|
||||||
// So that, if the stream has ended this Unknown status
|
|
||||||
// will be propagated to the user.
|
|
||||||
// Otherwise, it will be ignored. In which case, status from
|
|
||||||
// a later trailer, that has StreamEnded flag set, is propagated.
|
|
||||||
code := int(codes.Unknown)
|
|
||||||
d.rawStatusCode = &code
|
|
||||||
return nil
|
|
||||||
|
|
||||||
|
// constructErrMsg constructs error message to be returned in HTTP fallback mode.
|
||||||
|
// Format: HTTP status code and its corresponding message + content-type error message.
|
||||||
|
func (d *decodeState) constructHTTPErrMsg() string {
|
||||||
|
var errMsgs []string
|
||||||
|
|
||||||
|
if d.data.httpStatus == nil {
|
||||||
|
errMsgs = append(errMsgs, "malformed header: missing HTTP status")
|
||||||
|
} else {
|
||||||
|
errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
|
||||||
|
}
|
||||||
|
|
||||||
|
if d.data.contentTypeErr == "" {
|
||||||
|
errMsgs = append(errMsgs, "transport: missing content-type field")
|
||||||
|
} else {
|
||||||
|
errMsgs = append(errMsgs, d.data.contentTypeErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(errMsgs, "; ")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *decodeState) addMetadata(k, v string) {
|
func (d *decodeState) addMetadata(k, v string) {
|
||||||
if d.mdata == nil {
|
if d.data.mdata == nil {
|
||||||
d.mdata = make(map[string][]string)
|
d.data.mdata = make(map[string][]string)
|
||||||
}
|
}
|
||||||
d.mdata[k] = append(d.mdata[k], v)
|
d.data.mdata[k] = append(d.data.mdata[k], v)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
|
||||||
switch f.Name {
|
switch f.Name {
|
||||||
case "content-type":
|
case "content-type":
|
||||||
contentSubtype, validContentType := contentSubtype(f.Value)
|
contentSubtype, validContentType := contentSubtype(f.Value)
|
||||||
if !validContentType {
|
if !validContentType {
|
||||||
return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
|
d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
d.contentSubtype = contentSubtype
|
d.data.contentSubtype = contentSubtype
|
||||||
// TODO: do we want to propagate the whole content-type in the metadata,
|
// TODO: do we want to propagate the whole content-type in the metadata,
|
||||||
// or come up with a way to just propagate the content-subtype if it was set?
|
// or come up with a way to just propagate the content-subtype if it was set?
|
||||||
// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
|
// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
|
||||||
// in the metadata?
|
// in the metadata?
|
||||||
d.addMetadata(f.Name, f.Value)
|
d.addMetadata(f.Name, f.Value)
|
||||||
|
d.data.isGRPC = true
|
||||||
case "grpc-encoding":
|
case "grpc-encoding":
|
||||||
d.encoding = f.Value
|
d.data.encoding = f.Value
|
||||||
case "grpc-status":
|
case "grpc-status":
|
||||||
code, err := strconv.Atoi(f.Value)
|
code, err := strconv.Atoi(f.Value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
|
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
d.rawStatusCode = &code
|
d.data.rawStatusCode = &code
|
||||||
case "grpc-message":
|
case "grpc-message":
|
||||||
d.rawStatusMsg = decodeGrpcMessage(f.Value)
|
d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
|
||||||
case "grpc-status-details-bin":
|
case "grpc-status-details-bin":
|
||||||
v, err := decodeBinHeader(f.Value)
|
v, err := decodeBinHeader(f.Value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
s := &spb.Status{}
|
s := &spb.Status{}
|
||||||
if err := proto.Unmarshal(v, s); err != nil {
|
if err := proto.Unmarshal(v, s); err != nil {
|
||||||
return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
d.statusGen = status.FromProto(s)
|
d.data.statusGen = status.FromProto(s)
|
||||||
case "grpc-timeout":
|
case "grpc-timeout":
|
||||||
d.timeoutSet = true
|
d.data.timeoutSet = true
|
||||||
var err error
|
var err error
|
||||||
if d.timeout, err = decodeTimeout(f.Value); err != nil {
|
if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
|
||||||
return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
|
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
|
||||||
}
|
}
|
||||||
case ":path":
|
case ":path":
|
||||||
d.method = f.Value
|
d.data.method = f.Value
|
||||||
case ":status":
|
case ":status":
|
||||||
code, err := strconv.Atoi(f.Value)
|
code, err := strconv.Atoi(f.Value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err)
|
d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
d.httpStatus = &code
|
d.data.httpStatus = &code
|
||||||
case "grpc-tags-bin":
|
case "grpc-tags-bin":
|
||||||
v, err := decodeBinHeader(f.Value)
|
v, err := decodeBinHeader(f.Value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
|
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
d.statsTags = v
|
d.data.statsTags = v
|
||||||
d.addMetadata(f.Name, string(v))
|
d.addMetadata(f.Name, string(v))
|
||||||
case "grpc-trace-bin":
|
case "grpc-trace-bin":
|
||||||
v, err := decodeBinHeader(f.Value)
|
v, err := decodeBinHeader(f.Value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
|
d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
d.statsTrace = v
|
d.data.statsTrace = v
|
||||||
d.addMetadata(f.Name, string(v))
|
d.addMetadata(f.Name, string(v))
|
||||||
default:
|
default:
|
||||||
if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
|
if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
|
||||||
|
@ -346,11 +413,10 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
|
||||||
v, err := decodeMetadataHeader(f.Name, f.Value)
|
v, err := decodeMetadataHeader(f.Name, f.Value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
||||||
return nil
|
return
|
||||||
}
|
}
|
||||||
d.addMetadata(f.Name, v)
|
d.addMetadata(f.Name, v)
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type timeoutUnit uint8
|
type timeoutUnit uint8
|
||||||
|
@ -423,6 +489,10 @@ func decodeTimeout(s string) (time.Duration, error) {
|
||||||
if size < 2 {
|
if size < 2 {
|
||||||
return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
|
return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
|
||||||
}
|
}
|
||||||
|
if size > 9 {
|
||||||
|
// Spec allows for 8 digits plus the unit.
|
||||||
|
return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
|
||||||
|
}
|
||||||
unit := timeoutUnit(s[size-1])
|
unit := timeoutUnit(s[size-1])
|
||||||
d, ok := timeoutUnitToDuration(unit)
|
d, ok := timeoutUnitToDuration(unit)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -432,21 +502,27 @@ func decodeTimeout(s string) (time.Duration, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
const maxHours = math.MaxInt64 / int64(time.Hour)
|
||||||
|
if d == time.Hour && t > maxHours {
|
||||||
|
// This timeout would overflow math.MaxInt64; clamp it.
|
||||||
|
return time.Duration(math.MaxInt64), nil
|
||||||
|
}
|
||||||
return d * time.Duration(t), nil
|
return d * time.Duration(t), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
spaceByte = ' '
|
spaceByte = ' '
|
||||||
tildaByte = '~'
|
tildeByte = '~'
|
||||||
percentByte = '%'
|
percentByte = '%'
|
||||||
)
|
)
|
||||||
|
|
||||||
// encodeGrpcMessage is used to encode status code in header field
|
// encodeGrpcMessage is used to encode status code in header field
|
||||||
// "grpc-message".
|
// "grpc-message". It does percent encoding and also replaces invalid utf-8
|
||||||
// It checks to see if each individual byte in msg is an
|
// characters with Unicode replacement character.
|
||||||
// allowable byte, and then either percent encoding or passing it through.
|
//
|
||||||
// When percent encoding, the byte is converted into hexadecimal notation
|
// It checks to see if each individual byte in msg is an allowable byte, and
|
||||||
// with a '%' prepended.
|
// then either percent encoding or passing it through. When percent encoding,
|
||||||
|
// the byte is converted into hexadecimal notation with a '%' prepended.
|
||||||
func encodeGrpcMessage(msg string) string {
|
func encodeGrpcMessage(msg string) string {
|
||||||
if msg == "" {
|
if msg == "" {
|
||||||
return ""
|
return ""
|
||||||
|
@ -454,7 +530,7 @@ func encodeGrpcMessage(msg string) string {
|
||||||
lenMsg := len(msg)
|
lenMsg := len(msg)
|
||||||
for i := 0; i < lenMsg; i++ {
|
for i := 0; i < lenMsg; i++ {
|
||||||
c := msg[i]
|
c := msg[i]
|
||||||
if !(c >= spaceByte && c < tildaByte && c != percentByte) {
|
if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
|
||||||
return encodeGrpcMessageUnchecked(msg)
|
return encodeGrpcMessageUnchecked(msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -463,14 +539,26 @@ func encodeGrpcMessage(msg string) string {
|
||||||
|
|
||||||
func encodeGrpcMessageUnchecked(msg string) string {
|
func encodeGrpcMessageUnchecked(msg string) string {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
lenMsg := len(msg)
|
for len(msg) > 0 {
|
||||||
for i := 0; i < lenMsg; i++ {
|
r, size := utf8.DecodeRuneInString(msg)
|
||||||
c := msg[i]
|
for _, b := range []byte(string(r)) {
|
||||||
if c >= spaceByte && c < tildaByte && c != percentByte {
|
if size > 1 {
|
||||||
buf.WriteByte(c)
|
// If size > 1, r is not ascii. Always do percent encoding.
|
||||||
} else {
|
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||||
buf.WriteString(fmt.Sprintf("%%%02X", c))
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// The for loop is necessary even if size == 1. r could be
|
||||||
|
// utf8.RuneError.
|
||||||
|
//
|
||||||
|
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
|
||||||
|
if b >= spaceByte && b <= tildeByte && b != percentByte {
|
||||||
|
buf.WriteByte(b)
|
||||||
|
} else {
|
||||||
|
buf.WriteString(fmt.Sprintf("%%%02X", b))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
msg = msg[size:]
|
||||||
}
|
}
|
||||||
return buf.String()
|
return buf.String()
|
||||||
}
|
}
|
||||||
|
@ -531,6 +619,9 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
|
||||||
if w.err != nil {
|
if w.err != nil {
|
||||||
return 0, w.err
|
return 0, w.err
|
||||||
}
|
}
|
||||||
|
if w.batchSize == 0 { // Buffer has been disabled.
|
||||||
|
return w.conn.Write(b)
|
||||||
|
}
|
||||||
for len(b) > 0 {
|
for len(b) > 0 {
|
||||||
nn := copy(w.buf[w.offset:], b)
|
nn := copy(w.buf[w.offset:], b)
|
||||||
b = b[nn:]
|
b = b[nn:]
|
||||||
|
@ -563,8 +654,14 @@ type framer struct {
|
||||||
fr *http2.Framer
|
fr *http2.Framer
|
||||||
}
|
}
|
||||||
|
|
||||||
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
|
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
|
||||||
r := bufio.NewReaderSize(conn, readBufferSize)
|
if writeBufferSize < 0 {
|
||||||
|
writeBufferSize = 0
|
||||||
|
}
|
||||||
|
var r io.Reader = conn
|
||||||
|
if readBufferSize > 0 {
|
||||||
|
r = bufio.NewReaderSize(r, readBufferSize)
|
||||||
|
}
|
||||||
w := newBufWriter(conn, writeBufferSize)
|
w := newBufWriter(conn, writeBufferSize)
|
||||||
f := &framer{
|
f := &framer{
|
||||||
writer: w,
|
writer: w,
|
||||||
|
@ -573,6 +670,7 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
|
||||||
// Opt-in to Frame reuse API on framer to reduce garbage.
|
// Opt-in to Frame reuse API on framer to reduce garbage.
|
||||||
// Frames aren't safe to read from after a subsequent call to ReadFrame.
|
// Frames aren't safe to read from after a subsequent call to ReadFrame.
|
||||||
f.fr.SetReuseFrames()
|
f.fr.SetReuseFrames()
|
||||||
|
f.fr.MaxHeaderListSize = maxHeaderListSize
|
||||||
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
|
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
|
||||||
return f
|
return f
|
||||||
}
|
}
|
|
@ -42,9 +42,3 @@ func errorf(format string, args ...interface{}) {
|
||||||
grpclog.Errorf(format, args...)
|
grpclog.Errorf(format, args...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func fatalf(format string, args ...interface{}) {
|
|
||||||
if grpclog.V(logLevel) {
|
|
||||||
grpclog.Fatalf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -19,9 +19,10 @@
|
||||||
// Package transport defines and implements message oriented communication
|
// Package transport defines and implements message oriented communication
|
||||||
// channel to complete various transactions (e.g., an RPC). It is meant for
|
// channel to complete various transactions (e.g., an RPC). It is meant for
|
||||||
// grpc-internal usage and is not intended to be imported directly by users.
|
// grpc-internal usage and is not intended to be imported directly by users.
|
||||||
package transport // externally used as import "google.golang.org/grpc/transport"
|
package transport
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
@ -29,7 +30,6 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/keepalive"
|
"google.golang.org/grpc/keepalive"
|
||||||
|
@ -110,15 +110,15 @@ func (b *recvBuffer) get() <-chan recvMsg {
|
||||||
return b.c
|
return b.c
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
|
||||||
// recvBufferReader implements io.Reader interface to read the data from
|
// recvBufferReader implements io.Reader interface to read the data from
|
||||||
// recvBuffer.
|
// recvBuffer.
|
||||||
type recvBufferReader struct {
|
type recvBufferReader struct {
|
||||||
ctx context.Context
|
closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
|
||||||
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
|
ctx context.Context
|
||||||
recv *recvBuffer
|
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
|
||||||
last []byte // Stores the remaining data in the previous calls.
|
recv *recvBuffer
|
||||||
err error
|
last []byte // Stores the remaining data in the previous calls.
|
||||||
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read reads the next len(p) bytes from last. If last is drained, it tries to
|
// Read reads the next len(p) bytes from last. If last is drained, it tries to
|
||||||
|
@ -128,31 +128,53 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) {
|
||||||
if r.err != nil {
|
if r.err != nil {
|
||||||
return 0, r.err
|
return 0, r.err
|
||||||
}
|
}
|
||||||
n, r.err = r.read(p)
|
|
||||||
return n, r.err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *recvBufferReader) read(p []byte) (n int, err error) {
|
|
||||||
if r.last != nil && len(r.last) > 0 {
|
if r.last != nil && len(r.last) > 0 {
|
||||||
// Read remaining data left in last call.
|
// Read remaining data left in last call.
|
||||||
copied := copy(p, r.last)
|
copied := copy(p, r.last)
|
||||||
r.last = r.last[copied:]
|
r.last = r.last[copied:]
|
||||||
return copied, nil
|
return copied, nil
|
||||||
}
|
}
|
||||||
|
if r.closeStream != nil {
|
||||||
|
n, r.err = r.readClient(p)
|
||||||
|
} else {
|
||||||
|
n, r.err = r.read(p)
|
||||||
|
}
|
||||||
|
return n, r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *recvBufferReader) read(p []byte) (n int, err error) {
|
||||||
select {
|
select {
|
||||||
case <-r.ctxDone:
|
case <-r.ctxDone:
|
||||||
return 0, ContextErr(r.ctx.Err())
|
return 0, ContextErr(r.ctx.Err())
|
||||||
case m := <-r.recv.get():
|
case m := <-r.recv.get():
|
||||||
r.recv.load()
|
return r.readAdditional(m, p)
|
||||||
if m.err != nil {
|
|
||||||
return 0, m.err
|
|
||||||
}
|
|
||||||
copied := copy(p, m.data)
|
|
||||||
r.last = m.data[copied:]
|
|
||||||
return copied, nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
|
||||||
|
// If the context is canceled, then closes the stream with nil metadata.
|
||||||
|
// closeStream writes its error parameter to r.recv as a recvMsg.
|
||||||
|
// r.readAdditional acts on that message and returns the necessary error.
|
||||||
|
select {
|
||||||
|
case <-r.ctxDone:
|
||||||
|
r.closeStream(ContextErr(r.ctx.Err()))
|
||||||
|
m := <-r.recv.get()
|
||||||
|
return r.readAdditional(m, p)
|
||||||
|
case m := <-r.recv.get():
|
||||||
|
return r.readAdditional(m, p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
|
||||||
|
r.recv.load()
|
||||||
|
if m.err != nil {
|
||||||
|
return 0, m.err
|
||||||
|
}
|
||||||
|
copied := copy(p, m.data)
|
||||||
|
r.last = m.data[copied:]
|
||||||
|
return copied, nil
|
||||||
|
}
|
||||||
|
|
||||||
type streamState uint32
|
type streamState uint32
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -176,7 +198,6 @@ type Stream struct {
|
||||||
buf *recvBuffer
|
buf *recvBuffer
|
||||||
trReader io.Reader
|
trReader io.Reader
|
||||||
fc *inFlow
|
fc *inFlow
|
||||||
recvQuota uint32
|
|
||||||
wq *writeQuota
|
wq *writeQuota
|
||||||
|
|
||||||
// Callback to state application's intentions to read data. This
|
// Callback to state application's intentions to read data. This
|
||||||
|
@ -187,10 +208,16 @@ type Stream struct {
|
||||||
headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
||||||
|
|
||||||
// hdrMu protects header and trailer metadata on the server-side.
|
// hdrMu protects header and trailer metadata on the server-side.
|
||||||
hdrMu sync.Mutex
|
hdrMu sync.Mutex
|
||||||
header metadata.MD // the received header metadata.
|
// On client side, header keeps the received header metadata.
|
||||||
|
//
|
||||||
|
// On server side, header keeps the header set by SetHeader(). The complete
|
||||||
|
// header will merged into this after t.WriteHeader() is called.
|
||||||
|
header metadata.MD
|
||||||
trailer metadata.MD // the key-value map of trailer metadata.
|
trailer metadata.MD // the key-value map of trailer metadata.
|
||||||
|
|
||||||
|
noHeaders bool // set if the client never received headers (set only after the stream is done).
|
||||||
|
|
||||||
// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
|
// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
|
||||||
headerSent uint32
|
headerSent uint32
|
||||||
|
|
||||||
|
@ -259,16 +286,25 @@ func (s *Stream) SetSendCompress(str string) {
|
||||||
s.sendCompress = str
|
s.sendCompress = str
|
||||||
}
|
}
|
||||||
|
|
||||||
// Done returns a chanel which is closed when it receives the final status
|
// Done returns a channel which is closed when it receives the final status
|
||||||
// from the server.
|
// from the server.
|
||||||
func (s *Stream) Done() <-chan struct{} {
|
func (s *Stream) Done() <-chan struct{} {
|
||||||
return s.done
|
return s.done
|
||||||
}
|
}
|
||||||
|
|
||||||
// Header acquires the key-value pairs of header metadata once it
|
// Header returns the header metadata of the stream.
|
||||||
// is available. It blocks until i) the metadata is ready or ii) there is no
|
//
|
||||||
// header metadata or iii) the stream is canceled/expired.
|
// On client side, it acquires the key-value pairs of header metadata once it is
|
||||||
|
// available. It blocks until i) the metadata is ready or ii) there is no header
|
||||||
|
// metadata or iii) the stream is canceled/expired.
|
||||||
|
//
|
||||||
|
// On server side, it returns the out header after t.WriteHeader is called.
|
||||||
func (s *Stream) Header() (metadata.MD, error) {
|
func (s *Stream) Header() (metadata.MD, error) {
|
||||||
|
if s.headerChan == nil && s.header != nil {
|
||||||
|
// On server side, return the header in stream. It will be the out
|
||||||
|
// header after t.WriteHeader is called.
|
||||||
|
return s.header.Copy(), nil
|
||||||
|
}
|
||||||
err := s.waitOnHeader()
|
err := s.waitOnHeader()
|
||||||
// Even if the stream is closed, header is returned if available.
|
// Even if the stream is closed, header is returned if available.
|
||||||
select {
|
select {
|
||||||
|
@ -282,6 +318,18 @@ func (s *Stream) Header() (metadata.MD, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TrailersOnly blocks until a header or trailers-only frame is received and
|
||||||
|
// then returns true if the stream was trailers-only. If the stream ends
|
||||||
|
// before headers are received, returns true, nil. If a context error happens
|
||||||
|
// first, returns it as a status error. Client-side only.
|
||||||
|
func (s *Stream) TrailersOnly() (bool, error) {
|
||||||
|
err := s.waitOnHeader()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return s.noHeaders, nil
|
||||||
|
}
|
||||||
|
|
||||||
 // Trailer returns the cached trailer metedata. Note that if it is not called
 // after the entire stream is done, it could return an empty MD. Client
 // side only.
@@ -292,12 +340,6 @@ func (s *Stream) Trailer() metadata.MD {
 	return c
 }

-// ServerTransport returns the underlying ServerTransport for the stream.
-// The client side stream always returns nil.
-func (s *Stream) ServerTransport() ServerTransport {
-	return s.st
-}
-
 // ContentSubtype returns the content-subtype for a request. For example, a
 // content-subtype of "proto" will result in a content-type of
 // "application/grpc+proto". This will always be lowercase. See
@@ -319,7 +361,7 @@ func (s *Stream) Method() string {

 // Status returns the status received from the server.
 // Status can be read safely only after the stream has ended,
-// that is, read or write has returned io.EOF.
+// that is, after Done() is closed.
 func (s *Stream) Status() *status.Status {
 	return s.status
 }
@@ -344,8 +386,7 @@ func (s *Stream) SetHeader(md metadata.MD) error {
 // combined with any metadata set by previous calls to SetHeader and
 // then written to the transport stream.
 func (s *Stream) SendHeader(md metadata.MD) error {
-	t := s.ServerTransport()
-	return t.WriteHeader(s, md)
+	return s.st.WriteHeader(s, md)
 }

 // SetTrailer sets the trailer metadata which will be sent with the RPC status
@@ -439,6 +480,7 @@ type ServerConfig struct {
 	WriteBufferSize  int
 	ReadBufferSize   int
 	ChannelzParentID int64
+	MaxHeaderListSize *uint32
 }

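The SetHeader/SendHeader/SetTrailer plumbing above is what the public server-side helpers drive; a hedged sketch (package and metadata keys are made up) of how a handler would use them:

```go
package servermd

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// annotate buffers header metadata, optionally flushes it early, and attaches
// trailer metadata that is sent with the final RPC status. It would be called
// from inside a (hypothetical) unary handler with the handler's context.
func annotate(ctx context.Context) error {
	if err := grpc.SetHeader(ctx, metadata.Pairs("cache", "miss")); err != nil {
		return err
	}
	// SendHeader pushes the accumulated headers out immediately; it may only
	// be called once per RPC.
	if err := grpc.SendHeader(ctx, metadata.Pairs("trace-id", "abc123")); err != nil {
		return err
	}
	return grpc.SetTrailer(ctx, metadata.Pairs("served-by", "node-1"))
}
```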
 // NewServerTransport creates a ServerTransport with conn or non-nil error
@@ -451,17 +493,18 @@ func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (S
 type ConnectOptions struct {
 	// UserAgent is the application user agent.
 	UserAgent string
-	// Authority is the :authority pseudo-header to use. This field has no effect if
-	// TransportCredentials is set.
-	Authority string
 	// Dialer specifies how to dial a network address.
 	Dialer func(context.Context, string) (net.Conn, error)
 	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
 	FailOnNonTempDialError bool
 	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
 	PerRPCCredentials []credentials.PerRPCCredentials
-	// TransportCredentials stores the Authenticator required to setup a client connection.
+	// TransportCredentials stores the Authenticator required to setup a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
 	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used. Only one of
+	// TransportCredentials and CredsBundle is non-nil.
+	CredsBundle credentials.Bundle
 	// KeepaliveParams stores the keepalive parameters.
 	KeepaliveParams keepalive.ClientParameters
 	// StatsHandler stores the handler for stats.
@@ -476,6 +519,8 @@ type ConnectOptions struct {
 	ReadBufferSize int
 	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
 	ChannelzParentID int64
+	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
+	MaxHeaderListSize *uint32
 }
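The ConnectOptions fields above are populated from the client's DialOptions; a minimal sketch of the corresponding public API (target and values are placeholders):

```go
package dialopts

import (
	"crypto/tls"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
)

// dial maps one DialOption onto each ConnectOptions field shown above:
// user agent, transport credentials, keepalive parameters and buffer sizes.
func dial(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithUserAgent("example-client/0.1"),
		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{Time: 30 * time.Second}),
		grpc.WithWriteBufferSize(64*1024),
		grpc.WithReadBufferSize(64*1024),
	)
}
```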

 // TargetInfo contains the information of the target such as network address and metadata.
@@ -487,8 +532,8 @@ type TargetInfo struct {

 // NewClientTransport establishes the transport with the required ConnectOptions
 // and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
-	return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
 }

 // Options provides additional hints and information for message
@@ -497,11 +542,6 @@ type Options struct {
 	// Last indicates whether this write is the last piece for
 	// this stream.
 	Last bool
-
-	// Delay is a hint to the transport implementation for whether
-	// the data could be buffered for a batching write. The
-	// transport implementation may ignore the hint.
-	Delay bool
 }

 // CallHdr carries the information of a particular RPC.
@@ -519,14 +559,6 @@ type CallHdr struct {
 	// Creds specifies credentials.PerRPCCredentials for a call.
 	Creds credentials.PerRPCCredentials

-	// Flush indicates whether a new stream command should be sent
-	// to the peer without waiting for the first data. This is
-	// only a hint.
-	// If it's true, the transport may modify the flush decision
-	// for performance purposes.
-	// If it's false, new stream will never be flushed.
-	Flush bool
-
 	// ContentSubtype specifies the content-subtype for a request. For example, a
 	// content-subtype of "proto" will result in a content-type of
 	// "application/grpc+proto". The value of ContentSubtype must be all
@@ -534,6 +566,8 @@ type CallHdr struct {
 	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
 	// for more details.
 	ContentSubtype string
+
+	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
 }

 // ClientTransport is the common interface for all gRPC client-side transport
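The ContentSubtype field above is normally set per call; a hedged sketch using the public CallOption (assumes a codec registered under the name "json", which is not shown here):

```go
package subtypes

import (
	"context"

	"google.golang.org/grpc"
)

// invokeJSON asks for "application/grpc+json" framing on a single RPC, which
// is what CallHdr.ContentSubtype ends up carrying on the wire.
func invokeJSON(ctx context.Context, cc *grpc.ClientConn, method string, req, resp interface{}) error {
	return cc.Invoke(ctx, method, req, resp, grpc.CallContentSubtype("json"))
}
```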
@@ -576,6 +610,9 @@ type ClientTransport interface {
 	// GetGoAwayReason returns the reason why GoAway frame was received.
 	GetGoAwayReason() GoAwayReason

+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
 	// IncrMsgSent increments the number of message sent through this transport.
 	IncrMsgSent()

@@ -622,14 +659,6 @@ type ServerTransport interface {
 	IncrMsgRecv()
 }

-// streamErrorf creates an StreamError with the specified error code and description.
-func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
-	return StreamError{
-		Code: c,
-		Desc: fmt.Sprintf(format, a...),
-	}
-}
-
 // connectionErrorf creates an ConnectionError with the specified error description.
 func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
 	return ConnectionError{
@@ -672,7 +701,7 @@ var (
 	// errStreamDrain indicates that the stream is rejected because the
 	// connection is draining. This could be caused by goaway or balancer
 	// removing the address.
-	errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining")
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
 	// errStreamDone is returned from write at the client side to indiacte application
 	// layer of an error.
 	errStreamDone = errors.New("the stream is done")
@@ -681,18 +710,6 @@ var (
 	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
 )

-// TODO: See if we can replace StreamError with status package errors.
-
-// StreamError is an error that only affects one stream within a connection.
-type StreamError struct {
-	Code codes.Code
-	Desc string
-}
-
-func (e StreamError) Error() string {
-	return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
-}
-
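With StreamError removed, transport-level failures now surface as plain status errors; a small sketch (retry policy and helper name are illustrative) of how callers typically classify them:

```go
package statuserrs

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isRetryable inspects an RPC error as a status error, which is what
// errStreamDrain above now produces.
func isRetryable(err error) bool {
	s, ok := status.FromError(err)
	if !ok {
		return false // not a gRPC status error
	}
	switch s.Code() {
	case codes.Unavailable, codes.ResourceExhausted:
		return true
	default:
		return false
	}
}
```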
 // GoAwayReason contains the reason for the GoAway frame received.
 type GoAwayReason uint8

@@ -706,3 +723,38 @@ const (
 	// "too_many_pings".
 	GoAwayTooManyPings GoAwayReason = 2
 )
+
+// channelzData is used to store channelz related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
+// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
+// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
+type channelzData struct {
+	kpCount int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// Client side: The number of streams that have ended successfully by receiving
+	// EoS bit set frame from server.
+	// Server side: The number of streams that have ended successfully by sending
+	// frame with EoS bit set.
+	streamsSucceeded int64
+	streamsFailed    int64
+	// lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
+	// instead of time.Time since it's more costly to atomically update time.Time variable than int64
+	// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
+	lastStreamCreatedTime int64
+	msgSent               int64
+	msgRecv               int64
+	lastMsgSentTime       int64
+	lastMsgRecvTime       int64
+}
+
+// ContextErr converts the error from context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
+}
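The alignment note on channelzData reflects a general Go rule: 64-bit atomic operations require 64-bit alignment on 32-bit platforms, and the first word of an allocated struct is guaranteed to be aligned. A self-contained sketch of the same pattern (names here are made up):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// counters groups int64 fields that are only touched through sync/atomic,
// mirroring how channelzData enforces alignment by grouping its counters.
type counters struct {
	streamsStarted int64
	msgSent        int64
}

func main() {
	c := &counters{}
	atomic.AddInt64(&c.streamsStarted, 1)
	atomic.AddInt64(&c.msgSent, 3)
	fmt.Println(atomic.LoadInt64(&c.streamsStarted), atomic.LoadInt64(&c.msgSent))
}
```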
|
@@ -16,7 +16,8 @@
 *
 */

-// Package keepalive defines configurable parameters for point-to-point healthcheck.
+// Package keepalive defines configurable parameters for point-to-point
+// healthcheck.
 package keepalive

 import (
@@ -24,42 +25,61 @@ import (
 )

 // ClientParameters is used to set keepalive parameters on the client-side.
-// These configure how the client will actively probe to notice when a connection is broken
-// and send pings so intermediaries will be aware of the liveness of the connection.
-// Make sure these parameters are set in coordination with the keepalive policy on the server,
-// as incompatible settings can result in closing of connection.
+// These configure how the client will actively probe to notice when a
+// connection is broken and send pings so intermediaries will be aware of the
+// liveness of the connection. Make sure these parameters are set in
+// coordination with the keepalive policy on the server, as incompatible
+// settings can result in closing of connection.
 type ClientParameters struct {
-	// After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive.
+	// After a duration of this time if the client doesn't see any activity it
+	// pings the server to see if the transport is still alive.
+	// If set below 10s, a minimum value of 10s will be used instead.
 	Time time.Duration // The current default value is infinity.
-	// After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that
-	// the connection is closed.
+	// After having pinged for keepalive check, the client waits for a duration
+	// of Timeout and if no activity is seen even after that the connection is
+	// closed.
 	Timeout time.Duration // The current default value is 20 seconds.
-	// If true, client runs keepalive checks even with no active RPCs.
+	// If true, client sends keepalive pings even with no active RPCs. If false,
+	// when there are no active RPCs, Time and Timeout will be ignored and no
+	// keepalive pings will be sent.
	PermitWithoutStream bool // false by default.
 }

-// ServerParameters is used to set keepalive and max-age parameters on the server-side.
+// ServerParameters is used to set keepalive and max-age parameters on the
+// server-side.
 type ServerParameters struct {
-	// MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
-	// Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment.
+	// MaxConnectionIdle is a duration for the amount of time after which an
+	// idle connection would be closed by sending a GoAway. Idleness duration is
+	// defined since the most recent time the number of outstanding RPCs became
+	// zero or the connection establishment.
 	MaxConnectionIdle time.Duration // The current default value is infinity.
-	// MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
-	// A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
+	// MaxConnectionAge is a duration for the maximum amount of time a
+	// connection may exist before it will be closed by sending a GoAway. A
+	// random jitter of +/-10% will be added to MaxConnectionAge to spread out
+	// connection storms.
 	MaxConnectionAge time.Duration // The current default value is infinity.
-	// MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
+	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+	// which the connection will be forcibly closed.
 	MaxConnectionAgeGrace time.Duration // The current default value is infinity.
-	// After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive.
+	// After a duration of this time if the server doesn't see any activity it
+	// pings the client to see if the transport is still alive.
+	// If set below 1s, a minimum value of 1s will be used instead.
 	Time time.Duration // The current default value is 2 hours.
-	// After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that
-	// the connection is closed.
+	// After having pinged for keepalive check, the server waits for a duration
+	// of Timeout and if no activity is seen even after that the connection is
+	// closed.
 	Timeout time.Duration // The current default value is 20 seconds.
 }

-// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
-// Server will close connection with a client that violates this policy.
+// EnforcementPolicy is used to set keepalive enforcement policy on the
+// server-side. Server will close connection with a client that violates this
+// policy.
 type EnforcementPolicy struct {
-	// MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
+	// MinTime is the minimum amount of time a client should wait before sending
+	// a keepalive ping.
 	MinTime time.Duration // The current default value is 5 minutes.
-	// If true, server expects keepalive pings even when there are no active streams(RPCs).
+	// If true, server allows keepalive pings even when there are no active
+	// streams(RPCs). If false, and client sends ping when there are no active
+	// streams, server will send GOAWAY and close the connection.
 	PermitWithoutStream bool // false by default.
 }
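A minimal sketch of how these parameters are wired up through grpc-go's public options; durations and package name are illustrative only.

```go
package keepalivecfg

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// dialWithKeepalive sets ClientParameters on the client connection.
func dialWithKeepalive(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second, // ping after 30s of inactivity
			Timeout:             20 * time.Second, // wait 20s for the ping ack
			PermitWithoutStream: true,             // keep pinging with no active RPCs
		}))
}

// newServerWithKeepalive applies ServerParameters and an EnforcementPolicy;
// the policy must be compatible with what clients are configured to send.
func newServerWithKeepalive() *grpc.Server {
	return grpc.NewServer(
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 5 * time.Minute,
			Time:              2 * time.Hour,
			Timeout:           20 * time.Second,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             time.Minute, // reject clients that ping more often than this
			PermitWithoutStream: true,
		}))
}
```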
@@ -22,10 +22,9 @@
 package metadata // import "google.golang.org/grpc/metadata"

 import (
+	"context"
 	"fmt"
 	"strings"
-
-	"golang.org/x/net/context"
 )

 // DecodeKeyValue returns k, v, nil.
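The import change above means metadata helpers are now used with the standard library's context package; a short usage sketch (the metadata key is made up):

```go
package metadataexample

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// withRequestID attaches outgoing metadata to a context before issuing an RPC.
func withRequestID(ctx context.Context, id string) context.Context {
	md := metadata.Pairs("x-request-id", id)
	return metadata.NewOutgoingContext(ctx, md)
}

// requestIDFromServerCtx reads the same key on the server side of the call.
func requestIDFromServerCtx(ctx context.Context) (string, bool) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok || len(md.Get("x-request-id")) == 0 {
		return "", false
	}
	return md.Get("x-request-id")[0], true
}
```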
@@ -19,13 +19,13 @@
 package naming

 import (
+	"context"
 	"errors"
 	"fmt"
 	"net"
 	"strconv"
 	"time"

-	"golang.org/x/net/context"
 	"google.golang.org/grpc/grpclog"
 )

@@ -37,6 +37,9 @@ const (
 var (
 	errMissingAddr  = errors.New("missing address")
 	errWatcherClose = errors.New("watcher has been closed")
+
+	lookupHost = net.DefaultResolver.LookupHost
+	lookupSRV  = net.DefaultResolver.LookupSRV
 )

 // NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
@@ -73,8 +76,8 @@ func formatIP(addr string) (addrIP string, ok bool) {

 // parseTarget takes the user input target string, returns formatted host and port info.
 // If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets
-// are strippd when setting the host.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
 // examples:
 // target: "www.google.com" returns host: "www.google.com", port: "443"
 // target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
@@ -218,7 +221,7 @@ func (w *dnsWatcher) lookupSRV() map[string]*Update {
 	for _, s := range srvs {
 		lbAddrs, err := lookupHost(w.ctx, s.Target)
 		if err != nil {
-			grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err)
+			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
 			continue
 		}
 		for _, a := range lbAddrs {
@@ -17,7 +17,7 @@
 */

 // Package naming defines the naming API and related data structures for gRPC.
-// The interface is EXPERIMENTAL and may be suject to change.
+// The interface is EXPERIMENTAL and may be subject to change.
 //
 // Deprecated: please use package resolver.
 package naming
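A hedged, standalone sketch (not the library's own parseTarget) of the host/port behaviour documented in the examples above, built on net.SplitHostPort with a default-port fallback:

```go
package main

import (
	"fmt"
	"net"
)

// splitWithDefault mimics the documented examples: bare hosts get the default
// port, and bracketed IPv6 hosts are returned without the brackets.
func splitWithDefault(target, defaultPort string) (host, port string) {
	if h, p, err := net.SplitHostPort(target); err == nil {
		return h, p
	}
	// No port present: retry with the default port appended.
	if h, p, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
		return h, p
	}
	return target, defaultPort
}

func main() {
	fmt.Println(splitWithDefault("www.google.com", "443")) // www.google.com 443
	fmt.Println(splitWithDefault("ipv4-host:80", "443"))   // ipv4-host 80
	fmt.Println(splitWithDefault("[::1]", "443"))          // ::1 443
}
```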
@@ -21,9 +21,9 @@
 package peer

 import (
+	"context"
 	"net"

-	"golang.org/x/net/context"
 	"google.golang.org/grpc/credentials"
 )

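For reference, a small sketch of how the peer package is consumed from a server handler (the helper name is illustrative):

```go
package peerexample

import (
	"context"

	"google.golang.org/grpc/peer"
)

// logCaller reports the remote address recorded by the transport for this RPC.
func logCaller(ctx context.Context) string {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return "unknown peer"
	}
	return p.Addr.String() // e.g. "192.0.2.10:52114"
}
```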
@@ -19,19 +19,16 @@
 package grpc

 import (
+	"context"
 	"io"
 	"sync"
-	"sync/atomic"

-	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
 )

 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
@@ -45,16 +42,10 @@ type pickerWrapper struct {
 	// The latest connection happened.
 	connErrMu sync.Mutex
 	connErr   error
-
-	stickinessMDKey atomic.Value
-	stickiness      *stickyStore
 }

 func newPickerWrapper() *pickerWrapper {
-	bp := &pickerWrapper{
-		blockingCh: make(chan struct{}),
-		stickiness: newStickyStore(),
-	}
+	bp := &pickerWrapper{blockingCh: make(chan struct{})}
	return bp
 }

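The blockingCh field above follows the common "close a channel to broadcast" idiom: blocked pickers wait on the current channel, and publishing a new picker closes it so every waiter retries. A self-contained sketch of that pattern under assumed names (not code from this diff):

```go
package broadcast

import "sync"

// Broadcaster lets many goroutines wait for the next published value.
type Broadcaster struct {
	mu      sync.Mutex
	ch      chan struct{}
	current string
}

func New() *Broadcaster {
	return &Broadcaster{ch: make(chan struct{})}
}

// Update publishes a new value and unblocks everyone waiting on the old one.
func (b *Broadcaster) Update(v string) {
	b.mu.Lock()
	b.current = v
	close(b.ch)                // wake all current waiters
	b.ch = make(chan struct{}) // future waiters block on a fresh channel
	b.mu.Unlock()
}

// Wait blocks until the next Update and returns the published value.
func (b *Broadcaster) Wait() string {
	b.mu.Lock()
	ch := b.ch
	b.mu.Unlock()
	<-ch
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.current
}
```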
|
@ -71,27 +62,6 @@ func (bp *pickerWrapper) connectionError() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bp *pickerWrapper) updateStickinessMDKey(newKey string) {
|
|
||||||
// No need to check ok because mdKey == "" if ok == false.
|
|
||||||
if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey {
|
|
||||||
bp.stickinessMDKey.Store(newKey)
|
|
||||||
bp.stickiness.reset(newKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bp *pickerWrapper) getStickinessMDKey() string {
|
|
||||||
// No need to check ok because mdKey == "" if ok == false.
|
|
||||||
mdKey, _ := bp.stickinessMDKey.Load().(string)
|
|
||||||
return mdKey
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bp *pickerWrapper) clearStickinessState() {
|
|
||||||
if oldKey := bp.getStickinessMDKey(); oldKey != "" {
|
|
||||||
// There's no need to reset store if mdKey was "".
|
|
||||||
bp.stickiness.reset(oldKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
||||||
func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
|
func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||||
bp.mu.Lock()
|
bp.mu.Lock()
|
||||||
|
@ -131,31 +101,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
|
||||||
// - the subConn returned by the current picker is not READY
|
// - the subConn returned by the current picker is not READY
|
||||||
// When one of these situations happens, pick blocks until the picker gets updated.
|
// When one of these situations happens, pick blocks until the picker gets updated.
|
||||||
func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||||
|
var ch chan struct{}
|
||||||
mdKey := bp.getStickinessMDKey()
|
|
||||||
stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey)
|
|
||||||
|
|
||||||
// Potential race here: if stickinessMDKey is updated after the above two
|
|
||||||
// lines, and this pick is a sticky pick, the following put could add an
|
|
||||||
// entry to sticky store with an outdated sticky key.
|
|
||||||
//
|
|
||||||
// The solution: keep the current md key in sticky store, and at the
|
|
||||||
// beginning of each get/put, check the mdkey against store.curMDKey.
|
|
||||||
// - Cons: one more string comparing for each get/put.
|
|
||||||
// - Pros: the string matching happens inside get/put, so the overhead for
|
|
||||||
// non-sticky RPCs will be minimal.
|
|
||||||
|
|
||||||
if isSticky {
|
|
||||||
if t, ok := bp.stickiness.get(mdKey, stickyKey); ok {
|
|
||||||
// Done function returned is always nil.
|
|
||||||
return t, nil, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
p balancer.Picker
|
|
||||||
ch chan struct{}
|
|
||||||
)
|
|
||||||
|
|
||||||
for {
|
for {
|
||||||
bp.mu.Lock()
|
bp.mu.Lock()
|
||||||
|
@ -181,7 +127,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
|
||||||
}
|
}
|
||||||
|
|
||||||
ch = bp.blockingCh
|
ch = bp.blockingCh
|
||||||
p = bp.picker
|
p := bp.picker
|
||||||
bp.mu.Unlock()
|
bp.mu.Unlock()
|
||||||
|
|
||||||
subConn, done, err := p.Pick(ctx, opts)
|
subConn, done, err := p.Pick(ctx, opts)
|
||||||
|
@ -195,26 +141,35 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
|
return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
|
||||||
|
case context.DeadlineExceeded:
|
||||||
|
return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
|
||||||
|
case context.Canceled:
|
||||||
|
return nil, nil, status.Error(codes.Canceled, err.Error())
|
||||||
default:
|
default:
|
||||||
|
if _, ok := status.FromError(err); ok {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
// err is some other error.
|
// err is some other error.
|
||||||
return nil, nil, toRPCErr(err)
|
return nil, nil, status.Error(codes.Unknown, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
acw, ok := subConn.(*acBalancerWrapper)
|
acw, ok := subConn.(*acBalancerWrapper)
|
||||||
if !ok {
|
if !ok {
|
||||||
grpclog.Infof("subconn returned from pick is not *acBalancerWrapper")
|
grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
|
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
|
||||||
if isSticky {
|
|
||||||
bp.stickiness.put(mdKey, stickyKey, acw)
|
|
||||||
}
|
|
||||||
if channelz.IsOn() {
|
if channelz.IsOn() {
|
||||||
return t, doneChannelzWrapper(acw, done), nil
|
return t, doneChannelzWrapper(acw, done), nil
|
||||||
}
|
}
|
||||||
return t, done, nil
|
return t, done, nil
|
||||||
}
|
}
|
||||||
|
if done != nil {
|
||||||
|
// Calling done with nil error, no bytes sent and no bytes received.
|
||||||
|
// DoneInfo with default value works.
|
||||||
|
done(balancer.DoneInfo{})
|
||||||
|
}
|
||||||
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
|
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
|
||||||
// If ok == false, ac.state is not READY.
|
// If ok == false, ac.state is not READY.
|
||||||
// A valid picker always returns READY subConn. This means the state of ac
|
// A valid picker always returns READY subConn. This means the state of ac
|
||||||
|
@ -232,100 +187,3 @@ func (bp *pickerWrapper) close() {
|
||||||
bp.done = true
|
bp.done = true
|
||||||
close(bp.blockingCh)
|
close(bp.blockingCh)
|
||||||
}
|
}
|
||||||
|
|
||||||
type stickyStoreEntry struct {
|
|
||||||
acw *acBalancerWrapper
|
|
||||||
addr resolver.Address
|
|
||||||
}
|
|
||||||
|
|
||||||
type stickyStore struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
// curMDKey is check before every get/put to avoid races. The operation will
|
|
||||||
// abort immediately when the given mdKey is different from the curMDKey.
|
|
||||||
curMDKey string
|
|
||||||
store map[string]*stickyStoreEntry
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStickyStore() *stickyStore {
|
|
||||||
return &stickyStore{
|
|
||||||
store: make(map[string]*stickyStoreEntry),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset clears the map in stickyStore, and set the currentMDKey to newMDKey.
|
|
||||||
func (ss *stickyStore) reset(newMDKey string) {
|
|
||||||
ss.mu.Lock()
|
|
||||||
ss.curMDKey = newMDKey
|
|
||||||
ss.store = make(map[string]*stickyStoreEntry)
|
|
||||||
ss.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// stickyKey is the key to look up in store. mdKey will be checked against
|
|
||||||
// curMDKey to avoid races.
|
|
||||||
func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) {
|
|
||||||
ss.mu.Lock()
|
|
||||||
defer ss.mu.Unlock()
|
|
||||||
if mdKey != ss.curMDKey {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// TODO(stickiness): limit the total number of entries.
|
|
||||||
ss.store[stickyKey] = &stickyStoreEntry{
|
|
||||||
acw: acw,
|
|
||||||
addr: acw.getAddrConn().getCurAddr(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// stickyKey is the key to look up in store. mdKey will be checked against
|
|
||||||
// curMDKey to avoid races.
|
|
||||||
func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) {
|
|
||||||
ss.mu.Lock()
|
|
||||||
defer ss.mu.Unlock()
|
|
||||||
if mdKey != ss.curMDKey {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
entry, ok := ss.store[stickyKey]
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
ac := entry.acw.getAddrConn()
|
|
||||||
if ac.getCurAddr() != entry.addr {
|
|
||||||
delete(ss.store, stickyKey)
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
t, ok := ac.getReadyTransport()
|
|
||||||
if !ok {
|
|
||||||
delete(ss.store, stickyKey)
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
return t, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get one value from metadata in ctx with key stickinessMDKey.
|
|
||||||
//
|
|
||||||
// It returns "", false if stickinessMDKey is an empty string.
|
|
||||||
func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) {
|
|
||||||
if stickinessMDKey == "" {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
md, added, ok := metadata.FromOutgoingContextRaw(ctx)
|
|
||||||
if !ok {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
if vv, ok := md[stickinessMDKey]; ok {
|
|
||||||
if len(vv) > 0 {
|
|
||||||
return vv[0], true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, ss := range added {
|
|
||||||
for i := 0; i < len(ss)-1; i += 2 {
|
|
||||||
if ss[i] == stickinessMDKey {
|
|
||||||
return ss[i+1], true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
|
@ -19,7 +19,8 @@
|
||||||
package grpc
|
package grpc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"golang.org/x/net/context"
|
"context"
|
||||||
|
|
||||||
"google.golang.org/grpc/balancer"
|
"google.golang.org/grpc/balancer"
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
|
@ -56,6 +57,7 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
|
||||||
if b.sc == nil {
|
if b.sc == nil {
|
||||||
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
|
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
//TODO(yuxuanli): why not change the cc state to Idle?
|
||||||
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,6 +20,8 @@ package grpc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
@ -27,10 +29,10 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httputil"
|
"net/http/httputil"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const proxyAuthHeaderKey = "Proxy-Authorization"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// errDisabled indicates that proxy is disabled for the address.
|
// errDisabled indicates that proxy is disabled for the address.
|
||||||
errDisabled = errors.New("proxy is disabled for the address")
|
errDisabled = errors.New("proxy is disabled for the address")
|
||||||
|
@ -38,7 +40,7 @@ var (
|
||||||
httpProxyFromEnvironment = http.ProxyFromEnvironment
|
httpProxyFromEnvironment = http.ProxyFromEnvironment
|
||||||
)
|
)
|
||||||
|
|
||||||
func mapAddress(ctx context.Context, address string) (string, error) {
|
func mapAddress(ctx context.Context, address string) (*url.URL, error) {
|
||||||
req := &http.Request{
|
req := &http.Request{
|
||||||
URL: &url.URL{
|
URL: &url.URL{
|
||||||
Scheme: "https",
|
Scheme: "https",
|
||||||
|
@ -47,12 +49,12 @@ func mapAddress(ctx context.Context, address string) (string, error) {
|
||||||
}
|
}
|
||||||
url, err := httpProxyFromEnvironment(req)
|
url, err := httpProxyFromEnvironment(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return nil, err
|
||||||
}
|
}
|
||||||
if url == nil {
|
if url == nil {
|
||||||
return "", errDisabled
|
return nil, errDisabled
|
||||||
}
|
}
|
||||||
return url.Host, nil
|
return url, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
|
// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
|
||||||
|
@ -69,18 +71,28 @@ func (c *bufConn) Read(b []byte) (int, error) {
|
||||||
return c.r.Read(b)
|
return c.r.Read(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
|
func basicAuth(username, password string) string {
|
||||||
|
auth := username + ":" + password
|
||||||
|
return base64.StdEncoding.EncodeToString([]byte(auth))
|
||||||
|
}
|
||||||
|
|
||||||
|
func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
conn.Close()
|
conn.Close()
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
req := (&http.Request{
|
req := &http.Request{
|
||||||
Method: http.MethodConnect,
|
Method: http.MethodConnect,
|
||||||
URL: &url.URL{Host: addr},
|
URL: &url.URL{Host: backendAddr},
|
||||||
Header: map[string][]string{"User-Agent": {grpcUA}},
|
Header: map[string][]string{"User-Agent": {grpcUA}},
|
||||||
})
|
}
|
||||||
|
if t := proxyURL.User; t != nil {
|
||||||
|
u := t.Username()
|
||||||
|
p, _ := t.Password()
|
||||||
|
req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
|
||||||
|
}
|
||||||
|
|
||||||
if err := sendHTTPRequest(ctx, req, conn); err != nil {
|
if err := sendHTTPRequest(ctx, req, conn); err != nil {
|
||||||
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
|
return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
|
||||||
|
@ -108,23 +120,33 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_
|
||||||
// provided dialer, does HTTP CONNECT handshake and returns the connection.
|
// provided dialer, does HTTP CONNECT handshake and returns the connection.
|
||||||
func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
|
func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
|
||||||
return func(ctx context.Context, addr string) (conn net.Conn, err error) {
|
return func(ctx context.Context, addr string) (conn net.Conn, err error) {
|
||||||
var skipHandshake bool
|
var newAddr string
|
||||||
newAddr, err := mapAddress(ctx, addr)
|
proxyURL, err := mapAddress(ctx, addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err != errDisabled {
|
if err != errDisabled {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
skipHandshake = true
|
|
||||||
newAddr = addr
|
newAddr = addr
|
||||||
|
} else {
|
||||||
|
newAddr = proxyURL.Host
|
||||||
}
|
}
|
||||||
|
|
||||||
conn, err = dialer(ctx, newAddr)
|
conn, err = dialer(ctx, newAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !skipHandshake {
|
if proxyURL != nil {
|
||||||
conn, err = doHTTPConnectHandshake(ctx, conn, addr)
|
// proxy is disabled if proxyURL is nil.
|
||||||
|
conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
|
||||||
|
req = req.WithContext(ctx)
|
||||||
|
if err := req.Write(conn); err != nil {
|
||||||
|
return fmt.Errorf("failed to write the HTTP request: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
/*
|
/*
|
||||||
*
|
*
|
||||||
* Copyright 2017 gRPC authors.
|
* Copyright 2018 gRPC authors.
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
* you may not use this file except in compliance with the License.
|
* you may not use this file except in compliance with the License.
|
||||||
|
@ -21,10 +21,10 @@
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/rand"
|
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
@ -32,8 +32,9 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/internal/backoff"
|
||||||
|
"google.golang.org/grpc/internal/grpcrand"
|
||||||
"google.golang.org/grpc/resolver"
|
"google.golang.org/grpc/resolver"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -42,32 +43,65 @@ func init() {
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
defaultPort = "443"
|
defaultPort = "443"
|
||||||
defaultFreq = time.Minute * 30
|
defaultFreq = time.Minute * 30
|
||||||
golang = "GO"
|
defaultDNSSvrPort = "53"
|
||||||
|
golang = "GO"
|
||||||
|
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
|
||||||
|
txtPrefix = "_grpc_config."
|
||||||
// In DNS, service config is encoded in a TXT record via the mechanism
|
// In DNS, service config is encoded in a TXT record via the mechanism
|
||||||
// described in RFC-1464 using the attribute name grpc_config.
|
// described in RFC-1464 using the attribute name grpc_config.
|
||||||
txtAttribute = "grpc_config="
|
txtAttribute = "grpc_config="
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
errMissingAddr = errors.New("missing address")
|
errMissingAddr = errors.New("dns resolver: missing address")
|
||||||
randomGen = rand.New(rand.NewSource(time.Now().UnixNano()))
|
|
||||||
|
// Addresses ending with a colon that is supposed to be the separator
|
||||||
|
// between host and port is not allowed. E.g. "::" is a valid address as
|
||||||
|
// it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
|
||||||
|
// a colon as the host and port separator
|
||||||
|
errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultResolver netResolver = net.DefaultResolver
|
||||||
|
)
|
||||||
|
|
||||||
|
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||||
|
return func(ctx context.Context, network, address string) (net.Conn, error) {
|
||||||
|
var dialer net.Dialer
|
||||||
|
return dialer.DialContext(ctx, network, authority)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var customAuthorityResolver = func(authority string) (netResolver, error) {
|
||||||
|
host, port, err := parseTarget(authority, defaultDNSSvrPort)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
authorityWithPort := net.JoinHostPort(host, port)
|
||||||
|
|
||||||
|
return &net.Resolver{
|
||||||
|
PreferGo: true,
|
||||||
|
Dial: customAuthorityDialler(authorityWithPort),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
|
// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
|
||||||
func NewBuilder() resolver.Builder {
|
func NewBuilder() resolver.Builder {
|
||||||
return &dnsBuilder{freq: defaultFreq}
|
return &dnsBuilder{minFreq: defaultFreq}
|
||||||
}
|
}
|
||||||
|
|
||||||
type dnsBuilder struct {
|
type dnsBuilder struct {
|
||||||
// frequency of polling the DNS server.
|
// minimum frequency of polling the DNS server.
|
||||||
freq time.Duration
|
minFreq time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
||||||
host, port, err := parseTarget(target.Endpoint)
|
host, port, err := parseTarget(target.Endpoint, defaultPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -90,7 +124,8 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||||||
// DNS address (non-IP).
|
// DNS address (non-IP).
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
d := &dnsResolver{
|
d := &dnsResolver{
|
||||||
freq: b.freq,
|
freq: b.minFreq,
|
||||||
|
backoff: backoff.Exponential{MaxDelay: b.minFreq},
|
||||||
host: host,
|
host: host,
|
||||||
port: port,
|
port: port,
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
|
@ -101,6 +136,15 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||||||
disableServiceConfig: opts.DisableServiceConfig,
|
disableServiceConfig: opts.DisableServiceConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if target.Authority == "" {
|
||||||
|
d.resolver = defaultResolver
|
||||||
|
} else {
|
||||||
|
d.resolver, err = customAuthorityResolver(target.Authority)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
d.wg.Add(1)
|
d.wg.Add(1)
|
||||||
go d.watcher()
|
go d.watcher()
|
||||||
return d, nil
|
return d, nil
|
||||||
|
@ -111,6 +155,12 @@ func (b *dnsBuilder) Scheme() string {
|
||||||
return "dns"
|
return "dns"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type netResolver interface {
|
||||||
|
LookupHost(ctx context.Context, host string) (addrs []string, err error)
|
||||||
|
LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
|
||||||
|
LookupTXT(ctx context.Context, name string) (txts []string, err error)
|
||||||
|
}
|
||||||
|
|
||||||
// ipResolver watches for the name resolution update for an IP address.
|
// ipResolver watches for the name resolution update for an IP address.
|
||||||
type ipResolver struct {
|
type ipResolver struct {
|
||||||
cc resolver.ClientConn
|
cc resolver.ClientConn
|
||||||
|
@ -146,12 +196,15 @@ func (i *ipResolver) watcher() {
|
||||||
|
|
||||||
// dnsResolver watches for the name resolution update for a non-IP target.
|
// dnsResolver watches for the name resolution update for a non-IP target.
|
||||||
type dnsResolver struct {
|
type dnsResolver struct {
|
||||||
freq time.Duration
|
freq time.Duration
|
||||||
host string
|
backoff backoff.Exponential
|
||||||
port string
|
retryCount int
|
||||||
ctx context.Context
|
host string
|
||||||
cancel context.CancelFunc
|
port string
|
||||||
cc resolver.ClientConn
|
resolver netResolver
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
cc resolver.ClientConn
|
||||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||||
rn chan struct{}
|
rn chan struct{}
|
||||||
t *time.Timer
|
t *time.Timer
|
||||||
|
@ -190,8 +243,15 @@ func (d *dnsResolver) watcher() {
|
||||||
case <-d.rn:
|
case <-d.rn:
|
||||||
}
|
}
|
||||||
result, sc := d.lookup()
|
result, sc := d.lookup()
|
||||||
// Next lookup should happen after an interval defined by d.freq.
|
// Next lookup should happen within an interval defined by d.freq. It may be
|
||||||
d.t.Reset(d.freq)
|
// more often due to exponential retry on empty address list.
|
||||||
|
if len(result) == 0 {
|
||||||
|
d.retryCount++
|
||||||
|
d.t.Reset(d.backoff.Backoff(d.retryCount))
|
||||||
|
} else {
|
||||||
|
d.retryCount = 0
|
||||||
|
d.t.Reset(d.freq)
|
||||||
|
}
|
||||||
d.cc.NewServiceConfig(sc)
|
d.cc.NewServiceConfig(sc)
|
||||||
d.cc.NewAddress(result)
|
d.cc.NewAddress(result)
|
||||||
}
|
}
|
||||||
|
@ -199,13 +259,13 @@ func (d *dnsResolver) watcher() {
|
||||||
|
|
||||||
func (d *dnsResolver) lookupSRV() []resolver.Address {
|
func (d *dnsResolver) lookupSRV() []resolver.Address {
|
||||||
var newAddrs []resolver.Address
|
var newAddrs []resolver.Address
|
||||||
_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
|
_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
|
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
for _, s := range srvs {
|
for _, s := range srvs {
|
||||||
lbAddrs, err := lookupHost(d.ctx, s.Target)
|
lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
|
grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
|
||||||
continue
|
continue
|
||||||
|
@ -224,7 +284,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *dnsResolver) lookupTXT() string {
|
func (d *dnsResolver) lookupTXT() string {
|
||||||
ss, err := lookupTXT(d.ctx, d.host)
|
ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
|
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
|
||||||
return ""
|
return ""
|
||||||
|
@ -244,7 +304,7 @@ func (d *dnsResolver) lookupTXT() string {
|
||||||
|
|
||||||
func (d *dnsResolver) lookupHost() []resolver.Address {
|
func (d *dnsResolver) lookupHost() []resolver.Address {
|
||||||
var newAddrs []resolver.Address
|
var newAddrs []resolver.Address
|
||||||
addrs, err := lookupHost(d.ctx, d.host)
|
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
|
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
|
||||||
return nil
|
return nil
|
||||||
|
@ -286,17 +346,16 @@ func formatIP(addr string) (addrIP string, ok bool) {
|
||||||
return "[" + addr + "]", true
|
return "[" + addr + "]", true
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseTarget takes the user input target string, returns formatted host and port info.
|
// parseTarget takes the user input target string and default port, returns formatted host and port info.
|
||||||
// If target doesn't specify a port, set the port to be the defaultPort.
|
// If target doesn't specify a port, set the port to be the defaultPort.
|
||||||
// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets
|
// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
|
||||||
// are strippd when setting the host.
|
// are stripped when setting the host.
|
||||||
// examples:
|
// examples:
|
||||||
// target: "www.google.com" returns host: "www.google.com", port: "443"
|
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
|
||||||
// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
|
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
|
||||||
// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
|
// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
|
||||||
// target: ":80" returns host: "localhost", port: "80"
|
// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
|
||||||
// target: ":" returns host: "localhost", port: "443"
|
func parseTarget(target, defaultPort string) (host, port string, err error) {
|
||||||
func parseTarget(target string) (host, port string, err error) {
|
|
||||||
if target == "" {
|
if target == "" {
|
||||||
return "", "", errMissingAddr
|
return "", "", errMissingAddr
|
||||||
}
|
}
|
||||||
|
@ -305,15 +364,15 @@ func parseTarget(target string) (host, port string, err error) {
|
||||||
return target, defaultPort, nil
|
return target, defaultPort, nil
|
||||||
}
|
}
|
||||||
if host, port, err = net.SplitHostPort(target); err == nil {
|
if host, port, err = net.SplitHostPort(target); err == nil {
|
||||||
|
if port == "" {
|
||||||
|
// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
|
||||||
|
return "", "", errEndsWithColon
|
||||||
|
}
|
||||||
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
|
||||||
if host == "" {
|
if host == "" {
|
||||||
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
|
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
|
||||||
host = "localhost"
|
host = "localhost"
|
||||||
}
|
}
|
||||||
if port == "" {
|
|
||||||
// If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
|
|
||||||
port = defaultPort
|
|
||||||
}
|
|
||||||
return host, port, nil
|
return host, port, nil
|
||||||
}
|
}
|
||||||
if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
|
if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
|
||||||
|
@ -346,7 +405,7 @@ func chosenByPercentage(a *int) bool {
|
||||||
if a == nil {
|
if a == nil {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return randomGen.Intn(100)+1 <= *a
|
return grpcrand.Intn(100)+1 <= *a
|
||||||
}
|
}
|
||||||
|
|
||||||
func canaryingSC(js string) string {
|
func canaryingSC(js string) string {
|
||||||
|
|
|
@@ -1,35 +0,0 @@
-// +build go1.6, !go1.8
-
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import (
-	"net"
-
-	"golang.org/x/net/context"
-)
-
-var (
-	lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
-	lookupSRV  = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
-		return net.LookupSRV(service, proto, name)
-	}
-	lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
-)
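The file removed above carried pre-Go-1.8 shims around the package-level lookup functions. A rough equivalent on current Go versions is the context-aware `net.Resolver`; the host names below are placeholders and this is only an illustration, not code from the vendored resolver.

```
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// The zero-value Resolver uses the system resolver; all lookups honour ctx.
	var r net.Resolver
	addrs, err := r.LookupHost(ctx, "grpc.io")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("A/AAAA records:", addrs)

	_, srvs, err := r.LookupSRV(ctx, "grpclb", "tcp", "grpc.io")
	if err == nil {
		for _, s := range srvs {
			fmt.Printf("SRV target=%s port=%d\n", s.Target, s.Port)
		}
	}
}
```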
|
@@ -45,7 +45,7 @@ type passthroughResolver struct {
 }

 func (r *passthroughResolver) start() {
-	r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
+	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
 }

 func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
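The change above is the same one any custom resolver has to make: push addresses through `UpdateState` rather than `NewAddress`. A minimal sketch of such a resolver, registered under an invented "static" scheme with a hard-coded address, could look like this:

```
package static

import "google.golang.org/grpc/resolver"

type builder struct{}

// Build hands the ClientConn a fixed address list via the new UpdateState API.
func (builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	// Push the complete address list once; a real resolver would re-resolve.
	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
	return r, nil
}

func (builder) Scheme() string { return "static" }

type staticResolver struct{ cc resolver.ClientConn }

func (*staticResolver) ResolveNow(resolver.ResolveNowOption) {}
func (*staticResolver) Close()                               {}

func init() { resolver.Register(builder{}) }
```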
|
|
|
@@ -49,8 +49,12 @@ func Get(scheme string) Builder {
 	return nil
 }

-// SetDefaultScheme sets the default scheme that will be used.
-// The default default scheme is "passthrough".
+// SetDefaultScheme sets the default scheme that will be used. The default
+// default scheme is "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
 func SetDefaultScheme(scheme string) {
 	defaultScheme = scheme
 }
@@ -94,6 +98,15 @@ type BuildOption struct {
 	DisableServiceConfig bool
 }

+// State contains the current Resolver state relevant to the ClientConn.
+type State struct {
+	Addresses     []Address // Resolved addresses for the target
+	ServiceConfig string    // JSON representation of the service config
+
+	// TODO: add Err error
+	// TODO: add ParsedServiceConfig interface{}
+}
+
 // ClientConn contains the callbacks for resolver to notify any updates
 // to the gRPC ClientConn.
 //
@@ -102,12 +115,18 @@ type BuildOption struct {
 // testing, the new implementation should embed this interface. This allows
 // gRPC to add new methods to this interface.
 type ClientConn interface {
+	// UpdateState updates the state of the ClientConn appropriately.
+	UpdateState(State)
 	// NewAddress is called by resolver to notify ClientConn a new list
 	// of resolved addresses.
 	// The address list should be the complete list of resolved addresses.
+	//
+	// Deprecated: Use UpdateState instead.
 	NewAddress(addresses []Address)
 	// NewServiceConfig is called by resolver to notify ClientConn a new
 	// service config. The service config should be provided as a json string.
+	//
+	// Deprecated: Use UpdateState instead.
 	NewServiceConfig(serviceConfig string)
 }
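Per the NOTE added above, overriding the default scheme has to happen at package-initialization time. A minimal example, making "dns" the default so that plain `host:port` targets go through the DNS resolver (the shipped default stays "passthrough"):

```
package main

import "google.golang.org/grpc/resolver"

func init() {
	// Must run before any grpc.Dial call; not safe to call concurrently.
	resolver.SetDefaultScheme("dns")
}

func main() {}
```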
|
|
|
@ -21,8 +21,10 @@ package grpc
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/internal/channelz"
|
||||||
"google.golang.org/grpc/resolver"
|
"google.golang.org/grpc/resolver"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -33,11 +35,12 @@ type ccResolverWrapper struct {
|
||||||
resolver resolver.Resolver
|
resolver resolver.Resolver
|
||||||
addrCh chan []resolver.Address
|
addrCh chan []resolver.Address
|
||||||
scCh chan string
|
scCh chan string
|
||||||
done chan struct{}
|
done uint32 // accessed atomically; set to 1 when closed.
|
||||||
|
curState resolver.State
|
||||||
}
|
}
|
||||||
|
|
||||||
// split2 returns the values from strings.SplitN(s, sep, 2).
|
// split2 returns the values from strings.SplitN(s, sep, 2).
|
||||||
// If sep is not found, it returns ("", s, false) instead.
|
// If sep is not found, it returns ("", "", false) instead.
|
||||||
func split2(s, sep string) (string, string, bool) {
|
func split2(s, sep string) (string, string, bool) {
|
||||||
spl := strings.SplitN(s, sep, 2)
|
spl := strings.SplitN(s, sep, 2)
|
||||||
if len(spl) < 2 {
|
if len(spl) < 2 {
|
||||||
|
@ -65,8 +68,8 @@ func parseTarget(target string) (ret resolver.Target) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// newCCResolverWrapper parses cc.target for scheme and gets the resolver
|
// newCCResolverWrapper parses cc.target for scheme and gets the resolver
|
||||||
// builder for this scheme. It then builds the resolver and starts the
|
// builder for this scheme and builds the resolver. The monitoring goroutine
|
||||||
// monitoring goroutine for it.
|
// for it is not started yet and can be created by calling start().
|
||||||
//
|
//
|
||||||
// If withResolverBuilder dial option is set, the specified resolver will be
|
// If withResolverBuilder dial option is set, the specified resolver will be
|
||||||
// used instead.
|
// used instead.
|
||||||
|
@ -80,7 +83,6 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
|
||||||
cc: cc,
|
cc: cc,
|
||||||
addrCh: make(chan []resolver.Address, 1),
|
addrCh: make(chan []resolver.Address, 1),
|
||||||
scCh: make(chan string, 1),
|
scCh: make(chan string, 1),
|
||||||
done: make(chan struct{}),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
|
@ -91,68 +93,73 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
|
||||||
return ccr, nil
|
return ccr, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccr *ccResolverWrapper) start() {
|
|
||||||
go ccr.watcher()
|
|
||||||
}
|
|
||||||
|
|
||||||
// watcher processes address updates and service config updates sequentially.
|
|
||||||
// Otherwise, we need to resolve possible races between address and service
|
|
||||||
// config (e.g. they specify different balancer types).
|
|
||||||
func (ccr *ccResolverWrapper) watcher() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ccr.done:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case addrs := <-ccr.addrCh:
|
|
||||||
select {
|
|
||||||
case <-ccr.done:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
|
|
||||||
ccr.cc.handleResolvedAddrs(addrs, nil)
|
|
||||||
case sc := <-ccr.scCh:
|
|
||||||
select {
|
|
||||||
case <-ccr.done:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
|
|
||||||
ccr.cc.handleServiceConfig(sc)
|
|
||||||
case <-ccr.done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
|
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
|
||||||
ccr.resolver.ResolveNow(o)
|
ccr.resolver.ResolveNow(o)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccr *ccResolverWrapper) close() {
|
func (ccr *ccResolverWrapper) close() {
|
||||||
ccr.resolver.Close()
|
ccr.resolver.Close()
|
||||||
close(ccr.done)
|
atomic.StoreUint32(&ccr.done, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAddress is called by the resolver implemenetion to send addresses to gRPC.
|
func (ccr *ccResolverWrapper) isDone() bool {
|
||||||
|
return atomic.LoadUint32(&ccr.done) == 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
|
||||||
|
if ccr.isDone() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
|
||||||
|
if channelz.IsOn() {
|
||||||
|
ccr.addChannelzTraceEvent(s)
|
||||||
|
}
|
||||||
|
ccr.cc.updateResolverState(s)
|
||||||
|
ccr.curState = s
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAddress is called by the resolver implementation to send addresses to gRPC.
|
||||||
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||||
select {
|
if ccr.isDone() {
|
||||||
case <-ccr.addrCh:
|
return
|
||||||
default:
|
|
||||||
}
|
}
|
||||||
ccr.addrCh <- addrs
|
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
|
||||||
|
if channelz.IsOn() {
|
||||||
|
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
||||||
|
}
|
||||||
|
ccr.curState.Addresses = addrs
|
||||||
|
ccr.cc.updateResolverState(ccr.curState)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewServiceConfig is called by the resolver implemenetion to send service
|
// NewServiceConfig is called by the resolver implementation to send service
|
||||||
// configs to gPRC.
|
// configs to gRPC.
|
||||||
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||||
select {
|
if ccr.isDone() {
|
||||||
case <-ccr.scCh:
|
return
|
||||||
default:
|
|
||||||
}
|
}
|
||||||
ccr.scCh <- sc
|
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
|
||||||
|
if channelz.IsOn() {
|
||||||
|
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc})
|
||||||
|
}
|
||||||
|
ccr.curState.ServiceConfig = sc
|
||||||
|
ccr.cc.updateResolverState(ccr.curState)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||||
|
if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var updates []string
|
||||||
|
if s.ServiceConfig != ccr.curState.ServiceConfig {
|
||||||
|
updates = append(updates, "service config updated")
|
||||||
|
}
|
||||||
|
if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
|
||||||
|
updates = append(updates, "resolver returned an empty address list")
|
||||||
|
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
|
||||||
|
updates = append(updates, "resolver returned new addresses")
|
||||||
|
}
|
||||||
|
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
|
||||||
|
Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
|
||||||
|
Severity: channelz.CtINFO,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
@ -21,6 +21,7 @@ package grpc
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
@ -31,16 +32,15 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/encoding"
|
"google.golang.org/grpc/encoding"
|
||||||
"google.golang.org/grpc/encoding/proto"
|
"google.golang.org/grpc/encoding/proto"
|
||||||
|
"google.golang.org/grpc/internal/transport"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
"google.golang.org/grpc/peer"
|
"google.golang.org/grpc/peer"
|
||||||
"google.golang.org/grpc/stats"
|
"google.golang.org/grpc/stats"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
"google.golang.org/grpc/transport"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Compressor defines the interface gRPC uses to compress a message.
|
// Compressor defines the interface gRPC uses to compress a message.
|
||||||
|
@ -155,17 +155,20 @@ func (d *gzipDecompressor) Type() string {
|
||||||
type callInfo struct {
|
type callInfo struct {
|
||||||
compressorType string
|
compressorType string
|
||||||
failFast bool
|
failFast bool
|
||||||
stream *clientStream
|
stream ClientStream
|
||||||
traceInfo traceInfo // in trace.go
|
|
||||||
maxReceiveMessageSize *int
|
maxReceiveMessageSize *int
|
||||||
maxSendMessageSize *int
|
maxSendMessageSize *int
|
||||||
creds credentials.PerRPCCredentials
|
creds credentials.PerRPCCredentials
|
||||||
contentSubtype string
|
contentSubtype string
|
||||||
codec baseCodec
|
codec baseCodec
|
||||||
|
maxRetryRPCBufferSize int
|
||||||
}
|
}
|
||||||
|
|
||||||
func defaultCallInfo() *callInfo {
|
func defaultCallInfo() *callInfo {
|
||||||
return &callInfo{failFast: true}
|
return &callInfo{
|
||||||
|
failFast: true,
|
||||||
|
maxRetryRPCBufferSize: 256 * 1024, // 256KB
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CallOption configures a Call before it starts or extracts information from
|
// CallOption configures a Call before it starts or extracts information from
|
||||||
|
@ -250,8 +253,8 @@ func (o PeerCallOption) after(c *callInfo) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-// FailFast configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If failFast is true, the RPC will fail
+// WaitForReady configures the action to take when an RPC is attempted on broken
+// connections or unreachable servers. If waitForReady is false, the RPC will fail
 // immediately. Otherwise, the RPC client will block the call until a
 // connection is available (or the call is canceled or times out) and will
 // retry the call if it fails due to a transient error. gRPC will not retry if
@@ -259,7 +262,14 @@ func (o PeerCallOption) after(c *callInfo) {
 // the data. Please refer to
 // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
 //
-// By default, RPCs are "Fail Fast".
+// By default, RPCs don't "wait for ready".
+func WaitForReady(waitForReady bool) CallOption {
+	return FailFastCallOption{FailFast: !waitForReady}
+}
+
+// FailFast is the opposite of WaitForReady.
+//
+// Deprecated: use WaitForReady.
 func FailFast(failFast bool) CallOption {
 	return FailFastCallOption{FailFast: failFast}
 }
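A usage sketch for the new option: the address is a placeholder, and `WithDefaultCallOptions` is used here only to show how `WaitForReady` can be applied connection-wide; it can equally be passed on an individual RPC.

```
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Make every RPC on this connection "wait for ready" by default instead of
	// failing fast while the channel is still connecting.
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Per-call override: pass grpc.WaitForReady(false) as a CallOption on one RPC.
}
```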
|
@ -360,13 +370,13 @@ func (o CompressorCallOption) after(c *callInfo) {}
|
||||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||||
// more details.
|
// more details.
|
||||||
//
|
//
|
||||||
// If CallCustomCodec is not also used, the content-subtype will be used to
|
// If ForceCodec is not also used, the content-subtype will be used to look up
|
||||||
// look up the Codec to use in the registry controlled by RegisterCodec. See
|
// the Codec to use in the registry controlled by RegisterCodec. See the
|
||||||
// the documentation on RegisterCodec for details on registration. The lookup
|
// documentation on RegisterCodec for details on registration. The lookup of
|
||||||
// of content-subtype is case-insensitive. If no such Codec is found, the call
|
// content-subtype is case-insensitive. If no such Codec is found, the call
|
||||||
// will result in an error with code codes.Internal.
|
// will result in an error with code codes.Internal.
|
||||||
//
|
//
|
||||||
// If CallCustomCodec is also used, that Codec will be used for all request and
|
// If ForceCodec is also used, that Codec will be used for all request and
|
||||||
// response messages, with the content-subtype set to the given contentSubtype
|
// response messages, with the content-subtype set to the given contentSubtype
|
||||||
// here for requests.
|
// here for requests.
|
||||||
func CallContentSubtype(contentSubtype string) CallOption {
|
func CallContentSubtype(contentSubtype string) CallOption {
|
||||||
|
@ -386,7 +396,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
|
||||||
}
|
}
|
||||||
func (o ContentSubtypeCallOption) after(c *callInfo) {}
|
func (o ContentSubtypeCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
// CallCustomCodec returns a CallOption that will set the given Codec to be
|
// ForceCodec returns a CallOption that will set the given Codec to be
|
||||||
// used for all request and response messages for a call. The result of calling
|
// used for all request and response messages for a call. The result of calling
|
||||||
// String() will be used as the content-subtype in a case-insensitive manner.
|
// String() will be used as the content-subtype in a case-insensitive manner.
|
||||||
//
|
//
|
||||||
|
@ -398,12 +408,37 @@ func (o ContentSubtypeCallOption) after(c *callInfo) {}
|
||||||
//
|
//
|
||||||
// This function is provided for advanced users; prefer to use only
|
// This function is provided for advanced users; prefer to use only
|
||||||
// CallContentSubtype to select a registered codec instead.
|
// CallContentSubtype to select a registered codec instead.
|
||||||
|
//
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
func ForceCodec(codec encoding.Codec) CallOption {
|
||||||
|
return ForceCodecCallOption{Codec: codec}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForceCodecCallOption is a CallOption that indicates the codec used for
|
||||||
|
// marshaling messages.
|
||||||
|
//
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
type ForceCodecCallOption struct {
|
||||||
|
Codec encoding.Codec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o ForceCodecCallOption) before(c *callInfo) error {
|
||||||
|
c.codec = o.Codec
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (o ForceCodecCallOption) after(c *callInfo) {}
|
||||||
|
|
||||||
|
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
|
||||||
|
// an encoding.Codec.
|
||||||
|
//
|
||||||
|
// Deprecated: use ForceCodec instead.
|
||||||
func CallCustomCodec(codec Codec) CallOption {
|
func CallCustomCodec(codec Codec) CallOption {
|
||||||
return CustomCodecCallOption{Codec: codec}
|
return CustomCodecCallOption{Codec: codec}
|
||||||
}
|
}
|
||||||
|
|
||||||
// CustomCodecCallOption is a CallOption that indicates the codec used for
|
// CustomCodecCallOption is a CallOption that indicates the codec used for
|
||||||
// marshaling messages.
|
// marshaling messages.
|
||||||
|
//
|
||||||
// This is an EXPERIMENTAL API.
|
// This is an EXPERIMENTAL API.
|
||||||
type CustomCodecCallOption struct {
|
type CustomCodecCallOption struct {
|
||||||
Codec Codec
|
Codec Codec
|
||||||
|
@@ -415,12 +450,33 @@ func (o CustomCodecCallOption) before(c *callInfo) error {
 }
 func (o CustomCodecCallOption) after(c *callInfo) {}

+// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
+// used for buffering this RPC's requests for retry purposes.
+//
+// This API is EXPERIMENTAL.
+func MaxRetryRPCBufferSize(bytes int) CallOption {
+	return MaxRetryRPCBufferSizeCallOption{bytes}
+}
+
+// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
+// memory to be used for caching this RPC for retry purposes.
+// This is an EXPERIMENTAL API.
+type MaxRetryRPCBufferSizeCallOption struct {
+	MaxRetryRPCBufferSize int
+}
+
+func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
+	c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
+	return nil
+}
+func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
+
 // The format of the payload: compressed or not?
 type payloadFormat uint8

 const (
-	compressionNone payloadFormat = iota // no compression
-	compressionMade
+	compressionNone payloadFormat = 0 // no compression
+	compressionMade payloadFormat = 1 // compressed
 )

 // parser reads complete gRPC messages from the underlying reader.
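A hedged usage sketch for the new retry-buffer option: the address is a placeholder, retry support itself is experimental in this release, and the 64 KB figure is arbitrary (the default set in `defaultCallInfo` earlier in this diff is 256 KB).

```
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Cap the per-RPC buffer kept for transparent retries at 64 KiB.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(64*1024)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```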
|
@ -444,7 +500,7 @@ type parser struct {
|
||||||
// * io.EOF, when no messages remain
|
// * io.EOF, when no messages remain
|
||||||
// * io.ErrUnexpectedEOF
|
// * io.ErrUnexpectedEOF
|
||||||
// * of type transport.ConnectionError
|
// * of type transport.ConnectionError
|
||||||
// * of type transport.StreamError
|
// * an error from the status package
|
||||||
// No other error values or types must be returned, which also means
|
// No other error values or types must be returned, which also means
|
||||||
// that the underlying io.Reader must not return an incompatible
|
// that the underlying io.Reader must not return an incompatible
|
||||||
// error.
|
// error.
|
||||||
|
@ -477,65 +533,85 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
|
||||||
return pf, msg, nil
|
return pf, msg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// encode serializes msg and returns a buffer of message header and a buffer of msg.
|
// encode serializes msg and returns a buffer containing the message, or an
|
||||||
// If msg is nil, it generates the message header and an empty msg buffer.
|
// error if it is too large to be transmitted by grpc. If msg is nil, it
|
||||||
// TODO(ddyihai): eliminate extra Compressor parameter.
|
// generates an empty message.
|
||||||
func encode(c baseCodec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
|
func encode(c baseCodec, msg interface{}) ([]byte, error) {
|
||||||
var (
|
if msg == nil { // NOTE: typed nils will not be caught by this check
|
||||||
b []byte
|
return nil, nil
|
||||||
cbuf *bytes.Buffer
|
}
|
||||||
)
|
b, err := c.Marshal(msg)
|
||||||
const (
|
if err != nil {
|
||||||
payloadLen = 1
|
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
|
||||||
sizeLen = 4
|
|
||||||
)
|
|
||||||
if msg != nil {
|
|
||||||
var err error
|
|
||||||
b, err = c.Marshal(msg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
|
|
||||||
}
|
|
||||||
if outPayload != nil {
|
|
||||||
outPayload.Payload = msg
|
|
||||||
// TODO truncate large payload.
|
|
||||||
outPayload.Data = b
|
|
||||||
outPayload.Length = len(b)
|
|
||||||
}
|
|
||||||
if compressor != nil || cp != nil {
|
|
||||||
cbuf = new(bytes.Buffer)
|
|
||||||
// Has compressor, check Compressor is set by UseCompressor first.
|
|
||||||
if compressor != nil {
|
|
||||||
z, _ := compressor.Compress(cbuf)
|
|
||||||
if _, err := z.Write(b); err != nil {
|
|
||||||
return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
|
|
||||||
}
|
|
||||||
z.Close()
|
|
||||||
} else {
|
|
||||||
// If Compressor is not set by UseCompressor, use default Compressor
|
|
||||||
if err := cp.Do(cbuf, b); err != nil {
|
|
||||||
return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b = cbuf.Bytes()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if uint(len(b)) > math.MaxUint32 {
|
if uint(len(b)) > math.MaxUint32 {
|
||||||
return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
|
return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
|
||||||
}
|
}
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
bufHeader := make([]byte, payloadLen+sizeLen)
|
// compress returns the input bytes compressed by compressor or cp. If both
|
||||||
if compressor != nil || cp != nil {
|
// compressors are nil, returns nil.
|
||||||
bufHeader[0] = byte(compressionMade)
|
//
|
||||||
|
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
|
||||||
|
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
|
||||||
|
if compressor == nil && cp == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
wrapErr := func(err error) error {
|
||||||
|
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
|
||||||
|
}
|
||||||
|
cbuf := &bytes.Buffer{}
|
||||||
|
if compressor != nil {
|
||||||
|
z, err := compressor.Compress(cbuf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, wrapErr(err)
|
||||||
|
}
|
||||||
|
if _, err := z.Write(in); err != nil {
|
||||||
|
return nil, wrapErr(err)
|
||||||
|
}
|
||||||
|
if err := z.Close(); err != nil {
|
||||||
|
return nil, wrapErr(err)
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
bufHeader[0] = byte(compressionNone)
|
if err := cp.Do(cbuf, in); err != nil {
|
||||||
|
return nil, wrapErr(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cbuf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
payloadLen = 1
|
||||||
|
sizeLen = 4
|
||||||
|
headerLen = payloadLen + sizeLen
|
||||||
|
)
|
||||||
|
|
||||||
|
// msgHeader returns a 5-byte header for the message being transmitted and the
|
||||||
|
// payload, which is compData if non-nil or data otherwise.
|
||||||
|
func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
|
||||||
|
hdr = make([]byte, headerLen)
|
||||||
|
if compData != nil {
|
||||||
|
hdr[0] = byte(compressionMade)
|
||||||
|
data = compData
|
||||||
|
} else {
|
||||||
|
hdr[0] = byte(compressionNone)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write length of b into buf
|
// Write length of payload into buf
|
||||||
binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b)))
|
binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
|
||||||
if outPayload != nil {
|
return hdr, data
|
||||||
outPayload.WireLength = payloadLen + sizeLen + len(b)
|
}
|
||||||
|
|
||||||
|
func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
|
||||||
|
return &stats.OutPayload{
|
||||||
|
Client: client,
|
||||||
|
Payload: msg,
|
||||||
|
Data: data,
|
||||||
|
Length: len(data),
|
||||||
|
WireLength: len(payload) + headerLen,
|
||||||
|
SentTime: t,
|
||||||
}
|
}
|
||||||
return bufHeader, b, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
|
func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
|
||||||
|
@ -554,20 +630,22 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// For the two compressor parameters, both should not be set, but if they are,
|
type payloadInfo struct {
|
||||||
// dc takes precedence over compressor.
|
wireLength int // The compressed length got from wire.
|
||||||
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
uncompressedBytes []byte
|
||||||
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
|
}
|
||||||
|
|
||||||
|
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
|
||||||
pf, d, err := p.recvMsg(maxReceiveMessageSize)
|
pf, d, err := p.recvMsg(maxReceiveMessageSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
if inPayload != nil {
|
if payInfo != nil {
|
||||||
inPayload.WireLength = len(d)
|
payInfo.wireLength = len(d)
|
||||||
}
|
}
|
||||||
|
|
||||||
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
||||||
return st.Err()
|
return nil, st.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
if pf == compressionMade {
|
if pf == compressionMade {
|
||||||
|
@ -576,33 +654,42 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
|
||||||
if dc != nil {
|
if dc != nil {
|
||||||
d, err = dc.Do(bytes.NewReader(d))
|
d, err = dc.Do(bytes.NewReader(d))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
dcReader, err := compressor.Decompress(bytes.NewReader(d))
|
dcReader, err := compressor.Decompress(bytes.NewReader(d))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||||
}
|
}
|
||||||
d, err = ioutil.ReadAll(dcReader)
|
// Read from LimitReader with limit max+1. So if the underlying
|
||||||
|
// reader is over limit, the result will be bigger than max.
|
||||||
|
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(d) > maxReceiveMessageSize {
|
if len(d) > maxReceiveMessageSize {
|
||||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||||
// implementation.
|
// implementation.
|
||||||
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
|
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
|
||||||
|
}
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// For the two compressor parameters, both should not be set, but if they are,
|
||||||
|
// dc takes precedence over compressor.
|
||||||
|
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
||||||
|
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
|
||||||
|
d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
if err := c.Unmarshal(d, m); err != nil {
|
if err := c.Unmarshal(d, m); err != nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||||
}
|
}
|
||||||
if inPayload != nil {
|
if payInfo != nil {
|
||||||
inPayload.RecvTime = time.Now()
|
payInfo.uncompressedBytes = d
|
||||||
inPayload.Payload = m
|
|
||||||
// TODO truncate large payload.
|
|
||||||
inPayload.Data = d
|
|
||||||
inPayload.Length = len(d)
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -625,23 +712,17 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
|
||||||
// Code returns the error code for err if it was produced by the rpc system.
|
// Code returns the error code for err if it was produced by the rpc system.
|
||||||
// Otherwise, it returns codes.Unknown.
|
// Otherwise, it returns codes.Unknown.
|
||||||
//
|
//
|
||||||
// Deprecated: use status.FromError and Code method instead.
|
// Deprecated: use status.Code instead.
|
||||||
func Code(err error) codes.Code {
|
func Code(err error) codes.Code {
|
||||||
if s, ok := status.FromError(err); ok {
|
return status.Code(err)
|
||||||
return s.Code()
|
|
||||||
}
|
|
||||||
return codes.Unknown
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrorDesc returns the error description of err if it was produced by the rpc system.
|
// ErrorDesc returns the error description of err if it was produced by the rpc system.
|
||||||
// Otherwise, it returns err.Error() or empty string when err is nil.
|
// Otherwise, it returns err.Error() or empty string when err is nil.
|
||||||
//
|
//
|
||||||
// Deprecated: use status.FromError and Message method instead.
|
// Deprecated: use status.Convert and Message method instead.
|
||||||
func ErrorDesc(err error) string {
|
func ErrorDesc(err error) string {
|
||||||
if s, ok := status.FromError(err); ok {
|
return status.Convert(err).Message()
|
||||||
return s.Message()
|
|
||||||
}
|
|
||||||
return err.Error()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Errorf returns an error containing an error code and a description;
|
// Errorf returns an error containing an error code and a description;
|
||||||
|
@@ -652,6 +733,31 @@ func Errorf(c codes.Code, format string, a ...interface{}) error {
 	return status.Errorf(c, format, a...)
 }

+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	if err == nil || err == io.EOF {
+		return err
+	}
+	if err == io.ErrUnexpectedEOF {
+		return status.Error(codes.Internal, err.Error())
+	}
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+	switch e := err.(type) {
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	default:
+		switch err {
+		case context.DeadlineExceeded:
+			return status.Error(codes.DeadlineExceeded, err.Error())
+		case context.Canceled:
+			return status.Error(codes.Canceled, err.Error())
+		}
+	}
+	return status.Error(codes.Unknown, err.Error())
+}
+
 // setCallInfoCodec should only be called after CallOptions have been applied.
 func setCallInfoCodec(c *callInfo) error {
 	if c.codec != nil {
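Since `toRPCErr` above folds transport and context failures into `status` errors, callers normally branch on the resulting code rather than on concrete error types. A small illustrative helper (the function name and log messages are invented, not part of the gRPC API):

```
package main

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classify shows a typical client-side inspection of an error returned by an RPC.
func classify(err error) {
	switch status.Code(err) {
	case codes.OK:
		// nil error, nothing to do
	case codes.Unavailable:
		log.Println("connection-level failure; retrying later may help")
	case codes.DeadlineExceeded, codes.Canceled:
		log.Println("the context ended before the RPC completed")
	default:
		log.Println("RPC failed:", err)
	}
}

func main() { classify(nil) }
```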
|
@ -707,6 +813,19 @@ func parseDialTarget(target string) (net string, addr string) {
|
||||||
return net, target
|
return net, target
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
|
||||||
|
// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
|
||||||
|
// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
|
||||||
|
// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
|
||||||
|
type channelzData struct {
|
||||||
|
callsStarted int64
|
||||||
|
callsFailed int64
|
||||||
|
callsSucceeded int64
|
||||||
|
// lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
|
||||||
|
// time.Time since it's more costly to atomically update time.Time variable than int64 variable.
|
||||||
|
lastCallStartedTime int64
|
||||||
|
}
|
||||||
|
|
||||||
// The SupportPackageIsVersion variables are referenced from generated protocol
|
// The SupportPackageIsVersion variables are referenced from generated protocol
|
||||||
// buffer files to ensure compatibility with the gRPC version used. The latest
|
// buffer files to ensure compatibility with the gRPC version used. The latest
|
||||||
// support package version is 5.
|
// support package version is 5.
|
||||||
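The same pattern outside gRPC, in a hedged sketch: the counters live in their own separately allocated struct so that `sync/atomic`'s 64-bit operations stay aligned even on 32-bit platforms. All names here are illustrative.

```
package main

import (
	"fmt"
	"sync/atomic"
)

// counters groups the int64 fields, mirroring channelzData above.
type counters struct {
	requestsServed int64
	bytesSent      int64
}

type server struct {
	name string
	// Allocated separately with new(), so the int64s start on a 64-bit
	// aligned address regardless of the surrounding struct layout.
	cz *counters
}

func main() {
	s := &server{name: "demo", cz: new(counters)}
	atomic.AddInt64(&s.cz.requestsServed, 1)
	atomic.AddInt64(&s.cz.bytesSent, 512)
	fmt.Println(atomic.LoadInt64(&s.cz.requestsServed), atomic.LoadInt64(&s.cz.bytesSent))
}
```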
|
@ -721,7 +840,4 @@ const (
|
||||||
SupportPackageIsVersion5 = true
|
SupportPackageIsVersion5 = true
|
||||||
)
|
)
|
||||||
|
|
||||||
// Version is the current grpc version.
|
|
||||||
const Version = "1.12.2"
|
|
||||||
|
|
||||||
const grpcUA = "grpc-go/" + Version
|
const grpcUA = "grpc-go/" + Version
|
||||||
|
|
|
@ -19,7 +19,7 @@
|
||||||
package grpc
|
package grpc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
@ -30,27 +30,25 @@ import (
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"io/ioutil"
|
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"golang.org/x/net/http2"
|
|
||||||
"golang.org/x/net/trace"
|
"golang.org/x/net/trace"
|
||||||
|
|
||||||
"google.golang.org/grpc/channelz"
|
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/encoding"
|
"google.golang.org/grpc/encoding"
|
||||||
"google.golang.org/grpc/encoding/proto"
|
"google.golang.org/grpc/encoding/proto"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
"google.golang.org/grpc/internal"
|
"google.golang.org/grpc/internal/binarylog"
|
||||||
|
"google.golang.org/grpc/internal/channelz"
|
||||||
|
"google.golang.org/grpc/internal/transport"
|
||||||
"google.golang.org/grpc/keepalive"
|
"google.golang.org/grpc/keepalive"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/peer"
|
||||||
"google.golang.org/grpc/stats"
|
"google.golang.org/grpc/stats"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
"google.golang.org/grpc/tap"
|
"google.golang.org/grpc/tap"
|
||||||
"google.golang.org/grpc/transport"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -106,12 +104,8 @@ type Server struct {
|
||||||
channelzRemoveOnce sync.Once
|
channelzRemoveOnce sync.Once
|
||||||
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
|
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
|
||||||
|
|
||||||
channelzID int64 // channelz unique identification number
|
channelzID int64 // channelz unique identification number
|
||||||
czmu sync.RWMutex
|
czData *channelzData
|
||||||
callsStarted int64
|
|
||||||
callsFailed int64
|
|
||||||
callsSucceeded int64
|
|
||||||
lastCallStartedTime time.Time
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type options struct {
|
type options struct {
|
||||||
|
@ -126,7 +120,6 @@ type options struct {
|
||||||
maxConcurrentStreams uint32
|
maxConcurrentStreams uint32
|
||||||
maxReceiveMessageSize int
|
maxReceiveMessageSize int
|
||||||
maxSendMessageSize int
|
maxSendMessageSize int
|
||||||
useHandlerImpl bool // use http.Handler-based server
|
|
||||||
unknownStreamDesc *StreamDesc
|
unknownStreamDesc *StreamDesc
|
||||||
keepaliveParams keepalive.ServerParameters
|
keepaliveParams keepalive.ServerParameters
|
||||||
keepalivePolicy keepalive.EnforcementPolicy
|
keepalivePolicy keepalive.EnforcementPolicy
|
||||||
|
@ -135,19 +128,25 @@ type options struct {
|
||||||
writeBufferSize int
|
writeBufferSize int
|
||||||
readBufferSize int
|
readBufferSize int
|
||||||
connectionTimeout time.Duration
|
connectionTimeout time.Duration
|
||||||
|
maxHeaderListSize *uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
var defaultServerOptions = options{
|
var defaultServerOptions = options{
|
||||||
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
|
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
|
||||||
maxSendMessageSize: defaultServerMaxSendMessageSize,
|
maxSendMessageSize: defaultServerMaxSendMessageSize,
|
||||||
connectionTimeout: 120 * time.Second,
|
connectionTimeout: 120 * time.Second,
|
||||||
|
writeBufferSize: defaultWriteBufSize,
|
||||||
|
readBufferSize: defaultReadBufSize,
|
||||||
}
|
}
|
||||||
|
|
||||||
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
|
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
|
||||||
type ServerOption func(*options)
|
type ServerOption func(*options)
|
||||||
|
|
||||||
// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched
|
// WriteBufferSize determines how much data can be batched before doing a write on the wire.
|
||||||
// before doing a write on the wire.
|
// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
|
||||||
|
// The default value for this buffer is 32KB.
|
||||||
|
// Zero will disable the write buffer such that each write will be on underlying connection.
|
||||||
|
// Note: A Send call may not directly translate to a write.
|
||||||
func WriteBufferSize(s int) ServerOption {
|
func WriteBufferSize(s int) ServerOption {
|
||||||
return func(o *options) {
|
return func(o *options) {
|
||||||
o.writeBufferSize = s
|
o.writeBufferSize = s
|
||||||
|
@ -156,6 +155,9 @@ func WriteBufferSize(s int) ServerOption {
|
||||||
|
|
||||||
// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
|
// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
|
||||||
// for one read syscall.
|
// for one read syscall.
|
||||||
|
// The default value for this buffer is 32KB.
|
||||||
|
// Zero will disable read buffer for a connection so data framer can access the underlying
|
||||||
|
// conn directly.
|
||||||
func ReadBufferSize(s int) ServerOption {
|
func ReadBufferSize(s int) ServerOption {
|
||||||
return func(o *options) {
|
return func(o *options) {
|
||||||
o.readBufferSize = s
|
o.readBufferSize = s
|
||||||
|
@@ -180,6 +182,11 @@ func InitialConnWindowSize(s int32) ServerOption {

 // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
 func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
+	if kp.Time > 0 && kp.Time < time.Second {
+		grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+		kp.Time = time.Second
+	}
+
 	return func(o *options) {
 		o.keepaliveParams = kp
 	}
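Server-side usage sketch; the listen address and durations are example values, and anything below the one-second floor would be bumped up by the guard added above.

```
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		Time:    30 * time.Second, // ping the client if it has been idle this long
		Timeout: 10 * time.Second, // wait this long for the ping ack before closing
	}))

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}
```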
|
@ -242,7 +249,7 @@ func MaxRecvMsgSize(m int) ServerOption {
|
||||||
}
|
}
|
||||||
|
|
||||||
// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
|
// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
|
||||||
// If this is not set, gRPC uses the default 4MB.
|
// If this is not set, gRPC uses the default `math.MaxInt32`.
|
||||||
func MaxSendMsgSize(m int) ServerOption {
|
func MaxSendMsgSize(m int) ServerOption {
|
||||||
return func(o *options) {
|
return func(o *options) {
|
||||||
o.maxSendMessageSize = m
|
o.maxSendMessageSize = m
|
||||||
|
@ -335,6 +342,14 @@ func ConnectionTimeout(d time.Duration) ServerOption {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
|
||||||
|
// of header list that the server is prepared to accept.
|
||||||
|
func MaxHeaderListSize(s uint32) ServerOption {
|
||||||
|
return func(o *options) {
|
||||||
|
o.maxHeaderListSize = &s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// NewServer creates a gRPC server which has no service registered and has not
|
// NewServer creates a gRPC server which has no service registered and has not
|
||||||
// started to accept requests yet.
|
// started to accept requests yet.
|
||||||
func NewServer(opt ...ServerOption) *Server {
|
func NewServer(opt ...ServerOption) *Server {
|
||||||
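Usage sketch for the new server option; the 8 KB limit and the listen address are arbitrary example values.

```
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	// Reject requests whose metadata (header list) exceeds 8 KiB uncompressed.
	srv := grpc.NewServer(grpc.MaxHeaderListSize(8 * 1024))

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}
```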
|
@ -343,12 +358,13 @@ func NewServer(opt ...ServerOption) *Server {
|
||||||
o(&opts)
|
o(&opts)
|
||||||
}
|
}
|
||||||
s := &Server{
|
s := &Server{
|
||||||
lis: make(map[net.Listener]bool),
|
lis: make(map[net.Listener]bool),
|
||||||
opts: opts,
|
opts: opts,
|
||||||
conns: make(map[io.Closer]bool),
|
conns: make(map[io.Closer]bool),
|
||||||
m: make(map[string]*service),
|
m: make(map[string]*service),
|
||||||
quit: make(chan struct{}),
|
quit: make(chan struct{}),
|
||||||
done: make(chan struct{}),
|
done: make(chan struct{}),
|
||||||
|
czData: new(channelzData),
|
||||||
}
|
}
|
||||||
s.cv = sync.NewCond(&s.mu)
|
s.cv = sync.NewCond(&s.mu)
|
||||||
if EnableTracing {
|
if EnableTracing {
|
||||||
|
@ -357,7 +373,7 @@ func NewServer(opt ...ServerOption) *Server {
|
||||||
}
|
}
|
||||||
|
|
||||||
if channelz.IsOn() {
|
if channelz.IsOn() {
|
||||||
s.channelzID = channelz.RegisterServer(s, "")
|
s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
|
||||||
}
|
}
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
@ -481,7 +497,8 @@ type listenSocket struct {
|
||||||
|
|
||||||
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
|
func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
|
||||||
return &channelz.SocketInternalMetric{
|
return &channelz.SocketInternalMetric{
|
||||||
LocalAddr: l.Listener.Addr(),
|
SocketOptions: channelz.GetSocketOption(l.Listener),
|
||||||
|
LocalAddr: l.Listener.Addr(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -525,7 +542,7 @@ func (s *Server) Serve(lis net.Listener) error {
|
||||||
s.lis[ls] = true
|
s.lis[ls] = true
|
||||||
|
|
||||||
if channelz.IsOn() {
|
if channelz.IsOn() {
|
||||||
ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "")
|
ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
|
||||||
}
|
}
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
|
|
||||||
|
@ -597,12 +614,13 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
|
||||||
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
|
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
|
||||||
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
|
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.mu.Lock()
|
// ErrConnDispatched means that the connection was dispatched away from
|
||||||
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
|
// gRPC; those connections should be left open.
|
||||||
s.mu.Unlock()
|
|
||||||
grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
|
|
||||||
// If serverHandshake returns ErrConnDispatched, keep rawConn open.
|
|
||||||
if err != credentials.ErrConnDispatched {
|
if err != credentials.ErrConnDispatched {
|
||||||
|
s.mu.Lock()
|
||||||
|
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
|
||||||
|
s.mu.Unlock()
|
||||||
|
grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
|
||||||
rawConn.Close()
|
rawConn.Close()
|
||||||
}
|
}
|
||||||
rawConn.SetDeadline(time.Time{})
|
rawConn.SetDeadline(time.Time{})
|
||||||
|
@ -617,27 +635,19 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
|
||||||
}
|
}
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
|
|
||||||
var serve func()
|
// Finish handshaking (HTTP2)
|
||||||
c := conn.(io.Closer)
|
st := s.newHTTP2Transport(conn, authInfo)
|
||||||
if s.opts.useHandlerImpl {
|
if st == nil {
|
||||||
serve = func() { s.serveUsingHandler(conn) }
|
return
|
||||||
} else {
|
|
||||||
// Finish handshaking (HTTP2)
|
|
||||||
st := s.newHTTP2Transport(conn, authInfo)
|
|
||||||
if st == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c = st
|
|
||||||
serve = func() { s.serveStreams(st) }
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rawConn.SetDeadline(time.Time{})
|
rawConn.SetDeadline(time.Time{})
|
||||||
if !s.addConn(c) {
|
if !s.addConn(st) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
serve()
|
s.serveStreams(st)
|
||||||
s.removeConn(c)
|
s.removeConn(st)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -656,6 +666,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
|
||||||
WriteBufferSize: s.opts.writeBufferSize,
|
WriteBufferSize: s.opts.writeBufferSize,
|
||||||
ReadBufferSize: s.opts.readBufferSize,
|
ReadBufferSize: s.opts.readBufferSize,
|
||||||
ChannelzParentID: s.channelzID,
|
ChannelzParentID: s.channelzID,
|
||||||
|
MaxHeaderListSize: s.opts.maxHeaderListSize,
|
||||||
}
|
}
|
||||||
st, err := transport.NewServerTransport("http2", c, config)
|
st, err := transport.NewServerTransport("http2", c, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -691,27 +702,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
|
||||||
|
|
||||||
var _ http.Handler = (*Server)(nil)
|
var _ http.Handler = (*Server)(nil)
|
||||||
|
|
||||||
// serveUsingHandler is called from handleRawConn when s is configured
|
|
||||||
// to handle requests via the http.Handler interface. It sets up a
|
|
||||||
// net/http.Server to handle the just-accepted conn. The http.Server
|
|
||||||
// is configured to route all incoming requests (all HTTP/2 streams)
|
|
||||||
// to ServeHTTP, which creates a new ServerTransport for each stream.
|
|
||||||
// serveUsingHandler blocks until conn closes.
|
|
||||||
//
|
|
||||||
// This codepath is only used when Server.TestingUseHandlerImpl has
|
|
||||||
// been configured. This lets the end2end tests exercise the ServeHTTP
|
|
||||||
// method as one of the environment types.
|
|
||||||
//
|
|
||||||
// conn is the *tls.Conn that's already been authenticated.
|
|
||||||
func (s *Server) serveUsingHandler(conn net.Conn) {
|
|
||||||
h2s := &http2.Server{
|
|
||||||
MaxConcurrentStreams: s.opts.maxConcurrentStreams,
|
|
||||||
}
|
|
||||||
h2s.ServeConn(conn, &http2.ServeConnOpts{
|
|
||||||
Handler: s,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeHTTP implements the Go standard library's http.Handler
|
// ServeHTTP implements the Go standard library's http.Handler
|
||||||
// interface by responding to the gRPC request r, by looking up
|
// interface by responding to the gRPC request r, by looking up
|
||||||
// the requested gRPC method in the gRPC server s.
|
// the requested gRPC method in the gRPC server s.
|
||||||
|
@ -759,12 +749,13 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
|
||||||
|
|
||||||
trInfo = &traceInfo{
|
trInfo = &traceInfo{
|
||||||
tr: tr,
|
tr: tr,
|
||||||
|
firstLine: firstLine{
|
||||||
|
client: false,
|
||||||
|
remoteAddr: st.RemoteAddr(),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
trInfo.firstLine.client = false
|
|
||||||
trInfo.firstLine.remoteAddr = st.RemoteAddr()
|
|
||||||
|
|
||||||
if dl, ok := stream.Context().Deadline(); ok {
|
if dl, ok := stream.Context().Deadline(); ok {
|
||||||
trInfo.firstLine.deadline = dl.Sub(time.Now())
|
trInfo.firstLine.deadline = time.Until(dl)
|
||||||
}
|
}
|
||||||
return trInfo
|
return trInfo
|
||||||
}
|
}
|
||||||
|
@ -794,57 +785,47 @@ func (s *Server) removeConn(c io.Closer) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChannelzMetric returns ServerInternalMetric of current server.
|
func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
|
||||||
// This is an EXPERIMENTAL API.
|
|
||||||
func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric {
|
|
||||||
s.czmu.RLock()
|
|
||||||
defer s.czmu.RUnlock()
|
|
||||||
return &channelz.ServerInternalMetric{
|
return &channelz.ServerInternalMetric{
|
||||||
CallsStarted: s.callsStarted,
|
CallsStarted: atomic.LoadInt64(&s.czData.callsStarted),
|
||||||
CallsSucceeded: s.callsSucceeded,
|
CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded),
|
||||||
CallsFailed: s.callsFailed,
|
CallsFailed: atomic.LoadInt64(&s.czData.callsFailed),
|
||||||
LastCallStartedTimestamp: s.lastCallStartedTime,
|
LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) incrCallsStarted() {
|
func (s *Server) incrCallsStarted() {
|
||||||
s.czmu.Lock()
|
atomic.AddInt64(&s.czData.callsStarted, 1)
|
||||||
s.callsStarted++
|
atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
|
||||||
s.lastCallStartedTime = time.Now()
|
|
||||||
s.czmu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) incrCallsSucceeded() {
|
func (s *Server) incrCallsSucceeded() {
|
||||||
s.czmu.Lock()
|
atomic.AddInt64(&s.czData.callsSucceeded, 1)
|
||||||
s.callsSucceeded++
|
|
||||||
s.czmu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) incrCallsFailed() {
|
func (s *Server) incrCallsFailed() {
|
||||||
s.czmu.Lock()
|
atomic.AddInt64(&s.czData.callsFailed, 1)
|
||||||
s.callsFailed++
|
|
||||||
s.czmu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
 func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
-var (
+data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
-outPayload *stats.OutPayload
-)
-if s.opts.statsHandler != nil {
-outPayload = &stats.OutPayload{}
-}
-hdr, data, err := encode(s.getCodec(stream.ContentSubtype()), msg, cp, outPayload, comp)
 if err != nil {
 grpclog.Errorln("grpc: server failed to encode response: ", err)
 return err
 }
-if len(data) > s.opts.maxSendMessageSize {
+compData, err := compress(data, cp, comp)
-return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize)
+if err != nil {
+grpclog.Errorln("grpc: server failed to compress response: ", err)
+return err
 }
-err = t.Write(stream, hdr, data, opts)
+hdr, payload := msgHeader(data, compData)
-if err == nil && outPayload != nil {
+// TODO(dfawley): should we be checking len(data) instead?
-outPayload.SentTime = time.Now()
+if len(payload) > s.opts.maxSendMessageSize {
-s.opts.statsHandler.HandleRPC(stream.Context(), outPayload)
+return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+}
+err = t.Write(stream, hdr, payload, opts)
+if err == nil && s.opts.statsHandler != nil {
+s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
 }
 return err
 }
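sendResponse now encodes the message, compresses it through the comp/cp pair, and only then frames it with msgHeader and checks the size limit. On the public API side this path is normally driven by a registered compressor; a hedged client-side sketch (the address is a placeholder):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func main() {
	// Every RPC on this connection asks for gzip compression; the server
	// then takes the compress/compData branch shown above.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip")))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```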
@@ -880,7 +861,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 }
 if trInfo != nil {
 defer trInfo.tr.Finish()
-trInfo.firstLine.client = false
 trInfo.tr.LazyLog(&trInfo.firstLine, false)
 defer func() {
 if err != nil && err != io.EOF {
@@ -890,6 +870,30 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 }()
 }

+binlog := binarylog.GetMethodLogger(stream.Method())
+if binlog != nil {
+ctx := stream.Context()
+md, _ := metadata.FromIncomingContext(ctx)
+logEntry := &binarylog.ClientHeader{
+Header: md,
+MethodName: stream.Method(),
+PeerAddr: nil,
+}
+if deadline, ok := ctx.Deadline(); ok {
+logEntry.Timeout = time.Until(deadline)
+if logEntry.Timeout < 0 {
+logEntry.Timeout = 0
+}
+}
+if a := md[":authority"]; len(a) > 0 {
+logEntry.Authority = a[0]
+}
+if peer, ok := peer.FromContext(ctx); ok {
+logEntry.PeerAddr = peer.Addr
+}
+binlog.Log(logEntry)
+}

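The added block pulls the client header out of the incoming context with metadata.FromIncomingContext and peer.FromContext. Application code can use the same calls, for example in a unary server interceptor; a small illustrative sketch (the interceptor itself is not part of this diff):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

// loggingInterceptor logs the method, peer address and :authority header
// for every unary RPC, mirroring the fields the binary logger collects.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if p, ok := peer.FromContext(ctx); ok {
		log.Printf("%s from %v authority=%v", info.FullMethod, p.Addr, md[":authority"])
	}
	return handler(ctx, req)
}

func main() {
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}
```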
 // comp and cp are used for compression. decomp and dc are used for
 // decompression. If comp and decomp are both set, they are the same;
 // however they are kept separate to ensure that at most one of the
@@ -926,81 +930,38 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 }
 }

-p := &parser{r: stream}
+var payInfo *payloadInfo
-pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
+if sh != nil || binlog != nil {
-if err == io.EOF {
+payInfo = &payloadInfo{}
-// The entire stream is done (for unary RPC only).
-return err
-}
-if err == io.ErrUnexpectedEOF {
-err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
 }
+d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
 if err != nil {
 if st, ok := status.FromError(err); ok {
 if e := t.WriteStatus(stream, st); e != nil {
 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
 }
-} else {
-switch st := err.(type) {
-case transport.ConnectionError:
-// Nothing to do here.
-case transport.StreamError:
-if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
-grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
-}
-default:
-panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
-}
-}
 }
 return err
 }
 if channelz.IsOn() {
 t.IncrMsgRecv()
 }
-if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
-if e := t.WriteStatus(stream, st); e != nil {
-grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
-}
-return st.Err()
-}
-var inPayload *stats.InPayload
-if sh != nil {
-inPayload = &stats.InPayload{
-RecvTime: time.Now(),
-}
-}
 df := func(v interface{}) error {
-if inPayload != nil {
+if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
-inPayload.WireLength = len(req)
-}
-if pf == compressionMade {
-var err error
-if dc != nil {
-req, err = dc.Do(bytes.NewReader(req))
-if err != nil {
-return status.Errorf(codes.Internal, err.Error())
-}
-} else {
-tmp, _ := decomp.Decompress(bytes.NewReader(req))
-req, err = ioutil.ReadAll(tmp)
-if err != nil {
-return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
-}
-}
-}
-if len(req) > s.opts.maxReceiveMessageSize {
-// TODO: Revisit the error code. Currently keep it consistent with
-// java implementation.
-return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
-}
-if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil {
 return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 }
-if inPayload != nil {
+if sh != nil {
-inPayload.Payload = v
+sh.HandleRPC(stream.Context(), &stats.InPayload{
-inPayload.Data = req
+RecvTime: time.Now(),
-inPayload.Length = len(req)
+Payload: v,
-sh.HandleRPC(stream.Context(), inPayload)
+Data: d,
+Length: len(d),
+})
+}
+if binlog != nil {
+binlog.Log(&binarylog.ClientMessage{
+Message: d,
+})
 }
 if trInfo != nil {
 trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
@@ -1023,15 +984,25 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 if e := t.WriteStatus(stream, appStatus); e != nil {
 grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
 }
+if binlog != nil {
+if h, _ := stream.Header(); h.Len() > 0 {
+// Only log serverHeader if there was header. Otherwise it can
+// be trailer only.
+binlog.Log(&binarylog.ServerHeader{
+Header: h,
+})
+}
+binlog.Log(&binarylog.ServerTrailer{
+Trailer: stream.Trailer(),
+Err: appErr,
+})
+}
 return appErr
 }
 if trInfo != nil {
 trInfo.tr.LazyLog(stringer("OK"), false)
 }
-opts := &transport.Options{
+opts := &transport.Options{Last: true}
-Last: true,
-Delay: false,
-}

 if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
 if err == io.EOF {
@@ -1046,16 +1017,31 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 switch st := err.(type) {
 case transport.ConnectionError:
 // Nothing to do here.
-case transport.StreamError:
-if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
-grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
-}
 default:
 panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
 }
 }
+if binlog != nil {
+h, _ := stream.Header()
+binlog.Log(&binarylog.ServerHeader{
+Header: h,
+})
+binlog.Log(&binarylog.ServerTrailer{
+Trailer: stream.Trailer(),
+Err: appErr,
+})
+}
 return err
 }
+if binlog != nil {
+h, _ := stream.Header()
+binlog.Log(&binarylog.ServerHeader{
+Header: h,
+})
+binlog.Log(&binarylog.ServerMessage{
+Message: reply,
+})
+}
 if channelz.IsOn() {
 t.IncrMsgSent()
 }
@@ -1065,7 +1051,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
 // TODO: Should we be logging if writing status failed here, like above?
 // Should the logging be in WriteStatus? Should we ignore the WriteStatus
 // error or allow the stats handler to see it?
-return t.WriteStatus(stream, status.New(codes.OK, ""))
+err = t.WriteStatus(stream, status.New(codes.OK, ""))
+if binlog != nil {
+binlog.Log(&binarylog.ServerTrailer{
+Trailer: stream.Trailer(),
+Err: appErr,
+})
+}
+return err
 }

 func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
@@ -1099,17 +1092,40 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 }
 ctx := NewContextWithServerTransportStream(stream.Context(), stream)
 ss := &serverStream{
 ctx: ctx,
 t: t,
 s: stream,
 p: &parser{r: stream},
 codec: s.getCodec(stream.ContentSubtype()),
 maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 maxSendMessageSize: s.opts.maxSendMessageSize,
 trInfo: trInfo,
 statsHandler: sh,
 }

+ss.binlog = binarylog.GetMethodLogger(stream.Method())
+if ss.binlog != nil {
+md, _ := metadata.FromIncomingContext(ctx)
+logEntry := &binarylog.ClientHeader{
+Header: md,
+MethodName: stream.Method(),
+PeerAddr: nil,
+}
+if deadline, ok := ctx.Deadline(); ok {
+logEntry.Timeout = time.Until(deadline)
+if logEntry.Timeout < 0 {
+logEntry.Timeout = 0
+}
+}
+if a := md[":authority"]; len(a) > 0 {
+logEntry.Authority = a[0]
+}
+if peer, ok := peer.FromContext(ss.Context()); ok {
+logEntry.PeerAddr = peer.Addr
+}
+ss.binlog.Log(logEntry)
+}

 // If dc is set and matches the stream's compression, use it. Otherwise, try
 // to find a matching registered compressor for decomp.
 if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
@@ -1169,12 +1185,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 if appErr != nil {
 appStatus, ok := status.FromError(appErr)
 if !ok {
-switch err := appErr.(type) {
+appStatus = status.New(codes.Unknown, appErr.Error())
-case transport.StreamError:
-appStatus = status.New(err.Code, err.Desc)
-default:
-appStatus = status.New(codes.Unknown, appErr.Error())
-}
 appErr = appStatus.Err()
 }
 if trInfo != nil {
@@ -1184,6 +1195,12 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 ss.mu.Unlock()
 }
 t.WriteStatus(ss.s, appStatus)
+if ss.binlog != nil {
+ss.binlog.Log(&binarylog.ServerTrailer{
+Trailer: ss.s.Trailer(),
+Err: appErr,
+})
+}
 // TODO: Should we log an error from WriteStatus here and below?
 return appErr
 }
@@ -1192,7 +1209,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 ss.trInfo.tr.LazyLog(stringer("OK"), false)
 ss.mu.Unlock()
 }
-return t.WriteStatus(ss.s, status.New(codes.OK, ""))
+err = t.WriteStatus(ss.s, status.New(codes.OK, ""))
+if ss.binlog != nil {
+ss.binlog.Log(&binarylog.ServerTrailer{
+Trailer: ss.s.Trailer(),
+Err: appErr,
+})
+}
+return err
 }

 func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
@@ -1221,47 +1245,33 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 }
 service := sm[:pos]
 method := sm[pos+1:]
-srv, ok := s.m[service]
-if !ok {
+srv, knownService := s.m[service]
-if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+if knownService {
-s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+if md, ok := srv.md[method]; ok {
+s.processUnaryRPC(t, stream, srv, md, trInfo)
 return
 }
-if trInfo != nil {
+if sd, ok := srv.sd[method]; ok {
-trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
+s.processStreamingRPC(t, stream, srv, sd, trInfo)
-trInfo.tr.SetError()
+return
 }
-errDesc := fmt.Sprintf("unknown service %v", service)
-if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
-if trInfo != nil {
-trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-trInfo.tr.SetError()
-}
-grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
-}
-if trInfo != nil {
-trInfo.tr.Finish()
-}
-return
-}
-// Unary RPC or Streaming RPC?
-if md, ok := srv.md[method]; ok {
-s.processUnaryRPC(t, stream, srv, md, trInfo)
-return
-}
-if sd, ok := srv.sd[method]; ok {
-s.processStreamingRPC(t, stream, srv, sd, trInfo)
-return
-}
-if trInfo != nil {
-trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
-trInfo.tr.SetError()
 }
+// Unknown service, or known server unknown method.
 if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
 s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
 return
 }
-errDesc := fmt.Sprintf("unknown method %v", method)
+var errDesc string
+if !knownService {
+errDesc = fmt.Sprintf("unknown service %v", service)
+} else {
+errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
+}
+if trInfo != nil {
+trInfo.tr.LazyPrintf("%s", errDesc)
+trInfo.tr.SetError()
+}
 if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
 if trInfo != nil {
 trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
@@ -1410,12 +1420,6 @@ func (s *Server) GracefulStop() {
 s.mu.Unlock()
 }

-func init() {
-internal.TestingUseHandlerImpl = func(arg interface{}) {
-arg.(*Server).opts.useHandlerImpl = true
-}
-}

 // contentSubtype must be lowercase
 // cannot return nil
 func (s *Server) getCodec(contentSubtype string) baseCodec {
@@ -1484,3 +1488,11 @@ func Method(ctx context.Context) (string, bool) {
 }
 return s.Method(), true
 }

+type channelzServer struct {
+s *Server
+}

+func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
+return c.s.channelzMetric()
+}
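The unknown-service/unknown-method fallback above goes through s.opts.unknownStreamDesc, which is what the public grpc.UnknownServiceHandler option populates. A hedged sketch of wiring that up (address and error message are placeholders):

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer(grpc.UnknownServiceHandler(func(srv interface{}, stream grpc.ServerStream) error {
		// Called for any method that no registered service handles.
		method, _ := grpc.MethodFromServerStream(stream)
		return status.Errorf(codes.Unimplemented, "no handler configured for %s", method)
	}))
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```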
@@ -25,6 +25,7 @@ import (
 "strings"
 "time"

+"google.golang.org/grpc/codes"
 "google.golang.org/grpc/grpclog"
 )

@@ -56,6 +57,8 @@ type MethodConfig struct {
 // MaxRespSize is the maximum allowed payload size for an individual response in a
 // stream (server->client) in bytes.
 MaxRespSize *int
+// RetryPolicy configures retry options for the method.
+retryPolicy *retryPolicy
 }

 // ServiceConfig is provided by the service provider and contains parameters for how
@@ -68,13 +71,96 @@ type ServiceConfig struct {
 // LB is the load balancer the service providers recommends. The balancer specified
 // via grpc.WithBalancer will override this.
 LB *string
-// Methods contains a map for the methods in this service.
-// If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
+// Methods contains a map for the methods in this service. If there is an
-// If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
+// exact match for a method (i.e. /service/method) in the map, use the
-// Otherwise, the method has no MethodConfig to use.
+// corresponding MethodConfig. If there's no exact match, look for the
+// default config for the service (/service/) and use the corresponding
+// MethodConfig if it exists. Otherwise, the method has no MethodConfig to
+// use.
 Methods map[string]MethodConfig

-stickinessMetadataKey *string
+// If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+// retry attempts and hedged RPCs when the client’s ratio of failures to
+// successes exceeds a threshold.
+//
+// For each server name, the gRPC client will maintain a token_count which is
+// initially set to maxTokens, and can take values between 0 and maxTokens.
+//
+// Every outgoing RPC (regardless of service or method invoked) will change
+// token_count as follows:
+//
+//   - Every failed RPC will decrement the token_count by 1.
+//   - Every successful RPC will increment the token_count by tokenRatio.
+//
+// If token_count is less than or equal to maxTokens / 2, then RPCs will not
+// be retried and hedged RPCs will not be sent.
+retryThrottling *retryThrottlingPolicy
+// healthCheckConfig must be set as one of the requirement to enable LB channel
+// health check.
+healthCheckConfig *healthCheckConfig
+// rawJSONString stores service config json string that get parsed into
+// this service config struct.
+rawJSONString string
+}

+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+// serviceName is the service name to use in the health-checking request.
+ServiceName string
+}

+// retryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryPolicy struct {
+// MaxAttempts is the maximum number of attempts, including the original RPC.
+//
+// This field is required and must be two or greater.
+maxAttempts int

+// Exponential backoff parameters. The initial retry attempt will occur at
+// random(0, initialBackoffMS). In general, the nth attempt will occur at
+// random(0,
+//   min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
+//
+// These fields are required and must be greater than zero.
+initialBackoff time.Duration
+maxBackoff time.Duration
+backoffMultiplier float64

+// The set of status codes which may be retried.
+//
+// Status codes are specified as strings, e.g., "UNAVAILABLE".
+//
+// This field is required and must be non-empty.
+// Note: a set is used to store this for easy lookup.
+retryableStatusCodes map[codes.Code]bool
+}

+type jsonRetryPolicy struct {
+MaxAttempts int
+InitialBackoff string
+MaxBackoff string
+BackoffMultiplier float64
+RetryableStatusCodes []codes.Code
+}

+// retryThrottlingPolicy defines the go-native version of the retry throttling
+// policy defined by the service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryThrottlingPolicy struct {
+// The number of tokens starts at maxTokens. The token_count will always be
+// between 0 and maxTokens.
+//
+// This field is required and must be greater than zero.
+MaxTokens float64
+// The amount of tokens to add on each successful RPC. Typically this will
+// be some number between 0 and 1, e.g., 0.1.
+//
+// This field is required and must be greater than zero. Up to 3 decimal
+// places are supported.
+TokenRatio float64
 }
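For reference, a service config that exercises these new fields would look roughly like the JSON below (shape per the gRFC A6 retry proposal linked above; the service, method, and numbers are made up). In practice the config is normally delivered to the client through the name resolver rather than hard-coded:

```go
package main

import "fmt"

// exampleServiceConfig is an illustrative service config JSON with a
// per-method retryPolicy and a channel-wide retryThrottling section.
const exampleServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
    "timeout": "1s",
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }],
  "retryThrottling": {
    "maxTokens": 10,
    "tokenRatio": 0.1
  }
}`

func main() {
	fmt.Println(exampleServiceConfig)
}
```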

 func parseDuration(s *string) (*time.Duration, error) {
@@ -144,30 +230,33 @@ type jsonMC struct {
 Timeout *string
 MaxRequestMessageBytes *int64
 MaxResponseMessageBytes *int64
+RetryPolicy *jsonRetryPolicy
 }

 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
 type jsonSC struct {
 LoadBalancingPolicy *string
-StickinessMetadataKey *string
+MethodConfig *[]jsonMC
-MethodConfig *[]jsonMC
+RetryThrottling *retryThrottlingPolicy
+HealthCheckConfig *healthCheckConfig
 }

-func parseServiceConfig(js string) (ServiceConfig, error) {
+func parseServiceConfig(js string) (*ServiceConfig, error) {
 var rsc jsonSC
 err := json.Unmarshal([]byte(js), &rsc)
 if err != nil {
 grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-return ServiceConfig{}, err
+return nil, err
 }
 sc := ServiceConfig{
 LB: rsc.LoadBalancingPolicy,
 Methods: make(map[string]MethodConfig),
+retryThrottling: rsc.RetryThrottling,
-stickinessMetadataKey: rsc.StickinessMetadataKey,
+healthCheckConfig: rsc.HealthCheckConfig,
+rawJSONString: js,
 }
 if rsc.MethodConfig == nil {
-return sc, nil
+return &sc, nil
 }

 for _, m := range *rsc.MethodConfig {
@@ -177,13 +266,17 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
 d, err := parseDuration(m.Timeout)
 if err != nil {
 grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-return ServiceConfig{}, err
+return nil, err
 }

 mc := MethodConfig{
 WaitForReady: m.WaitForReady,
 Timeout: d,
 }
+if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+return nil, err
+}
 if m.MaxRequestMessageBytes != nil {
 if *m.MaxRequestMessageBytes > int64(maxInt) {
 mc.MaxReqSize = newInt(maxInt)
@@ -205,7 +298,54 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
 }
 }

-return sc, nil
+if sc.retryThrottling != nil {
+if sc.retryThrottling.MaxTokens <= 0 ||
+sc.retryThrottling.MaxTokens > 1000 ||
+sc.retryThrottling.TokenRatio <= 0 {
+// Illegal throttling config; disable throttling.
+sc.retryThrottling = nil
+}
+}
+return &sc, nil
+}

+func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
+if jrp == nil {
+return nil, nil
+}
+ib, err := parseDuration(&jrp.InitialBackoff)
+if err != nil {
+return nil, err
+}
+mb, err := parseDuration(&jrp.MaxBackoff)
+if err != nil {
+return nil, err
+}

+if jrp.MaxAttempts <= 1 ||
+*ib <= 0 ||
+*mb <= 0 ||
+jrp.BackoffMultiplier <= 0 ||
+len(jrp.RetryableStatusCodes) == 0 {
+grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+return nil, nil
+}

+rp := &retryPolicy{
+maxAttempts: jrp.MaxAttempts,
+initialBackoff: *ib,
+maxBackoff: *mb,
+backoffMultiplier: jrp.BackoffMultiplier,
+retryableStatusCodes: make(map[codes.Code]bool),
+}
+if rp.maxAttempts > 5 {
+// TODO(retry): Make the max maxAttempts configurable.
+rp.maxAttempts = 5
+}
+for _, code := range jrp.RetryableStatusCodes {
+rp.retryableStatusCodes[code] = true
+}
+return rp, nil
 }

 func min(a, b *int) *int {
@@ -19,9 +19,8 @@
 package stats

 import (
+"context"
 "net"

-"golang.org/x/net/context"
 )

 // ConnTagInfo defines the relevant information needed by connection context tagger.

@@ -24,10 +24,11 @@
 package stats // import "google.golang.org/grpc/stats"

 import (
+"context"
 "net"
 "time"

-"golang.org/x/net/context"
+"google.golang.org/grpc/metadata"
 )

 // RPCStats contains stats information about RPCs.
@@ -173,6 +174,9 @@ type End struct {
 BeginTime time.Time
 // EndTime is the time when the RPC ends.
 EndTime time.Time
+// Trailer contains the trailer metadata received from the server. This
+// field is only valid if this End is from the client side.
+Trailer metadata.MD
 // Error is the error the RPC ended with. It is an error generated from
 // status.Status and can be converted back to status.Status using
 // status.FromError if non-nil.
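The new End.Trailer field is visible to any stats.Handler an application installs. A hypothetical client-side handler that logs it (the target address is a placeholder; only the interface methods are from the real API):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type trailerLogger struct{}

func (trailerLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (trailerLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok {
		// Trailer is only populated on the client side.
		log.Printf("RPC ended: err=%v trailer=%v", end.Error, end.Trailer)
	}
}
func (trailerLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (trailerLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithStatsHandler(trailerLogger{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```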
@@ -28,6 +28,7 @@
 package status

 import (
+"context"
 "errors"
 "fmt"

@@ -126,7 +127,9 @@ func FromError(err error) (s *Status, ok bool) {
 if err == nil {
 return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
 }
-if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
+if se, ok := err.(interface {
+GRPCStatus() *Status
+}); ok {
 return se.GRPCStatus(), true
 }
 return New(codes.Unknown, err.Error()), false
@@ -182,8 +185,26 @@ func Code(err error) codes.Code {
 if err == nil {
 return codes.OK
 }
-if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
+if se, ok := err.(interface {
+GRPCStatus() *Status
+}); ok {
 return se.GRPCStatus().Code()
 }
 return codes.Unknown
 }

+// FromContextError converts a context error into a Status. It returns a
+// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
+// non-nil and not a context error.
+func FromContextError(err error) *Status {
+switch err {
+case nil:
+return New(codes.OK, "")
+case context.DeadlineExceeded:
+return New(codes.DeadlineExceeded, err.Error())
+case context.Canceled:
+return New(codes.Canceled, err.Error())
+default:
+return New(codes.Unknown, err.Error())
+}
+}
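A small usage sketch for the status helpers touched here: FromContextError maps context errors onto gRPC codes, and FromError unwraps any error that exposes GRPCStatus():

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done()

	// context.DeadlineExceeded becomes codes.DeadlineExceeded.
	st := status.FromContextError(ctx.Err())
	fmt.Println(st.Code() == codes.DeadlineExceeded)

	// Errors built by the status package carry a GRPCStatus() and are
	// recovered losslessly by FromError.
	err := status.Error(codes.NotFound, "no such thing")
	if s, ok := status.FromError(err); ok {
		fmt.Println(s.Code(), s.Message())
	}
}
```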
(One file's diff is suppressed because it is too large.)
@@ -21,7 +21,7 @@
 package tap

 import (
-"golang.org/x/net/context"
+"context"
 )

 // Info defines the relevant information needed by the handles.
@@ -24,6 +24,7 @@ import (
 "io"
 "net"
 "strings"
+"sync"
 "time"

 "golang.org/x/net/trace"
@@ -53,13 +54,25 @@ type traceInfo struct {
 }

 // firstLine is the first line of an RPC trace.
+// It may be mutated after construction; remoteAddr specifically may change
+// during client-side use.
 type firstLine struct {
+mu sync.Mutex
 client bool // whether this is a client (outgoing) RPC
 remoteAddr net.Addr
 deadline time.Duration // may be zero
 }

+func (f *firstLine) SetRemoteAddr(addr net.Addr) {
+f.mu.Lock()
+f.remoteAddr = addr
+f.mu.Unlock()
+}

 func (f *firstLine) String() string {
+f.mu.Lock()
+defer f.mu.Unlock()

 var line bytes.Buffer
 io.WriteString(&line, "RPC: ")
 if f.client {
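These firstLine entries feed golang.org/x/net/trace, which gRPC uses for per-RPC traces. A hedged sketch of exposing them on a debug endpoint (the port is arbitrary; grpc.EnableTracing is the public switch):

```go
package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	grpc.EnableTracing = true
	// golang.org/x/net/trace (imported by gRPC) registers handlers for
	// /debug/requests and /debug/events on the default mux.
	go func() {
		log.Println(http.ListenAndServe("localhost:8080", nil))
	}()
	select {} // block; a real program would run its gRPC server here
}
```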
@@ -1,51 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
-"net"
-"net/http"
-
-"google.golang.org/grpc/codes"
-
-"golang.org/x/net/context"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
-}
-
-// ContextErr converts the error from context package into a StreamError.
-func ContextErr(err error) StreamError {
-switch err {
-case context.DeadlineExceeded:
-return streamErrorf(codes.DeadlineExceeded, "%v", err)
-case context.Canceled:
-return streamErrorf(codes.Canceled, "%v", err)
-}
-return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
-}
-
-// contextFromRequest returns a background context.
-func contextFromRequest(r *http.Request) context.Context {
-return context.Background()
-}
@ -1,52 +0,0 @@
|
||||||
// +build go1.7
|
|
||||||
|
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2016 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package transport
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
|
|
||||||
netctx "golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
// dialContext connects to the address on the named network.
|
|
||||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
|
||||||
return (&net.Dialer{}).DialContext(ctx, network, address)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContextErr converts the error from context package into a StreamError.
|
|
||||||
func ContextErr(err error) StreamError {
|
|
||||||
switch err {
|
|
||||||
case context.DeadlineExceeded, netctx.DeadlineExceeded:
|
|
||||||
return streamErrorf(codes.DeadlineExceeded, "%v", err)
|
|
||||||
case context.Canceled, netctx.Canceled:
|
|
||||||
return streamErrorf(codes.Canceled, "%v", err)
|
|
||||||
}
|
|
||||||
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// contextFromRequest returns a context from the HTTP Request.
|
|
||||||
func contextFromRequest(r *http.Request) context.Context {
|
|
||||||
return r.Context()
|
|
||||||
}
|
|
vendor/google.golang.org/grpc/naming/go18.go → vendor/google.golang.org/grpc/version.go (generated, vendored)
@@ -1,8 +1,6 @@
-// +build go1.8
-
 /*
 *
- * Copyright 2017 gRPC authors.
+ * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -18,11 +16,7 @@
 *
 */

-package naming
+package grpc

-import "net"
+// Version is the current grpc version.
+const Version = "1.20.1"
-var (
-lookupHost = net.DefaultResolver.LookupHost
-lookupSRV = net.DefaultResolver.LookupSRV
-)