mirror of https://github.com/docker/cli.git

commit 4f0b466b1b

Merge pull request #4626 from thaJeztah/bump_grpc

vendor: google.golang.org/grpc v1.56.3

This bumps the vendored gRPC-Go from v1.56.2 to v1.56.3, a patch release whose main change is hardening against the HTTP/2 rapid-reset attack (CVE-2023-44487): the server now enforces MaxConcurrentStreams itself instead of only advertising the limit in the HTTP/2 SETTINGS frame. The vendored diff follows.
vendor.mod
@@ -85,6 +85,6 @@ require (
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.8.0 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
-	google.golang.org/grpc v1.56.2 // indirect
+	google.golang.org/grpc v1.56.3 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 )
vendor.sum
@@ -358,8 +358,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
 google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
-google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
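An aside on the vendor.sum entries above: each module carries two digests, an "h1:" hash over the module's entire file tree and a second hash over its go.mod alone, which is why the bump changes the content hash but keeps the same go.mod hash. The "h1:" algorithm lives in golang.org/x/mod/sumdb/dirhash; a minimal sketch of recomputing one, where the unpacked module directory is a hypothetical path:

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Both arguments are illustrative: point dir at an unpacked copy of the
	// module and keep the prefix in module@version form.
	h, err := dirhash.HashDir(
		"/tmp/google.golang.org/grpc@v1.56.3",
		"google.golang.org/grpc@v1.56.3",
		dirhash.Hash1, // the "h1:" scheme used by go.sum and vendor.sum
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // should print the h1:... digest recorded above
}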
vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -171,15 +171,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		ID:  http2.SettingMaxFrameSize,
 		Val: http2MaxFrameLen,
 	}}
-	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
-	// permitted in the HTTP2 spec.
-	maxStreams := config.MaxStreams
-	if maxStreams == 0 {
-		maxStreams = math.MaxUint32
-	} else {
+	if config.MaxStreams != math.MaxUint32 {
 		isettings = append(isettings, http2.Setting{
 			ID:  http2.SettingMaxConcurrentStreams,
-			Val: maxStreams,
+			Val: config.MaxStreams,
 		})
 	}
 	dynamicWindow := true
@@ -258,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		framer:            framer,
 		readerDone:        make(chan struct{}),
 		writerDone:        make(chan struct{}),
-		maxStreams:        maxStreams,
+		maxStreams:        config.MaxStreams,
 		inTapHandle:       config.InTapHandle,
 		fc:                &trInFlow{limit: uint32(icwz)},
 		state:             reachable,
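The transport previously special-cased MaxStreams == 0 (treat as unlimited, skip the SETTINGS entry); now zero is normalized to math.MaxUint32 in the option layer (see the server.go hunks below), and a SETTINGS_MAX_CONCURRENT_STREAMS entry is sent whenever a finite limit is configured. The setting is only advisory to the peer, which is why this release also adds real server-side enforcement further down. For orientation, a small sketch of what writing that setting looks like with golang.org/x/net/http2, using an in-memory connection pair; the function name and values are illustrative:

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/http2"
)

// advertiseStreamLimit writes a SETTINGS frame announcing the stream cap.
// Peers are expected to honor it, but nothing forces them to, hence the
// server-side enforcement added in this release.
func advertiseStreamLimit(conn net.Conn, limit uint32) error {
	fr := http2.NewFramer(conn, conn) // writes to and reads from conn
	return fr.WriteSettings(http2.Setting{
		ID:  http2.SettingMaxConcurrentStreams,
		Val: limit,
	})
}

func main() {
	srv, cli := net.Pipe() // in-memory connection pair, for demonstration
	go func() {
		if err := advertiseStreamLimit(srv, 100); err != nil {
			panic(err)
		}
	}()
	// Read the frame back on the other end to show what was sent.
	fr := http2.NewFramer(cli, cli)
	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	fmt.Printf("received %v frame\n", f.Header().Type) // SETTINGS
}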
vendor/google.golang.org/grpc/server.go
@@ -115,12 +115,6 @@ type serviceInfo struct {
 	mdata       interface{}
 }
 
-type serverWorkerData struct {
-	st     transport.ServerTransport
-	wg     *sync.WaitGroup
-	stream *transport.Stream
-}
-
 // Server is a gRPC server to serve RPC requests.
 type Server struct {
 	opts serverOptions
@@ -145,7 +139,7 @@ type Server struct {
 	channelzID *channelz.Identifier
 	czData     *channelzData
 
-	serverWorkerChannel chan *serverWorkerData
+	serverWorkerChannel chan func()
 }
 
 type serverOptions struct {
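Replacing chan *serverWorkerData with chan func() lets the dispatch site capture the transport, stream, and WaitGroup in a closure instead of shuttling them through a carrier struct, which is why serverWorkerData could be deleted above. A toy sketch of the pattern, with made-up names:

package main

import "fmt"

func main() {
	work := make(chan func())
	done := make(chan struct{})

	go func() { // a single generic worker: it never sees the arguments
		for f := range work {
			f()
		}
		close(done)
	}()

	for i := 0; i < 3; i++ {
		i := i // capture the per-iteration value (pre-Go 1.22 semantics)
		work <- func() { fmt.Println("task", i) }
	}
	close(work)
	<-done
}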
@@ -177,6 +171,7 @@ type serverOptions struct {
 }
 
 var defaultServerOptions = serverOptions{
+	maxConcurrentStreams:  math.MaxUint32,
 	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
 	maxSendMessageSize:    defaultServerMaxSendMessageSize,
 	connectionTimeout:     120 * time.Second,
@@ -387,6 +382,9 @@ func MaxSendMsgSize(m int) ServerOption {
 // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
 // of concurrent streams to each ServerTransport.
 func MaxConcurrentStreams(n uint32) ServerOption {
+	if n == 0 {
+		n = math.MaxUint32
+	}
 	return newFuncServerOption(func(o *serverOptions) {
 		o.maxConcurrentStreams = n
 	})
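Normalizing n == 0 inside the option means maxConcurrentStreams is always a usable bound downstream (the default, set above, is math.MaxUint32). Nothing changes for callers; a typical server setup through the public API, with an illustrative address and limit:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	// Cap each transport at 100 concurrent streams; as of v1.56.3 the cap
	// is enforced server-side, not merely advertised via SETTINGS.
	s := grpc.NewServer(grpc.MaxConcurrentStreams(100))
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}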
@@ -567,24 +565,19 @@ const serverWorkerResetThreshold = 1 << 16
 // [1] https://github.com/golang/go/issues/18138
 func (s *Server) serverWorker() {
 	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
-		data, ok := <-s.serverWorkerChannel
+		f, ok := <-s.serverWorkerChannel
 		if !ok {
 			return
 		}
-		s.handleSingleStream(data)
+		f()
 	}
 	go s.serverWorker()
 }
 
-func (s *Server) handleSingleStream(data *serverWorkerData) {
-	defer data.wg.Done()
-	s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
-}
-
 // initServerWorkers creates worker goroutines and a channel to process incoming
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
-	s.serverWorkerChannel = make(chan *serverWorkerData)
+	s.serverWorkerChannel = make(chan func())
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
 		go s.serverWorker()
 	}
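The worker loop is unchanged in shape: each worker retires itself after serverWorkerResetThreshold tasks and respawns on a fresh goroutine, so a stack grown by one large request is eventually released (see golang/go#18138, linked above). A stripped-down sketch of that self-resetting pattern, with a tiny threshold so the reset is visible:

package main

import "fmt"

const resetThreshold = 4 // tiny value for demonstration; gRPC uses 1 << 16

func worker(tasks chan func()) {
	for completed := 0; completed < resetThreshold; completed++ {
		f, ok := <-tasks
		if !ok {
			return
		}
		f()
	}
	// Restart on a fresh goroutine so any stack grown above is released.
	go worker(tasks)
}

func main() {
	tasks := make(chan func())
	go worker(tasks)
	done := make(chan struct{})
	for i := 0; i < 10; i++ {
		i := i
		tasks <- func() { fmt.Println("handled", i) }
	}
	tasks <- func() { close(done) }
	<-done
}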
@@ -943,21 +936,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 	defer st.Close(errors.New("finished serving streams for the server transport"))
 	var wg sync.WaitGroup
 
+	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
 	st.HandleStreams(func(stream *transport.Stream) {
 		wg.Add(1)
+
+		streamQuota.acquire()
+		f := func() {
+			defer streamQuota.release()
+			defer wg.Done()
+			s.handleStream(st, stream, s.traceInfo(st, stream))
+		}
+
 		if s.opts.numServerWorkers > 0 {
-			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
 			select {
-			case s.serverWorkerChannel <- data:
+			case s.serverWorkerChannel <- f:
 				return
 			default:
 				// If all stream workers are busy, fallback to the default code path.
 			}
 		}
-		go func() {
-			defer wg.Done()
-			s.handleStream(st, stream, s.traceInfo(st, stream))
-		}()
+		go f()
 	}, func(ctx context.Context, method string) context.Context {
 		if !EnableTracing {
 			return ctx
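This hunk is the heart of the fix: streamQuota.acquire() runs inside the HandleStreams callback, on the transport's read loop, before any handler goroutine exists. Once maxConcurrentStreams handlers are in flight, the read loop blocks and no further streams are dispatched, so a client that opens streams and immediately resets them (the rapid-reset pattern) can no longer drive an unbounded number of concurrent handlers. A self-contained sketch of that dispatch shape, using a plain buffered channel as the semaphore; every name here is hypothetical:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxConcurrent = 2
	quota := make(chan struct{}, maxConcurrent) // simple semaphore stand-in

	var wg sync.WaitGroup
	for i := 0; i < 6; i++ {
		quota <- struct{}{} // acquire: blocks once 2 handlers are live
		wg.Add(1)
		f := func(id int) {
			defer func() { <-quota }() // release the slot
			defer wg.Done()
			time.Sleep(10 * time.Millisecond) // stand-in for handleStream
			fmt.Println("finished stream", id)
		}
		go f(i)
	}
	wg.Wait()
}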
@@ -2052,3 +2050,32 @@ func validateSendCompressor(name, clientCompressors string) error {
 	}
 	return fmt.Errorf("client does not support compressor %q", name)
 }
+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+	n    int64
+	wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+	if atomic.AddInt64(&q.n, -1) < 0 {
+		// We ran out of quota. Block until a release happens.
+		<-q.wait
+	}
+}
+
+func (q *atomicSemaphore) release() {
+	// N.B. the "<= 0" check below should allow for this to work with multiple
+	// concurrent calls to acquire, but also note that with synchronous calls to
+	// acquire, as our system does, n will never be less than -1. There are
+	// fairness issues (queuing) to consider if this was to be generalized.
+	if atomic.AddInt64(&q.n, 1) <= 0 {
+		// An acquire was waiting on us. Unblock it.
+		q.wait <- struct{}{}
+	}
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+	return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
+}
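The comments spell out the contract: acquire is only ever called synchronously from the single dispatching loop, so n never drops below -1 and the one-slot wait channel suffices. To see it behave, here is a runnable copy of the type driven by a made-up workload; the peak concurrency it reports never exceeds the initial quota:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// Standalone copy of the semaphore above, for experimentation.
type atomicSemaphore struct {
	n    int64
	wait chan struct{}
}

func (q *atomicSemaphore) acquire() {
	if atomic.AddInt64(&q.n, -1) < 0 {
		<-q.wait // out of quota: block until a release
	}
}

func (q *atomicSemaphore) release() {
	if atomic.AddInt64(&q.n, 1) <= 0 {
		q.wait <- struct{}{} // an acquire is waiting: unblock it
	}
}

func main() {
	sem := &atomicSemaphore{n: 2, wait: make(chan struct{}, 1)}
	var mu sync.Mutex
	inFlight, peak := 0, 0
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		sem.acquire() // called synchronously, as the contract requires
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer sem.release()
			mu.Lock()
			inFlight++
			if inFlight > peak {
				peak = inFlight
			}
			mu.Unlock()
			time.Sleep(time.Millisecond) // simulated work
			mu.Lock()
			inFlight--
			mu.Unlock()
		}()
	}
	wg.Wait()
	fmt.Printf("peak in-flight: %d (limit 2)\n", peak)
}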
vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.56.2"
+const Version = "1.56.3"
vendor/modules.txt
@@ -373,7 +373,7 @@ golang.org/x/tools/internal/typesinternal
 # google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.56.2
+# google.golang.org/grpc v1.56.3
 ## explicit; go 1.17
 google.golang.org/grpc
 google.golang.org/grpc/attributes