vendor: golang.org/x/net v0.5.0

full diff: https://github.com/golang/net/compare/v0.4.0...v0.5.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author: Sebastiaan van Stijn
Date:   2023-01-23 11:55:08 +01:00
parent d7f21ea9c8
commit 526e5e7c95
GPG Key ID: 76698F39D527CE8C
6 changed files with 149 additions and 120 deletions


@@ -70,7 +70,7 @@ require (
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
 	golang.org/x/crypto v0.2.0 // indirect
-	golang.org/x/net v0.4.0 // indirect
+	golang.org/x/net v0.5.0 // indirect
 	golang.org/x/time v0.1.0 // indirect
 	google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 // indirect
 	google.golang.org/grpc v1.48.0 // indirect


@@ -505,8 +505,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

vendor/golang.org/x/net/http2/flow.go

@@ -6,23 +6,91 @@
 package http2
-// flow is the flow control window's size.
-type flow struct {
+// inflowMinRefresh is the minimum number of bytes we'll send for a
+// flow control window update.
+const inflowMinRefresh = 4 << 10
+// inflow accounts for an inbound flow control window.
+// It tracks both the latest window sent to the peer (used for enforcement)
+// and the accumulated unsent window.
+type inflow struct {
+	avail int32
+	unsent int32
+}
+// set sets the initial window.
+func (f *inflow) init(n int32) {
+	f.avail = n
+}
+// add adds n bytes to the window, with a maximum window size of max,
+// indicating that the peer can now send us more data.
+// For example, the user read from a {Request,Response} body and consumed
+// some of the buffered data, so the peer can now send more.
+// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer.
+// Window updates are accumulated and sent when the unsent capacity
+// is at least inflowMinRefresh or will at least double the peer's available window.
+func (f *inflow) add(n int) (connAdd int32) {
+	if n < 0 {
+		panic("negative update")
+	}
+	unsent := int64(f.unsent) + int64(n)
+	// "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets."
+	// RFC 7540 Section 6.9.1.
+	const maxWindow = 1<<31 - 1
+	if unsent+int64(f.avail) > maxWindow {
+		panic("flow control update exceeds maximum window size")
+	}
+	f.unsent = int32(unsent)
+	if f.unsent < inflowMinRefresh && f.unsent < f.avail {
+		// If there aren't at least inflowMinRefresh bytes of window to send,
+		// and this update won't at least double the window, buffer the update for later.
+		return 0
+	}
+	f.avail += f.unsent
+	f.unsent = 0
+	return int32(unsent)
+}
+// take attempts to take n bytes from the peer's flow control window.
+// It reports whether the window has available capacity.
+func (f *inflow) take(n uint32) bool {
+	if n > uint32(f.avail) {
+		return false
+	}
+	f.avail -= int32(n)
+	return true
+}
+// takeInflows attempts to take n bytes from two inflows,
+// typically connection-level and stream-level flows.
+// It reports whether both windows have available capacity.
+func takeInflows(f1, f2 *inflow, n uint32) bool {
+	if n > uint32(f1.avail) || n > uint32(f2.avail) {
+		return false
+	}
+	f1.avail -= int32(n)
+	f2.avail -= int32(n)
+	return true
+}
+// outflow is the outbound flow control window's size.
+type outflow struct {
 	_ incomparable
 	// n is the number of DATA bytes we're allowed to send.
-	// A flow is kept both on a conn and a per-stream.
+	// An outflow is kept both on a conn and a per-stream.
 	n int32
-	// conn points to the shared connection-level flow that is
-	// shared by all streams on that conn. It is nil for the flow
+	// conn points to the shared connection-level outflow that is
+	// shared by all streams on that conn. It is nil for the outflow
 	// that's on the conn directly.
-	conn *flow
+	conn *outflow
 }
-func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf }
-func (f *flow) available() int32 {
+func (f *outflow) available() int32 {
 	n := f.n
 	if f.conn != nil && f.conn.n < n {
 		n = f.conn.n
@@ -30,7 +98,7 @@ func (f *flow) available() int32 {
 	return n
 }
-func (f *flow) take(n int32) {
+func (f *outflow) take(n int32) {
 	if n > f.available() {
 		panic("internal error: took too much")
 	}
@@ -42,7 +110,7 @@ func (f *flow) take(n int32) {
 // add adds n bytes (positive or negative) to the flow control window.
 // It returns false if the sum would exceed 2^31-1.
-func (f *flow) add(n int32) bool {
+func (f *outflow) add(n int32) bool {
 	sum := f.n + n
 	if (sum > n) == (f.n > 0) {
 		f.n = sum
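
The new inflow type changes how receive-side credit is managed: DATA is debited from avail when it arrives (take), and credit returned by the reader is buffered in unsent until it is worth announcing; add reports the increment to put in a WINDOW_UPDATE frame, or 0 while buffering. Below is a minimal standalone sketch of that accounting, not part of this diff: the type and constant only mirror the vendored code, and the RFC 7540 overflow check is omitted.

package main

import "fmt"

// inflowMinRefresh mirrors the vendored constant: don't announce less than 4 KiB.
const inflowMinRefresh = 4 << 10

// window mirrors the vendored inflow type: avail is what the peer may still
// send; unsent is credit accumulated but not yet announced in a WINDOW_UPDATE.
type window struct {
	avail  int32
	unsent int32
}

// take debits n bytes when DATA arrives; false means the peer overflowed the
// advertised window (a flow control error).
func (w *window) take(n uint32) bool {
	if n > uint32(w.avail) {
		return false
	}
	w.avail -= int32(n)
	return true
}

// add credits n consumed bytes and reports the WINDOW_UPDATE increment to send
// now: 0 until the buffered credit reaches inflowMinRefresh or would at least
// double the peer's remaining window.
func (w *window) add(n int) int32 {
	w.unsent += int32(n)
	if w.unsent < inflowMinRefresh && w.unsent < w.avail {
		return 0 // keep buffering
	}
	send := w.unsent
	w.avail += send
	w.unsent = 0
	return send
}

func main() {
	w := window{avail: 64 << 10} // e.g. a 64 KiB receive window
	w.take(8 << 10)              // 8 KiB of DATA arrives
	fmt.Println(w.add(1 << 10))  // 1 KiB consumed: prints 0, credit buffered
	fmt.Println(w.add(7 << 10))  // 7 KiB more consumed: prints 8192, announce WINDOW_UPDATE
}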

vendor/golang.org/x/net/http2/server.go

@@ -448,7 +448,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 	// configured value for inflow, that will be updated when we send a
 	// WINDOW_UPDATE shortly after sending SETTINGS.
 	sc.flow.add(initialWindowSize)
-	sc.inflow.add(initialWindowSize)
+	sc.inflow.init(initialWindowSize)
 	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
 	sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
@@ -563,8 +563,8 @@ type serverConn struct {
 	wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
 	bodyReadCh chan bodyReadMsg // from handlers -> serve
 	serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
-	flow flow // conn-wide (not stream-specific) outbound flow control
-	inflow flow // conn-wide inbound flow control
+	flow outflow // conn-wide (not stream-specific) outbound flow control
+	inflow inflow // conn-wide inbound flow control
 	tlsState *tls.ConnectionState // shared by all handlers, like net/http
 	remoteAddrStr string
 	writeSched WriteScheduler
@@ -641,10 +641,10 @@ type stream struct {
 	cancelCtx func()
 	// owned by serverConn's serve loop:
 	bodyBytes int64 // body bytes seen so far
 	declBodyBytes int64 // or -1 if undeclared
-	flow flow // limits writing from Handler to client
-	inflow flow // what the client is allowed to POST/etc to us
+	flow outflow // limits writing from Handler to client
+	inflow inflow // what the client is allowed to POST/etc to us
 	state streamState
 	resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
 	gotTrailerHeader bool // HEADER frame for trailers was seen
@@ -1503,7 +1503,7 @@ func (sc *serverConn) processFrame(f Frame) error {
 	if sc.inGoAway && (sc.goAwayCode != ErrCodeNo || f.Header().StreamID > sc.maxClientStreamID) {
 		if f, ok := f.(*DataFrame); ok {
-			if sc.inflow.available() < int32(f.Length) {
+			if !sc.inflow.take(f.Length) {
 				return sc.countError("data_flow", streamError(f.Header().StreamID, ErrCodeFlowControl))
 			}
 			sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
@@ -1775,14 +1775,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// But still enforce their connection-level flow control,
 		// and return any flow control bytes since we're not going
 		// to consume them.
-		if sc.inflow.available() < int32(f.Length) {
+		if !sc.inflow.take(f.Length) {
 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
 		}
-		// Deduct the flow control from inflow, since we're
-		// going to immediately add it back in
-		// sendWindowUpdate, which also schedules sending the
-		// frames.
-		sc.inflow.take(int32(f.Length))
 		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 		if st != nil && st.resetQueued {
@@ -1797,10 +1792,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
 	// Sender sending more than they'd declared?
 	if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
-		if sc.inflow.available() < int32(f.Length) {
+		if !sc.inflow.take(f.Length) {
 			return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
 		}
-		sc.inflow.take(int32(f.Length))
 		sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
 		st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
@@ -1811,10 +1805,9 @@ func (sc *serverConn) processData(f *DataFrame) error {
 	}
 	if f.Length > 0 {
 		// Check whether the client has flow control quota.
-		if st.inflow.available() < int32(f.Length) {
+		if !takeInflows(&sc.inflow, &st.inflow, f.Length) {
 			return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
 		}
-		st.inflow.take(int32(f.Length))
 		if len(data) > 0 {
 			wrote, err := st.body.Write(data)
@@ -1830,10 +1823,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// Return any padded flow control now, since we won't
 		// refund it later on body reads.
-		if pad := int32(f.Length) - int32(len(data)); pad > 0 {
-			sc.sendWindowUpdate32(nil, pad)
-			sc.sendWindowUpdate32(st, pad)
-		}
+		// Call sendWindowUpdate even if there is no padding,
+		// to return buffered flow control credit if the sent
+		// window has shrunk.
+		pad := int32(f.Length) - int32(len(data))
+		sc.sendWindowUpdate32(nil, pad)
+		sc.sendWindowUpdate32(st, pad)
 	}
 	if f.StreamEnded() {
 		st.endStream()
@@ -2105,8 +2100,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
 	st.cw.Init()
 	st.flow.conn = &sc.flow // link to conn-level counter
 	st.flow.add(sc.initialStreamSendWindowSize)
-	st.inflow.conn = &sc.inflow // link to conn-level counter
-	st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+	st.inflow.init(sc.srv.initialStreamRecvWindowSize())
 	if sc.hs.WriteTimeout != 0 {
 		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
 	}
@@ -2388,47 +2382,28 @@ func (sc *serverConn) noteBodyRead(st *stream, n int) {
 }
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
-	sc.serveG.check()
-	// "The legal range for the increment to the flow control
-	// window is 1 to 2^31-1 (2,147,483,647) octets."
-	// A Go Read call on 64-bit machines could in theory read
-	// a larger Read than this. Very unlikely, but we handle it here
-	// rather than elsewhere for now.
-	const maxUint31 = 1<<31 - 1
-	for n > maxUint31 {
-		sc.sendWindowUpdate32(st, maxUint31)
-		n -= maxUint31
-	}
-	sc.sendWindowUpdate32(st, int32(n))
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+	sc.sendWindowUpdate(st, int(n))
 }
 // st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
 	sc.serveG.check()
-	if n == 0 {
+	var streamID uint32
+	var send int32
+	if st == nil {
+		send = sc.inflow.add(n)
+	} else {
+		streamID = st.id
+		send = st.inflow.add(n)
+	}
+	if send == 0 {
 		return
 	}
-	if n < 0 {
-		panic("negative update")
-	}
-	var streamID uint32
-	if st != nil {
-		streamID = st.id
-	}
 	sc.writeFrame(FrameWriteRequest{
-		write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
+		write: writeWindowUpdate{streamID: streamID, n: uint32(send)},
 		stream: st,
 	})
-	var ok bool
-	if st == nil {
-		ok = sc.inflow.add(n)
-	} else {
-		ok = st.inflow.add(n)
-	}
-	if !ok {
-		panic("internal error; sent too many window updates without decrements?")
-	}
 }
 // requestBody is the Handler's Request.Body type.
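
One consequence visible in processData above: DATA the server will not hand to a handler is still debited from the connection window and immediately credited back through sendWindowUpdate, so the peer's credit is never lost, but the WINDOW_UPDATE itself may be deferred by the coalescing in add. A hypothetical helper, continuing the window sketch from the flow.go note and not part of this diff:

// refundUnconsumed sketches the server's "won't consume this DATA" path:
// debit the frame from the connection window, then credit it straight back.
// The WINDOW_UPDATE may be coalesced with later credit by add.
func refundUnconsumed(conn *window, frameLen uint32, writeWindowUpdate func(id, incr uint32)) bool {
	if !conn.take(frameLen) {
		return false // peer exceeded the advertised window: FLOW_CONTROL_ERROR
	}
	if send := conn.add(int(frameLen)); send > 0 {
		writeWindowUpdate(0, uint32(send)) // stream ID 0 = connection level
	}
	return true
}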

vendor/golang.org/x/net/http2/transport.go

@@ -47,10 +47,6 @@ (
 	// we buffer per stream.
 	transportDefaultStreamFlow = 4 << 20
-	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
-	// a stream-level WINDOW_UPDATE for at a time.
-	transportDefaultStreamMinRefresh = 4 << 10
 	defaultUserAgent = "Go-http-client/2.0"
 	// initialMaxConcurrentStreams is a connections maxConcurrentStreams until
@@ -310,8 +306,8 @@ type ClientConn struct {
 	mu sync.Mutex // guards following
 	cond *sync.Cond // hold mu; broadcast on flow/closed changes
-	flow flow // our conn-level flow control quota (cs.flow is per stream)
-	inflow flow // peer's conn-level flow control
+	flow outflow // our conn-level flow control quota (cs.outflow is per stream)
+	inflow inflow // peer's conn-level flow control
 	doNotReuse bool // whether conn is marked to not be reused for any future requests
 	closing bool
 	closed bool
@@ -376,10 +372,10 @@ type clientStream struct {
 	respHeaderRecv chan struct{} // closed when headers are received
 	res *http.Response // set if respHeaderRecv is closed
-	flow flow // guarded by cc.mu
-	inflow flow // guarded by cc.mu
+	flow outflow // guarded by cc.mu
+	inflow inflow // guarded by cc.mu
 	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
 	readErr error // sticky read error; owned by transportResponseBody.Read
 	reqBody io.ReadCloser
 	reqBodyContentLength int64 // -1 means unknown
@@ -811,7 +807,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
 	cc.bw.Write(clientPreface)
 	cc.fr.WriteSettings(initialSettings...)
 	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
-	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+	cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
 	cc.bw.Flush()
 	if cc.werr != nil {
 		cc.Close()
@@ -2073,8 +2069,7 @@ type resAndError struct {
 func (cc *ClientConn) addStreamLocked(cs *clientStream) {
 	cs.flow.add(int32(cc.initialWindowSize))
 	cs.flow.setConnFlow(&cc.flow)
-	cs.inflow.add(transportDefaultStreamFlow)
-	cs.inflow.setConnFlow(&cc.inflow)
+	cs.inflow.init(transportDefaultStreamFlow)
 	cs.ID = cc.nextStreamID
 	cc.nextStreamID += 2
 	cc.streams[cs.ID] = cs
@@ -2533,21 +2528,10 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
 	}
 	cc.mu.Lock()
-	var connAdd, streamAdd int32
-	// Check the conn-level first, before the stream-level.
-	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
-		connAdd = transportDefaultConnFlow - v
-		cc.inflow.add(connAdd)
-	}
+	connAdd := cc.inflow.add(n)
+	var streamAdd int32
 	if err == nil { // No need to refresh if the stream is over or failed.
-		// Consider any buffered body data (read from the conn but not
-		// consumed by the client) when computing flow control for this
-		// stream.
-		v := int(cs.inflow.available()) + cs.bufPipe.Len()
-		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
-			streamAdd = int32(transportDefaultStreamFlow - v)
-			cs.inflow.add(streamAdd)
-		}
+		streamAdd = cs.inflow.add(n)
 	}
 	cc.mu.Unlock()
@@ -2575,17 +2559,15 @@ func (b transportResponseBody) Close() error {
 	if unread > 0 {
 		cc.mu.Lock()
 		// Return connection-level flow control.
-		if unread > 0 {
-			cc.inflow.add(int32(unread))
-		}
+		connAdd := cc.inflow.add(unread)
 		cc.mu.Unlock()
 		// TODO(dneil): Acquiring this mutex can block indefinitely.
 		// Move flow control return to a goroutine?
 		cc.wmu.Lock()
 		// Return connection-level flow control.
-		if unread > 0 {
-			cc.fr.WriteWindowUpdate(0, uint32(unread))
+		if connAdd > 0 {
+			cc.fr.WriteWindowUpdate(0, uint32(connAdd))
 		}
 		cc.bw.Flush()
 		cc.wmu.Unlock()
@@ -2628,13 +2610,18 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 		// But at least return their flow control:
 		if f.Length > 0 {
 			cc.mu.Lock()
-			cc.inflow.add(int32(f.Length))
+			ok := cc.inflow.take(f.Length)
+			connAdd := cc.inflow.add(int(f.Length))
 			cc.mu.Unlock()
-			cc.wmu.Lock()
-			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
-			cc.bw.Flush()
-			cc.wmu.Unlock()
+			if !ok {
+				return ConnectionError(ErrCodeFlowControl)
+			}
+			if connAdd > 0 {
+				cc.wmu.Lock()
+				cc.fr.WriteWindowUpdate(0, uint32(connAdd))
+				cc.bw.Flush()
+				cc.wmu.Unlock()
+			}
 		}
 		return nil
 	}
@@ -2665,9 +2652,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 	}
 	// Check connection-level flow control.
 	cc.mu.Lock()
-	if cs.inflow.available() >= int32(f.Length) {
-		cs.inflow.take(int32(f.Length))
-	} else {
+	if !takeInflows(&cc.inflow, &cs.inflow, f.Length) {
 		cc.mu.Unlock()
 		return ConnectionError(ErrCodeFlowControl)
 	}
@@ -2689,19 +2674,20 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
 		}
 	}
-	if refund > 0 {
-		cc.inflow.add(int32(refund))
-		if !didReset {
-			cs.inflow.add(int32(refund))
-		}
+	sendConn := cc.inflow.add(refund)
+	var sendStream int32
+	if !didReset {
+		sendStream = cs.inflow.add(refund)
 	}
 	cc.mu.Unlock()
-	if refund > 0 {
+	if sendConn > 0 || sendStream > 0 {
 		cc.wmu.Lock()
-		cc.fr.WriteWindowUpdate(0, uint32(refund))
-		if !didReset {
-			cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+		if sendConn > 0 {
+			cc.fr.WriteWindowUpdate(0, uint32(sendConn))
+		}
+		if sendStream > 0 {
+			cc.fr.WriteWindowUpdate(cs.ID, uint32(sendStream))
 		}
 		cc.bw.Flush()
 		cc.wmu.Unlock()
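
On the transport side the same accounting replaces the old threshold checks against transportDefaultConnFlow and transportDefaultStreamMinRefresh: incoming DATA is debited from the connection and stream windows together, and body reads credit both windows back, writing WINDOW_UPDATE frames only for the increments that add reports. Continuing the window sketch from the flow.go note above; this is illustrative only, and the function names other than takeInflows (which mirrors the vendored helper) are hypothetical.

// takeInflows debits n bytes from both windows, failing if either lacks
// capacity, like the vendored helper of the same name.
func takeInflows(conn, stream *window, n uint32) bool {
	if n > uint32(conn.avail) || n > uint32(stream.avail) {
		return false
	}
	conn.avail -= int32(n)
	stream.avail -= int32(n)
	return true
}

// onBodyRead sketches the consume path: credit both levels and emit a
// WINDOW_UPDATE only when add reports a non-zero increment
// (stream ID 0 addresses the connection-level window).
func onBodyRead(conn, stream *window, streamID uint32, n int, writeWindowUpdate func(id, incr uint32)) {
	if connAdd := conn.add(n); connAdd > 0 {
		writeWindowUpdate(0, uint32(connAdd))
	}
	if streamAdd := stream.add(n); streamAdd > 0 {
		writeWindowUpdate(streamID, uint32(streamAdd))
	}
}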

vendor/modules.txt

@@ -265,7 +265,7 @@ go.etcd.io/etcd/raft/v3/raftpb
 ## explicit; go 1.17
 golang.org/x/crypto/ed25519
 golang.org/x/crypto/pbkdf2
-# golang.org/x/net v0.4.0
+# golang.org/x/net v0.5.0
 ## explicit; go 1.17
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2