mirror of https://github.com/docker/cli.git
vendor: golang.org/x/net v0.17.0
full diff: https://github.com/golang/net/compare/v0.10.0...v0.17.0

This fixes the same CVE as go1.21.3 and go1.20.10;

- net/http: rapid stream resets can cause excessive work

  A malicious HTTP/2 client which rapidly creates requests and
  immediately resets them can cause excessive server resource
  consumption. While the total number of requests is bounded to the
  http2.Server.MaxConcurrentStreams setting, resetting an in-progress
  request allows the attacker to create a new request while the existing
  one is still executing.

  HTTP/2 servers now bound the number of simultaneously executing handler
  goroutines to the stream concurrency limit. New requests arriving when
  at the limit (which can only happen after the client has reset an
  existing, in-flight request) will be queued until a handler exits. If
  the request queue grows too large, the server will terminate the
  connection.

  This issue is also fixed in golang.org/x/net/http2 v0.17.0, for users
  manually configuring HTTP/2.

  The default stream concurrency limit is 250 streams (requests) per
  HTTP/2 connection. This value may be adjusted using the
  golang.org/x/net/http2 package; see the Server.MaxConcurrentStreams
  setting and the ConfigureServer function.

  This is CVE-2023-39325 and Go issue https://go.dev/issue/63417.
  This is also tracked by CVE-2023-44487.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
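The mitigation above is largely transparent to servers, but the message also points at the tuning knob for code that configures HTTP/2 through golang.org/x/net/http2 directly: Server.MaxConcurrentStreams, applied with ConfigureServer. The sketch below is a minimal illustration of that knob and is not part of this change; the address, handler, limit value, and certificate paths are placeholder assumptions.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443", // placeholder address
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok\n"))
		}),
	}

	// Attach HTTP/2 support with an explicit per-connection stream limit.
	// With the patched package, leaving this at 0 means the 250-stream default.
	if err := http2.ConfigureServer(srv, &http2.Server{
		MaxConcurrentStreams: 100, // illustrative value
	}); err != nil {
		log.Fatal(err)
	}

	// Certificate paths are placeholders for this sketch.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}

Programs that only rely on net/http's built-in HTTP/2 support pick up the equivalent fix from the go1.21.3 / go1.20.10 toolchains; this vendor bump covers code paths that import golang.org/x/net/http2 directly.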
This commit is contained in:
parent 612a171557
commit a27466fb6f
@@ -81,7 +81,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.4.1 // indirect
 	golang.org/x/crypto v0.14.0 // indirect
 	golang.org/x/mod v0.10.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.8.0 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
@@ -310,8 +310,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1,51 +0,0 @@
-#
-# This Dockerfile builds a recent curl with HTTP/2 client support, using
-# a recent nghttp2 build.
-#
-# See the Makefile for how to tag it. If Docker and that image is found, the
-# Go tests use this curl binary for integration tests.
-#
-
-FROM ubuntu:trusty
-
-RUN apt-get update && \
-    apt-get upgrade -y && \
-    apt-get install -y git-core build-essential wget
-
-RUN apt-get install -y --no-install-recommends \
-       autotools-dev libtool pkg-config zlib1g-dev \
-       libcunit1-dev libssl-dev libxml2-dev libevent-dev \
-       automake autoconf
-
-# The list of packages nghttp2 recommends for h2load:
-RUN apt-get install -y --no-install-recommends make binutils \
-       autoconf automake autotools-dev \
-       libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
-       libev-dev libevent-dev libjansson-dev libjemalloc-dev \
-       cython python3.4-dev python-setuptools
-
-# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
-ENV NGHTTP2_VER 895da9a
-RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
-
-WORKDIR /root/nghttp2
-RUN git reset --hard $NGHTTP2_VER
-RUN autoreconf -i
-RUN automake
-RUN autoconf
-RUN ./configure
-RUN make
-RUN make install
-
-WORKDIR /root
-RUN wget https://curl.se/download/curl-7.45.0.tar.gz
-RUN tar -zxvf curl-7.45.0.tar.gz
-WORKDIR /root/curl-7.45.0
-RUN ./configure --with-ssl --with-nghttp2=/usr/local
-RUN make
-RUN make install
-RUN ldconfig
-
-CMD ["-h"]
-ENTRYPOINT ["/usr/local/bin/curl"]
-
@@ -1,3 +0,0 @@
-curlimage:
-	docker build -t gohttp2/curl .
-
@@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 	if s.NewWriteScheduler != nil {
 		sc.writeSched = s.NewWriteScheduler()
 	} else {
-		sc.writeSched = NewPriorityWriteScheduler(nil)
+		sc.writeSched = newRoundRobinWriteScheduler()
 	}
 
 	// These start at the RFC-specified defaults. If there is a higher
@@ -581,9 +581,11 @@ type serverConn struct {
 	advMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
 	curClientStreams            uint32 // number of open streams initiated by the client
 	curPushedStreams            uint32 // number of open streams initiated by server push
+	curHandlers                 uint32 // number of running handler goroutines
 	maxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests
 	maxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes
 	streams                     map[uint32]*stream
+	unstartedHandlers           []unstartedHandler
 	initialStreamSendWindowSize int32
 	maxFrameSize                int32
 	peerMaxHeaderListSize       uint32 // zero means unknown (default)
@@ -981,6 +983,8 @@ func (sc *serverConn) serve() {
 					return
 				case gracefulShutdownMsg:
 					sc.startGracefulShutdownInternal()
+				case handlerDoneMsg:
+					sc.handlerDone()
 				default:
 					panic("unknown timer")
 				}
@@ -1012,14 +1016,6 @@ func (sc *serverConn) serve() {
 	}
 }
 
-func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
-	select {
-	case <-sc.doneServing:
-	case <-sharedCh:
-		close(privateCh)
-	}
-}
-
 type serverMessage int
 
 // Message values sent to serveMsgCh.
@@ -1028,6 +1024,7 @@ var (
 	idleTimerMsg        = new(serverMessage)
 	shutdownTimerMsg    = new(serverMessage)
 	gracefulShutdownMsg = new(serverMessage)
+	handlerDoneMsg      = new(serverMessage)
 )
 
 func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
@@ -1900,10 +1897,12 @@ func (st *stream) copyTrailersToHandlerRequest() {
 // onReadTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's ReadTimeout has fired.
 func (st *stream) onReadTimeout() {
-	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
-	// returning the bare error.
-	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	if st.body != nil {
+		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
+		// returning the bare error.
+		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	}
 }
 
 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's WriteTimeout has fired.
@@ -2020,13 +2019,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout != 0 {
 		sc.conn.SetReadDeadline(time.Time{})
-		if st.body != nil {
-			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
-		}
+		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
 	}
 
-	go sc.runHandler(rw, req, handler)
-	return nil
+	return sc.scheduleHandler(id, rw, req, handler)
 }
 
 func (sc *serverConn) upgradeRequest(req *http.Request) {
@@ -2046,6 +2042,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
 		sc.conn.SetReadDeadline(time.Time{})
 	}
 
+	// This is the first request on the connection,
+	// so start the handler directly rather than going
+	// through scheduleHandler.
+	sc.curHandlers++
 	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
 }
 
@@ -2286,8 +2286,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
 	return &responseWriter{rws: rws}
 }
 
+type unstartedHandler struct {
+	streamID uint32
+	rw       *responseWriter
+	req      *http.Request
+	handler  func(http.ResponseWriter, *http.Request)
+}
+
+// scheduleHandler starts a handler goroutine,
+// or schedules one to start as soon as an existing handler finishes.
+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
+	sc.serveG.check()
+	maxHandlers := sc.advMaxStreams
+	if sc.curHandlers < maxHandlers {
+		sc.curHandlers++
+		go sc.runHandler(rw, req, handler)
+		return nil
+	}
+	if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
+		return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
+	}
+	sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
+		streamID: streamID,
+		rw:       rw,
+		req:      req,
+		handler:  handler,
+	})
+	return nil
+}
+
+func (sc *serverConn) handlerDone() {
+	sc.serveG.check()
+	sc.curHandlers--
+	i := 0
+	maxHandlers := sc.advMaxStreams
+	for ; i < len(sc.unstartedHandlers); i++ {
+		u := sc.unstartedHandlers[i]
+		if sc.streams[u.streamID] == nil {
+			// This stream was reset before its goroutine had a chance to start.
+			continue
+		}
+		if sc.curHandlers >= maxHandlers {
+			break
+		}
+		sc.curHandlers++
+		go sc.runHandler(u.rw, u.req, u.handler)
+		sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
+	}
+	sc.unstartedHandlers = sc.unstartedHandlers[i:]
+	if len(sc.unstartedHandlers) == 0 {
+		sc.unstartedHandlers = nil
+	}
+}
+
 // Run on its own goroutine.
 func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+	defer sc.sendServeMsg(handlerDoneMsg)
 	didPanic := true
 	defer func() {
 		rw.rws.stream.cancelCtx()
@@ -2429,7 +2483,7 @@ type requestBody struct {
 	conn          *serverConn
 	closeOnce     sync.Once // for use by Close only
 	sawEOF        bool      // for use by Read only
-	pipe          *pipe     // non-nil if we have a HTTP entity message body
+	pipe          *pipe     // non-nil if we have an HTTP entity message body
 	needsContinue bool      // need to send a 100-continue
 }
 
@@ -2569,7 +2623,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 			clen = ""
 		}
 	}
-	if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+	_, hasContentLength := rws.snapHeader["Content-Length"]
+	if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
 		clen = strconv.Itoa(len(p))
 	}
 	_, hasContentType := rws.snapHeader["Content-Type"]
@@ -2774,7 +2829,7 @@ func (w *responseWriter) FlushError() error {
 		err = rws.bw.Flush()
 	} else {
 		// The bufio.Writer won't call chunkWriter.Write
-		// (writeChunk with zero bytes, so we have to do it
+		// (writeChunk with zero bytes), so we have to do it
 		// ourselves to force the HTTP response header and/or
 		// final DATA frame (with END_STREAM) to be sent.
 		_, err = chunkWriter{rws}.Write(nil)
@@ -19,6 +19,7 @@ import (
 	"io/fs"
 	"log"
 	"math"
+	"math/bits"
 	mathrand "math/rand"
 	"net"
 	"net/http"
@@ -291,7 +292,6 @@ func (t *Transport) initConnPool() {
 type ClientConn struct {
 	t             *Transport
 	tconn         net.Conn             // usually *tls.Conn, except specialized impls
-	tconnClosed   bool
 	tlsState      *tls.ConnectionState // nil only for specialized impls
 	reused        uint32               // whether conn is being reused; atomic
 	singleUse     bool                 // whether being used for a single http.Request
@@ -518,11 +518,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
 func authorityAddr(scheme string, authority string) (addr string) {
 	host, port, err := net.SplitHostPort(authority)
 	if err != nil { // authority didn't have a port
+		host = authority
+		port = ""
+	}
+	if port == "" { // authority's port was empty
 		port = "443"
 		if scheme == "http" {
 			port = "80"
 		}
-		host = authority
 	}
 	if a, err := idna.ToASCII(host); err == nil {
 		host = a
@@ -1268,21 +1271,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
 
 	cancelRequest := func(cs *clientStream, err error) error {
 		cs.cc.mu.Lock()
-		defer cs.cc.mu.Unlock()
-		cs.abortStreamLocked(err)
-		if cs.ID != 0 {
-			// This request may have failed because of a problem with the connection,
-			// or for some unrelated reason. (For example, the user might have canceled
-			// the request without waiting for a response.) Mark the connection as
-			// not reusable, since trying to reuse a dead connection is worse than
-			// unnecessarily creating a new one.
-			//
-			// If cs.ID is 0, then the request was never allocated a stream ID and
-			// whatever went wrong was unrelated to the connection. We might have
-			// timed out waiting for a stream slot when StrictMaxConcurrentStreams
-			// is set, for example, in which case retrying on a different connection
-			// will not help.
-			cs.cc.doNotReuse = true
-		}
+		bodyClosed := cs.reqBodyClosed
+		cs.cc.mu.Unlock()
+		// Wait for the request body to be closed.
+		//
+		// If nothing closed the body before now, abortStreamLocked
+		// will have started a goroutine to close it.
+		//
+		// Closing the body before returning avoids a race condition
+		// with net/http checking its readTrackingBody to see if the
+		// body was read from or closed. See golang/go#60041.
+		//
+		// The body is closed in a separate goroutine without the
+		// connection mutex held, but dropping the mutex before waiting
+		// will keep us from holding it indefinitely if the body
+		// close is slow for some reason.
+		if bodyClosed != nil {
+			<-bodyClosed
+		}
 		return err
 	}
@@ -1301,11 +1306,14 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
 				return handleResponseHeaders()
 			default:
 				waitDone()
-				return nil, cancelRequest(cs, cs.abortErr)
+				return nil, cs.abortErr
 			}
 		case <-ctx.Done():
-			return nil, cancelRequest(cs, ctx.Err())
+			err := ctx.Err()
+			cs.abortStream(err)
+			return nil, cancelRequest(cs, err)
 		case <-cs.reqCancel:
+			cs.abortStream(errRequestCanceled)
 			return nil, cancelRequest(cs, errRequestCanceled)
 		}
 	}
@@ -1672,7 +1680,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
 	return int(n) // doesn't truncate; max is 512K
 }
 
-var bufPool sync.Pool // of *[]byte
+// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
+// streaming requests using small frame sizes occupy large buffers initially allocated for prior
+// requests needing big buffers. The size ranges are as follows:
+// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
+// {256 KB, 512 KB], {512 KB, infinity}
+// In practice, the maximum scratch buffer size should not exceed 512 KB due to
+// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
+// It exists mainly as a safety measure, for potential future increases in max buffer size.
+var bufPools [7]sync.Pool // of *[]byte
+func bufPoolIndex(size int) int {
+	if size <= 16384 {
+		return 0
+	}
+	size -= 1
+	bits := bits.Len(uint(size))
+	index := bits - 14
+	if index >= len(bufPools) {
+		return len(bufPools) - 1
+	}
+	return index
+}
 
 func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
 	cc := cs.cc
@@ -1690,12 +1718,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
 	// Scratch buffer for reading into & writing from.
 	scratchLen := cs.frameScratchBufferLen(maxFrameSize)
 	var buf []byte
-	if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
-		defer bufPool.Put(bp)
+	index := bufPoolIndex(scratchLen)
+	if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
+		defer bufPools[index].Put(bp)
 		buf = *bp
 	} else {
 		buf = make([]byte, scratchLen)
-		defer bufPool.Put(&buf)
+		defer bufPools[index].Put(&buf)
 	}
 
 	var sawEOF bool
@@ -1863,6 +1892,9 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
 	if err != nil {
 		return nil, err
 	}
+	if !httpguts.ValidHostHeader(host) {
+		return nil, errors.New("http2: invalid Host header")
+	}
 
 	var path string
 	if req.Method != "CONNECT" {
@@ -1899,7 +1931,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
 		// 8.1.2.3 Request Pseudo-Header Fields
 		// The :path pseudo-header field includes the path and query parts of the
 		// target URI (the path-absolute production and optionally a '?' character
-		// followed by the query production (see Sections 3.3 and 3.4 of
+		// followed by the query production, see Sections 3.3 and 3.4 of
 		// [RFC3986]).
 		f(":authority", host)
 		m := req.Method
@@ -185,6 +185,7 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
 // writeQueue is used by implementations of WriteScheduler.
 type writeQueue struct {
 	s          []FrameWriteRequest
+	prev, next *writeQueue
 }
 
 func (q *writeQueue) empty() bool { return len(q.s) == 0 }
@@ -0,0 +1,119 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"fmt"
+	"math"
+)
+
+type roundRobinWriteScheduler struct {
+	// control contains control frames (SETTINGS, PING, etc.).
+	control writeQueue
+
+	// streams maps stream ID to a queue.
+	streams map[uint32]*writeQueue
+
+	// stream queues are stored in a circular linked list.
+	// head is the next stream to write, or nil if there are no streams open.
+	head *writeQueue
+
+	// pool of empty queues for reuse.
+	queuePool writeQueuePool
+}
+
+// newRoundRobinWriteScheduler constructs a new write scheduler.
+// The round robin scheduler priorizes control frames
+// like SETTINGS and PING over DATA frames.
+// When there are no control frames to send, it performs a round-robin
+// selection from the ready streams.
+func newRoundRobinWriteScheduler() WriteScheduler {
+	ws := &roundRobinWriteScheduler{
+		streams: make(map[uint32]*writeQueue),
+	}
+	return ws
+}
+
+func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+	if ws.streams[streamID] != nil {
+		panic(fmt.Errorf("stream %d already opened", streamID))
+	}
+	q := ws.queuePool.get()
+	ws.streams[streamID] = q
+	if ws.head == nil {
+		ws.head = q
+		q.next = q
+		q.prev = q
+	} else {
+		// Queues are stored in a ring.
+		// Insert the new stream before ws.head, putting it at the end of the list.
+		q.prev = ws.head.prev
+		q.next = ws.head
+		q.prev.next = q
+		q.next.prev = q
+	}
+}
+
+func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) {
+	q := ws.streams[streamID]
+	if q == nil {
+		return
+	}
+	if q.next == q {
+		// This was the only open stream.
+		ws.head = nil
+	} else {
+		q.prev.next = q.next
+		q.next.prev = q.prev
+		if ws.head == q {
+			ws.head = q.next
+		}
+	}
+	delete(ws.streams, streamID)
+	ws.queuePool.put(q)
+}
+
+func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {}
+
+func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) {
+	if wr.isControl() {
+		ws.control.push(wr)
+		return
+	}
+	q := ws.streams[wr.StreamID()]
+	if q == nil {
+		// This is a closed stream.
+		// wr should not be a HEADERS or DATA frame.
+		// We push the request onto the control queue.
+		if wr.DataSize() > 0 {
+			panic("add DATA on non-open stream")
+		}
+		ws.control.push(wr)
+		return
+	}
+	q.push(wr)
+}
+
+func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) {
+	// Control and RST_STREAM frames first.
+	if !ws.control.empty() {
+		return ws.control.shift(), true
+	}
+	if ws.head == nil {
+		return FrameWriteRequest{}, false
+	}
+	q := ws.head
+	for {
+		if wr, ok := q.consume(math.MaxInt32); ok {
+			ws.head = q.next
+			return wr, true
+		}
+		q = q.next
+		if q == ws.head {
+			break
+		}
+	}
+	return FrameWriteRequest{}, false
+}
@@ -121,7 +121,7 @@ func CheckJoiners(enable bool) Option {
 	}
 }
 
-// StrictDomainName limits the set of permissable ASCII characters to those
+// StrictDomainName limits the set of permissible ASCII characters to those
 // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
 // hyphen). This is set by default for MapForLookup and ValidateForRegistration,
 // but is only useful if ValidateLabels is set.
[Two file diffs suppressed because they are too large.]
@@ -6,27 +6,6 @@
 
 package idna
 
-// appendMapping appends the mapping for the respective rune. isMapped must be
-// true. A mapping is a categorization of a rune as defined in UTS #46.
-func (c info) appendMapping(b []byte, s string) []byte {
-	index := int(c >> indexShift)
-	if c&xorBit == 0 {
-		s := mappings[index:]
-		return append(b, s[1:s[0]+1]...)
-	}
-	b = append(b, s...)
-	if c&inlineXOR == inlineXOR {
-		// TODO: support and handle two-byte inline masks
-		b[len(b)-1] ^= byte(index)
-	} else {
-		for p := len(b) - int(xorData[index]); p < len(b); p++ {
-			index++
-			b[p] ^= xorData[index]
-		}
-	}
-	return b
-}
-
 // Sparse block handling code.
 
 type valueRange struct {
@@ -0,0 +1,31 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.16
+// +build !go1.16
+
+package idna
+
+// appendMapping appends the mapping for the respective rune. isMapped must be
+// true. A mapping is a categorization of a rune as defined in UTS #46.
+func (c info) appendMapping(b []byte, s string) []byte {
+	index := int(c >> indexShift)
+	if c&xorBit == 0 {
+		s := mappings[index:]
+		return append(b, s[1:s[0]+1]...)
+	}
+	b = append(b, s...)
+	if c&inlineXOR == inlineXOR {
+		// TODO: support and handle two-byte inline masks
+		b[len(b)-1] ^= byte(index)
+	} else {
+		for p := len(b) - int(xorData[index]); p < len(b); p++ {
+			index++
+			b[p] ^= xorData[index]
+		}
+	}
+	return b
+}
@@ -0,0 +1,31 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.16
+// +build go1.16
+
+package idna
+
+// appendMapping appends the mapping for the respective rune. isMapped must be
+// true. A mapping is a categorization of a rune as defined in UTS #46.
+func (c info) appendMapping(b []byte, s string) []byte {
+	index := int(c >> indexShift)
+	if c&xorBit == 0 {
+		p := index
+		return append(b, mappings[mappingIndex[p]:mappingIndex[p+1]]...)
+	}
+	b = append(b, s...)
+	if c&inlineXOR == inlineXOR {
+		// TODO: support and handle two-byte inline masks
+		b[len(b)-1] ^= byte(index)
+	} else {
+		for p := len(b) - int(xorData[index]); p < len(b); p++ {
+			index++
+			b[p] ^= xorData[index]
+		}
+	}
+	return b
+}
@@ -317,7 +317,7 @@ golang.org/x/crypto/pbkdf2
 # golang.org/x/mod v0.10.0
 ## explicit; go 1.17
 golang.org/x/mod/semver
-# golang.org/x/net v0.10.0
+# golang.org/x/net v0.17.0
 ## explicit; go 1.17
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2