vendor: golang.org/x/net v0.17.0

full diff: https://github.com/golang/net/compare/v0.13.0...v0.17.0

This fixes the same CVE as go1.21.3 and go1.20.10:

- net/http: rapid stream resets can cause excessive work

A malicious HTTP/2 client which rapidly creates requests and
immediately resets them can cause excessive server resource consumption.
While the total number of requests is bounded by the
http2.Server.MaxConcurrentStreams setting, resetting an in-progress
request allows the attacker to create a new request while the existing
one is still executing.

HTTP/2 servers now bound the number of simultaneously executing
handler goroutines to the stream concurrency limit. New requests
arriving when at the limit (which can only happen after the client
has reset an existing, in-flight request) will be queued until a
handler exits. If the request queue grows too large, the server
will terminate the connection.
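
The concrete scheduling code lands in vendor/golang.org/x/net/http2/server.go
below (scheduleHandler and handlerDone). As a rough sketch of the idea only
(the names and types here are simplified stand-ins, not the upstream API):

    package sketch

    import "errors"

    // schedule runs h immediately while fewer than limit handlers are active;
    // otherwise it queues h. Once the backlog of queued handlers exceeds 4x
    // the advertised stream limit, it reports an error and the caller is
    // expected to tear down the connection. The real code runs on the
    // connection's serve goroutine, so no locking is shown here.
    func schedule(running *int, queue *[]func(), limit int, h func()) error {
        if *running < limit {
            *running++
            go h()
            return nil
        }
        if len(*queue) > 4*limit {
            return errors.New("too many early resets")
        }
        *queue = append(*queue, h)
        return nil
    }
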
This issue is also fixed in golang.org/x/net/http2 v0.17.0,
for users manually configuring HTTP/2.

The default stream concurrency limit is 250 streams (requests)
per HTTP/2 connection. This value may be adjusted using the
golang.org/x/net/http2 package; see the Server.MaxConcurrentStreams
setting and the ConfigureServer function.
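
As a minimal sketch of that manual configuration (the address, the limit of
100, and the certificate paths are illustrative placeholders, not part of
this change):

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

        // Advertise a lower SETTINGS_MAX_CONCURRENT_STREAMS than the default
        // of 250; with x/net v0.17.0 this also bounds the number of handler
        // goroutines running concurrently on each HTTP/2 connection.
        if err := http2.ConfigureServer(srv, &http2.Server{
            MaxConcurrentStreams: 100,
        }); err != nil {
            log.Fatal(err)
        }

        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
    }
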
This is CVE-2023-39325 and Go issue https://go.dev/issue/63417.
This is also tracked by CVE-2023-44487.
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 1800dd0876)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

parent 8864727bae
commit dde5995051

7 changed files with 101 additions and 80 deletions

@@ -87,7 +87,7 @@ require (
 	github.com/vishvananda/netlink v1.2.1-beta.2
 	github.com/vishvananda/netns v0.0.2
 	go.etcd.io/bbolt v1.3.7
-	golang.org/x/net v0.13.0
+	golang.org/x/net v0.17.0
 	golang.org/x/sync v0.1.0
 	golang.org/x/sys v0.13.0
 	golang.org/x/text v0.13.0

@@ -1661,8 +1661,8 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su
 golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
-golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=

vendor/golang.org/x/net/http2/Dockerfile (generated, vendored): 51 deletions

@@ -1,51 +0,0 @@
-#
-# This Dockerfile builds a recent curl with HTTP/2 client support, using
-# a recent nghttp2 build.
-#
-# See the Makefile for how to tag it. If Docker and that image is found, the
-# Go tests use this curl binary for integration tests.
-#
-
-FROM ubuntu:trusty
-
-RUN apt-get update && \
-    apt-get upgrade -y && \
-    apt-get install -y git-core build-essential wget
-
-RUN apt-get install -y --no-install-recommends \
-       autotools-dev libtool pkg-config zlib1g-dev \
-       libcunit1-dev libssl-dev libxml2-dev libevent-dev \
-       automake autoconf
-
-# The list of packages nghttp2 recommends for h2load:
-RUN apt-get install -y --no-install-recommends make binutils \
-       autoconf automake autotools-dev \
-       libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
-       libev-dev libevent-dev libjansson-dev libjemalloc-dev \
-       cython python3.4-dev python-setuptools
-
-# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
-ENV NGHTTP2_VER 895da9a
-RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
-
-WORKDIR /root/nghttp2
-RUN git reset --hard $NGHTTP2_VER
-RUN autoreconf -i
-RUN automake
-RUN autoconf
-RUN ./configure
-RUN make
-RUN make install
-
-WORKDIR /root
-RUN wget https://curl.se/download/curl-7.45.0.tar.gz
-RUN tar -zxvf curl-7.45.0.tar.gz
-WORKDIR /root/curl-7.45.0
-RUN ./configure --with-ssl --with-nghttp2=/usr/local
-RUN make
-RUN make install
-RUN ldconfig
-
-CMD ["-h"]
-ENTRYPOINT ["/usr/local/bin/curl"]

vendor/golang.org/x/net/http2/Makefile (generated, vendored): 3 deletions

@@ -1,3 +0,0 @@
-curlimage:
-	docker build -t gohttp2/curl .
-

vendor/golang.org/x/net/http2/server.go (generated, vendored): 86 changes

@@ -581,9 +581,11 @@ type serverConn struct {
 	advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
 	curClientStreams uint32 // number of open streams initiated by the client
 	curPushedStreams uint32 // number of open streams initiated by server push
+	curHandlers uint32 // number of running handler goroutines
 	maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
 	maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
 	streams map[uint32]*stream
+	unstartedHandlers []unstartedHandler
 	initialStreamSendWindowSize int32
 	maxFrameSize int32
 	peerMaxHeaderListSize uint32 // zero means unknown (default)

@@ -981,6 +983,8 @@ func (sc *serverConn) serve() {
 					return
 				case gracefulShutdownMsg:
 					sc.startGracefulShutdownInternal()
+				case handlerDoneMsg:
+					sc.handlerDone()
 				default:
 					panic("unknown timer")
 				}

@@ -1012,14 +1016,6 @@ func (sc *serverConn) serve() {
 	}
 }
 
-func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
-	select {
-	case <-sc.doneServing:
-	case <-sharedCh:
-		close(privateCh)
-	}
-}
-
 type serverMessage int
 
 // Message values sent to serveMsgCh.

@@ -1028,6 +1024,7 @@ var (
 	idleTimerMsg = new(serverMessage)
 	shutdownTimerMsg = new(serverMessage)
 	gracefulShutdownMsg = new(serverMessage)
+	handlerDoneMsg = new(serverMessage)
 )
 
 func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }

@@ -1900,9 +1897,11 @@ func (st *stream) copyTrailersToHandlerRequest() {
 // onReadTimeout is run on its own goroutine (from time.AfterFunc)
 // when the stream's ReadTimeout has fired.
 func (st *stream) onReadTimeout() {
-	// Wrap the ErrDeadlineExceeded to avoid callers depending on us
-	// returning the bare error.
-	st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	if st.body != nil {
+		// Wrap the ErrDeadlineExceeded to avoid callers depending on us
+		// returning the bare error.
+		st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+	}
 }
 
 // onWriteTimeout is run on its own goroutine (from time.AfterFunc)

@@ -2020,13 +2019,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout != 0 {
 		sc.conn.SetReadDeadline(time.Time{})
-		if st.body != nil {
-			st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
-		}
+		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
 	}
 
-	go sc.runHandler(rw, req, handler)
-	return nil
+	return sc.scheduleHandler(id, rw, req, handler)
 }
 
 func (sc *serverConn) upgradeRequest(req *http.Request) {

@@ -2046,6 +2042,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
 		sc.conn.SetReadDeadline(time.Time{})
 	}
 
+	// This is the first request on the connection,
+	// so start the handler directly rather than going
+	// through scheduleHandler.
+	sc.curHandlers++
 	go sc.runHandler(rw, req, sc.handler.ServeHTTP)
 }
 

@@ -2286,8 +2286,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
 	return &responseWriter{rws: rws}
 }
 
+type unstartedHandler struct {
+	streamID uint32
+	rw       *responseWriter
+	req      *http.Request
+	handler  func(http.ResponseWriter, *http.Request)
+}
+
+// scheduleHandler starts a handler goroutine,
+// or schedules one to start as soon as an existing handler finishes.
+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
+	sc.serveG.check()
+	maxHandlers := sc.advMaxStreams
+	if sc.curHandlers < maxHandlers {
+		sc.curHandlers++
+		go sc.runHandler(rw, req, handler)
+		return nil
+	}
+	if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
+		return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
+	}
+	sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
+		streamID: streamID,
+		rw:       rw,
+		req:      req,
+		handler:  handler,
+	})
+	return nil
+}
+
+func (sc *serverConn) handlerDone() {
+	sc.serveG.check()
+	sc.curHandlers--
+	i := 0
+	maxHandlers := sc.advMaxStreams
+	for ; i < len(sc.unstartedHandlers); i++ {
+		u := sc.unstartedHandlers[i]
+		if sc.streams[u.streamID] == nil {
+			// This stream was reset before its goroutine had a chance to start.
+			continue
+		}
+		if sc.curHandlers >= maxHandlers {
+			break
+		}
+		sc.curHandlers++
+		go sc.runHandler(u.rw, u.req, u.handler)
+		sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
+	}
+	sc.unstartedHandlers = sc.unstartedHandlers[i:]
+	if len(sc.unstartedHandlers) == 0 {
+		sc.unstartedHandlers = nil
+	}
+}
+
 // Run on its own goroutine.
 func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+	defer sc.sendServeMsg(handlerDoneMsg)
 	didPanic := true
 	defer func() {
 		rw.rws.stream.cancelCtx()

vendor/golang.org/x/net/http2/transport.go (generated, vendored): 33 changes

@@ -19,6 +19,7 @@ import (
 	"io/fs"
 	"log"
 	"math"
+	"math/bits"
 	mathrand "math/rand"
 	"net"
 	"net/http"

@@ -290,8 +291,7 @@ func (t *Transport) initConnPool() {
 // HTTP/2 server.
 type ClientConn struct {
 	t *Transport
-	tconn net.Conn // usually *tls.Conn, except specialized impls
-	tconnClosed bool
+	tconn net.Conn // usually *tls.Conn, except specialized impls
 	tlsState *tls.ConnectionState // nil only for specialized impls
 	reused uint32 // whether conn is being reused; atomic
 	singleUse bool // whether being used for a single http.Request

@@ -1680,7 +1680,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
 	return int(n) // doesn't truncate; max is 512K
 }
 
-var bufPool sync.Pool // of *[]byte
+// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
+// streaming requests using small frame sizes occupy large buffers initially allocated for prior
+// requests needing big buffers. The size ranges are as follows:
+// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
+// {256 KB, 512 KB], {512 KB, infinity}
+// In practice, the maximum scratch buffer size should not exceed 512 KB due to
+// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
+// It exists mainly as a safety measure, for potential future increases in max buffer size.
+var bufPools [7]sync.Pool // of *[]byte
+func bufPoolIndex(size int) int {
+	if size <= 16384 {
+		return 0
+	}
+	size -= 1
+	bits := bits.Len(uint(size))
+	index := bits - 14
+	if index >= len(bufPools) {
+		return len(bufPools) - 1
+	}
+	return index
+}
 
 func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
 	cc := cs.cc

@@ -1698,12 +1718,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
 	// Scratch buffer for reading into & writing from.
 	scratchLen := cs.frameScratchBufferLen(maxFrameSize)
 	var buf []byte
-	if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
-		defer bufPool.Put(bp)
+	index := bufPoolIndex(scratchLen)
+	if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
+		defer bufPools[index].Put(bp)
 		buf = *bp
 	} else {
 		buf = make([]byte, scratchLen)
-		defer bufPool.Put(&buf)
+		defer bufPools[index].Put(&buf)
 	}
 
 	var sawEOF bool

vendor/modules.txt (vendored): 2 changes

@@ -1081,7 +1081,7 @@ golang.org/x/crypto/pkcs12/internal/rc2
 golang.org/x/crypto/salsa20/salsa
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/net v0.13.0
+# golang.org/x/net v0.17.0
 ## explicit; go 1.17
 golang.org/x/net/bpf
 golang.org/x/net/context