Skip to content

Commit a264db1

Browse files
authored
Patch moby-engine to address CVE-2023-44487 (#9276)
1 parent a653950 commit a264db1

2 files changed

Lines changed: 205 additions & 1 deletion

File tree

Lines changed: 200 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,200 @@
1+
From acdb7b9731b3d1eb14352328d2976d4b7baaafea Mon Sep 17 00:00:00 2001
2+
From: Mitch Zhu <mitchzhu@microsoft.com>
3+
Date: Fri, 31 May 2024 17:00:00 +0000
4+
Subject: [PATCH] Address CVE-2023-44487
5+
6+
---
7+
.../grpc/internal/transport/http2_server.go | 11 +--
8+
vendor/google.golang.org/grpc/server.go | 77 +++++++++++++------
9+
2 files changed, 57 insertions(+), 31 deletions(-)
10+
11+
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
12+
index 3dd1564..9d9a3fd 100644
13+
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
14+
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
15+
@@ -165,15 +165,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
16+
ID: http2.SettingMaxFrameSize,
17+
Val: http2MaxFrameLen,
18+
}}
19+
- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
20+
- // permitted in the HTTP2 spec.
21+
- maxStreams := config.MaxStreams
22+
- if maxStreams == 0 {
23+
- maxStreams = math.MaxUint32
24+
- } else {
25+
+ if config.MaxStreams != math.MaxUint32 {
26+
isettings = append(isettings, http2.Setting{
27+
ID: http2.SettingMaxConcurrentStreams,
28+
- Val: maxStreams,
29+
+ Val: config.MaxStreams,
30+
})
31+
}
32+
dynamicWindow := true
33+
@@ -252,7 +247,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
34+
framer: framer,
35+
readerDone: make(chan struct{}),
36+
writerDone: make(chan struct{}),
37+
- maxStreams: maxStreams,
38+
+ maxStreams: config.MaxStreams,
39+
inTapHandle: config.InTapHandle,
40+
fc: &trInFlow{limit: uint32(icwz)},
41+
state: reachable,
42+
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
43+
index f4dde72..17d39cf 100644
44+
--- a/vendor/google.golang.org/grpc/server.go
45+
+++ b/vendor/google.golang.org/grpc/server.go
46+
@@ -115,12 +115,6 @@ type serviceInfo struct {
47+
mdata interface{}
48+
}
49+
50+
-type serverWorkerData struct {
51+
- st transport.ServerTransport
52+
- wg *sync.WaitGroup
53+
- stream *transport.Stream
54+
-}
55+
-
56+
// Server is a gRPC server to serve RPC requests.
57+
type Server struct {
58+
opts serverOptions
59+
@@ -145,7 +139,7 @@ type Server struct {
60+
channelzID *channelz.Identifier
61+
czData *channelzData
62+
63+
- serverWorkerChannels []chan *serverWorkerData
64+
+ serverWorkerChannel chan func()
65+
}
66+
67+
type serverOptions struct {
68+
@@ -177,6 +171,7 @@ type serverOptions struct {
69+
}
70+
71+
var defaultServerOptions = serverOptions{
72+
+ maxConcurrentStreams: math.MaxUint32,
73+
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
74+
maxSendMessageSize: defaultServerMaxSendMessageSize,
75+
connectionTimeout: 120 * time.Second,
76+
@@ -387,6 +382,9 @@ func MaxSendMsgSize(m int) ServerOption {
77+
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
78+
// of concurrent streams to each ServerTransport.
79+
func MaxConcurrentStreams(n uint32) ServerOption {
80+
+ if n == 0 {
81+
+ n = math.MaxUint32
82+
+ }
83+
return newFuncServerOption(func(o *serverOptions) {
84+
o.maxConcurrentStreams = n
85+
})
86+
@@ -565,35 +563,31 @@ const serverWorkerResetThreshold = 1 << 16
87+
// re-allocations (see the runtime.morestack problem [1]).
88+
//
89+
// [1] https://github.com/golang/go/issues/18138
90+
-func (s *Server) serverWorker(ch chan *serverWorkerData) {
91+
+func (s *Server) serverWorker() {
92+
// To make sure all server workers don't reset at the same time, choose a
93+
// random number of iterations before resetting.
94+
threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
95+
for completed := 0; completed < threshold; completed++ {
96+
- data, ok := <-ch
97+
+ f, ok := <-s.serverWorkerChannel
98+
if !ok {
99+
return
100+
}
101+
- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
102+
- data.wg.Done()
103+
+ f()
104+
}
105+
- go s.serverWorker(ch)
106+
+ go s.serverWorker()
107+
}
108+
109+
// initServerWorkers creates worker goroutines and channels to process incoming
110+
// connections to reduce the time spent overall on runtime.morestack.
111+
func (s *Server) initServerWorkers() {
112+
- s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
113+
+ s.serverWorkerChannel = make(chan func())
114+
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
115+
- s.serverWorkerChannels[i] = make(chan *serverWorkerData)
116+
- go s.serverWorker(s.serverWorkerChannels[i])
117+
+ go s.serverWorker()
118+
}
119+
}
120+
121+
func (s *Server) stopServerWorkers() {
122+
- for i := uint32(0); i < s.opts.numServerWorkers; i++ {
123+
- close(s.serverWorkerChannels[i])
124+
- }
125+
+ close(s.serverWorkerChannel)
126+
}
127+
128+
// NewServer creates a gRPC server which has no service registered and has not
129+
@@ -945,13 +939,20 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
130+
defer st.Close()
131+
var wg sync.WaitGroup
132+
133+
- var roundRobinCounter uint32
134+
+ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
135+
st.HandleStreams(func(stream *transport.Stream) {
136+
wg.Add(1)
137+
+
138+
+ streamQuota.acquire()
139+
+ f := func() {
140+
+ defer streamQuota.release()
141+
+ defer wg.Done()
142+
+ s.handleStream(st, stream, s.traceInfo(st, stream))
143+
+ }
144+
+
145+
if s.opts.numServerWorkers > 0 {
146+
- data := &serverWorkerData{st: st, wg: &wg, stream: stream}
147+
select {
148+
- case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
149+
+ case s.serverWorkerChannel <- f:
150+
default:
151+
// If all stream workers are busy, fallback to the default code path.
152+
go func() {
153+
@@ -961,8 +962,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
154+
}
155+
} else {
156+
go func() {
157+
- defer wg.Done()
158+
- s.handleStream(st, stream, s.traceInfo(st, stream))
159+
+ go f()
160+
}()
161+
}
162+
}, func(ctx context.Context, method string) context.Context {
163+
@@ -1978,3 +1978,34 @@ type channelzServer struct {
164+
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
165+
return c.s.channelzMetric()
166+
}
167+
+
168+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
169+
+// called synchronously; release may be called asynchronously.
170+
+type atomicSemaphore struct {
171+
+ n atomic.Int64
172+
+ wait chan struct{}
173+
+}
174+
+
175+
+func (q *atomicSemaphore) acquire() {
176+
+ if q.n.Add(-1) < 0 {
177+
+ // We ran out of quota. Block until a release happens.
178+
+ <-q.wait
179+
+ }
180+
+}
181+
+
182+
+func (q *atomicSemaphore) release() {
183+
+ // N.B. the "<= 0" check below should allow for this to work with multiple
184+
+ // concurrent calls to acquire, but also note that with synchronous calls to
185+
+ // acquire, as our system does, n will never be less than -1. There are
186+
+ // fairness issues (queuing) to consider if this was to be generalized.
187+
+ if q.n.Add(1) <= 0 {
188+
+ // An acquire was waiting on us. Unblock it.
189+
+ q.wait <- struct{}{}
190+
+ }
191+
+}
192+
+
193+
+func newHandlerQuota(n uint32) *atomicSemaphore {
194+
+ a := &atomicSemaphore{wait: make(chan struct{}, 1)}
195+
+ a.n.Store(int64(n))
196+
+ return a
197+
+}
198+
--
199+
2.34.1
200+

SPECS/moby-engine/moby-engine.spec

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
Summary: The open-source application container engine
44
Name: moby-engine
55
Version: 24.0.9
6-
Release: 3%{?dist}
6+
Release: 4%{?dist}
77
License: ASL 2.0
88
Group: Tools/Container
99
URL: https://mobyproject.org
@@ -21,6 +21,7 @@ Patch1: CVE-2024-23651.patch
2121
# Remove once we upgrade this package at least to version 25.0+.
2222
Patch2: CVE-2024-23652.patch
2323
Patch3: CVE-2023-45288.patch
24+
Patch4: CVE-2023-44487.patch
2425

2526
%{?systemd_requires}
2627

@@ -126,6 +127,9 @@ fi
126127
%{_unitdir}/*
127128

128129
%changelog
130+
* Fri May 31 2024 Mitch Zhu <mitchzhu@microsoft.com> - 24.0.9-4
131+
- Fix for CVE-2023-44487
132+
129133
* Fri May 03 2024 Chris Gunn <chrisgun@microsoft.com> - 24.0.9-3
130134
- Fix for CVE-2023-45288
131135

0 commit comments

Comments (0)