Skip to content

Commit 17b5775

Browse files
authored
kata-containers(-cc): Fix CVE-2023-44487 (#12931)
Manual backport of the gRPC fix for CVE-2023-44487 to version 1.47.0. The upstream gRPC CVE fix was only backported as far back as 1.56.3. Signed-off-by: Manuel Huber <mahuber@microsoft.com>
1 parent d029ed6 commit 17b5775

4 files changed

Lines changed: 464 additions & 2 deletions

File tree

Lines changed: 227 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,227 @@
1+
From 99f0248a3060aa55599e867ece965119851f19c8 Mon Sep 17 00:00:00 2001
2+
From: Manuel Huber <mahuber@microsoft.com>
3+
Date: Wed, 5 Mar 2025 16:18:29 +0000
4+
Subject: [PATCH] runtime: fix CVE-2023-44487
5+
6+
Fix this CVE in the grpc vendor dependency.
7+
First, apply 6eabd7e1834e47b20f55cbe9d473fc607c693358,
8+
then apply the actual CVE fix that was backported to the
9+
oldest version (1.53.0), i.e., commit
10+
5efd7bd73e11fea58d1c7f1c110902e78a286299 in modified form.
11+
Modified, because v1.47.0 does not yet have the _test files.
12+
13+
Signed-off-by: Manuel Huber <mahuber@microsoft.com>
14+
---
15+
.../grpc/internal/transport/http2_server.go | 11 +--
16+
.../vendor/google.golang.org/grpc/server.go | 93 +++++++++++--------
17+
2 files changed, 59 insertions(+), 45 deletions(-)
18+
19+
diff --git a/src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_server.go
20+
index 45d7bd145e..f5edd95f69 100644
21+
--- a/src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_server.go
22+
+++ b/src/runtime/vendor/google.golang.org/grpc/internal/transport/http2_server.go
23+
@@ -165,15 +165,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
24+
ID: http2.SettingMaxFrameSize,
25+
Val: http2MaxFrameLen,
26+
}}
27+
- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
28+
- // permitted in the HTTP2 spec.
29+
- maxStreams := config.MaxStreams
30+
- if maxStreams == 0 {
31+
- maxStreams = math.MaxUint32
32+
- } else {
33+
+ if config.MaxStreams != math.MaxUint32 {
34+
isettings = append(isettings, http2.Setting{
35+
ID: http2.SettingMaxConcurrentStreams,
36+
- Val: maxStreams,
37+
+ Val: config.MaxStreams,
38+
})
39+
}
40+
dynamicWindow := true
41+
@@ -252,7 +247,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
42+
framer: framer,
43+
readerDone: make(chan struct{}),
44+
writerDone: make(chan struct{}),
45+
- maxStreams: maxStreams,
46+
+ maxStreams: config.MaxStreams,
47+
inTapHandle: config.InTapHandle,
48+
fc: &trInFlow{limit: uint32(icwz)},
49+
state: reachable,
50+
diff --git a/src/runtime/vendor/google.golang.org/grpc/server.go b/src/runtime/vendor/google.golang.org/grpc/server.go
51+
index 65de84b300..63dc911d65 100644
52+
--- a/src/runtime/vendor/google.golang.org/grpc/server.go
53+
+++ b/src/runtime/vendor/google.golang.org/grpc/server.go
54+
@@ -43,7 +43,6 @@ import (
55+
"google.golang.org/grpc/internal"
56+
"google.golang.org/grpc/internal/binarylog"
57+
"google.golang.org/grpc/internal/channelz"
58+
- "google.golang.org/grpc/internal/grpcrand"
59+
"google.golang.org/grpc/internal/grpcsync"
60+
"google.golang.org/grpc/internal/transport"
61+
"google.golang.org/grpc/keepalive"
62+
@@ -107,12 +106,6 @@ type serviceInfo struct {
63+
mdata interface{}
64+
}
65+
66+
-type serverWorkerData struct {
67+
- st transport.ServerTransport
68+
- wg *sync.WaitGroup
69+
- stream *transport.Stream
70+
-}
71+
-
72+
// Server is a gRPC server to serve RPC requests.
73+
type Server struct {
74+
opts serverOptions
75+
@@ -137,7 +130,7 @@ type Server struct {
76+
channelzID *channelz.Identifier
77+
czData *channelzData
78+
79+
- serverWorkerChannels []chan *serverWorkerData
80+
+ serverWorkerChannel chan func()
81+
}
82+
83+
type serverOptions struct {
84+
@@ -168,6 +161,7 @@ type serverOptions struct {
85+
}
86+
87+
var defaultServerOptions = serverOptions{
88+
+ maxConcurrentStreams: math.MaxUint32,
89+
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
90+
maxSendMessageSize: defaultServerMaxSendMessageSize,
91+
connectionTimeout: 120 * time.Second,
92+
@@ -361,6 +355,9 @@ func MaxSendMsgSize(m int) ServerOption {
93+
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
94+
// of concurrent streams to each ServerTransport.
95+
func MaxConcurrentStreams(n uint32) ServerOption {
96+
+ if n == 0 {
97+
+ n = math.MaxUint32
98+
+ }
99+
return newFuncServerOption(func(o *serverOptions) {
100+
o.maxConcurrentStreams = n
101+
})
102+
@@ -520,40 +517,33 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
103+
const serverWorkerResetThreshold = 1 << 16
104+
105+
// serverWorkers blocks on a *transport.Stream channel forever and waits for
106+
-// data to be fed by serveStreams. This allows different requests to be
107+
+// data to be fed by serveStreams. This allows multiple requests to be
108+
// processed by the same goroutine, removing the need for expensive stack
109+
// re-allocations (see the runtime.morestack problem [1]).
110+
//
111+
// [1] https://github.com/golang/go/issues/18138
112+
-func (s *Server) serverWorker(ch chan *serverWorkerData) {
113+
- // To make sure all server workers don't reset at the same time, choose a
114+
- // random number of iterations before resetting.
115+
- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
116+
- for completed := 0; completed < threshold; completed++ {
117+
- data, ok := <-ch
118+
+func (s *Server) serverWorker() {
119+
+ for completed := 0; completed < serverWorkerResetThreshold; completed++ {
120+
+ f, ok := <-s.serverWorkerChannel
121+
if !ok {
122+
return
123+
}
124+
- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
125+
- data.wg.Done()
126+
+ f()
127+
}
128+
- go s.serverWorker(ch)
129+
+ go s.serverWorker()
130+
}
131+
132+
-// initServerWorkers creates worker goroutines and channels to process incoming
133+
+// initServerWorkers creates worker goroutines and a channel to process incoming
134+
// connections to reduce the time spent overall on runtime.morestack.
135+
func (s *Server) initServerWorkers() {
136+
- s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
137+
+ s.serverWorkerChannel = make(chan func())
138+
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
139+
- s.serverWorkerChannels[i] = make(chan *serverWorkerData)
140+
- go s.serverWorker(s.serverWorkerChannels[i])
141+
+ go s.serverWorker()
142+
}
143+
}
144+
145+
func (s *Server) stopServerWorkers() {
146+
- for i := uint32(0); i < s.opts.numServerWorkers; i++ {
147+
- close(s.serverWorkerChannels[i])
148+
- }
149+
+ close(s.serverWorkerChannel)
150+
}
151+
152+
// NewServer creates a gRPC server which has no service registered and has not
153+
@@ -902,26 +892,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
154+
defer st.Close()
155+
var wg sync.WaitGroup
156+
157+
- var roundRobinCounter uint32
158+
+ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
159+
st.HandleStreams(func(stream *transport.Stream) {
160+
wg.Add(1)
161+
+
162+
+ streamQuota.acquire()
163+
+ f := func() {
164+
+ defer streamQuota.release()
165+
+ defer wg.Done()
166+
+ s.handleStream(st, stream, s.traceInfo(st, stream))
167+
+ }
168+
+
169+
if s.opts.numServerWorkers > 0 {
170+
- data := &serverWorkerData{st: st, wg: &wg, stream: stream}
171+
select {
172+
- case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
173+
+ case s.serverWorkerChannel <- f:
174+
+ return
175+
default:
176+
// If all stream workers are busy, fallback to the default code path.
177+
- go func() {
178+
- s.handleStream(st, stream, s.traceInfo(st, stream))
179+
- wg.Done()
180+
- }()
181+
}
182+
- } else {
183+
- go func() {
184+
- defer wg.Done()
185+
- s.handleStream(st, stream, s.traceInfo(st, stream))
186+
- }()
187+
}
188+
+ go f()
189+
}, func(ctx context.Context, method string) context.Context {
190+
if !EnableTracing {
191+
return ctx
192+
@@ -1885,3 +1875,32 @@ type channelzServer struct {
193+
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
194+
return c.s.channelzMetric()
195+
}
196+
+
197+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
198+
+// called synchronously; release may be called asynchronously.
199+
+type atomicSemaphore struct {
200+
+ n int64
201+
+ wait chan struct{}
202+
+}
203+
+
204+
+func (q *atomicSemaphore) acquire() {
205+
+ if atomic.AddInt64(&q.n, -1) < 0 {
206+
+ // We ran out of quota. Block until a release happens.
207+
+ <-q.wait
208+
+ }
209+
+}
210+
+
211+
+func (q *atomicSemaphore) release() {
212+
+ // N.B. the "<= 0" check below should allow for this to work with multiple
213+
+ // concurrent calls to acquire, but also note that with synchronous calls to
214+
+ // acquire, as our system does, n will never be less than -1. There are
215+
+ // fairness issues (queuing) to consider if this was to be generalized.
216+
+ if atomic.AddInt64(&q.n, 1) <= 0 {
217+
+ // An acquire was waiting on us. Unblock it.
218+
+ q.wait <- struct{}{}
219+
+ }
220+
+}
221+
+
222+
+func newHandlerQuota(n uint32) *atomicSemaphore {
223+
+ return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
224+
+}
225+
--
226+
2.39.4
227+

SPECS/kata-containers-cc/kata-containers-cc.spec

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313

1414
Name: kata-containers-cc
1515
Version: 3.2.0.azl2
16-
Release: 5%{?dist}
16+
Release: 6%{?dist}
1717
Summary: Kata Confidential Containers package developed for Confidential Containers on AKS
1818
License: ASL 2.0
1919
Vendor: Microsoft Corporation
@@ -24,6 +24,7 @@ Source2: mariner-coco-build-uvm.sh
2424
Patch0: CVE-2023-45288.patch
2525
Patch1: CVE-2023-39325.patch
2626
Patch2: CVE-2024-24786.patch
27+
Patch3: CVE-2023-44487.patch
2728

2829
ExclusiveArch: x86_64
2930

@@ -291,6 +292,9 @@ install -D -m 0755 %{_builddir}/%{name}-%{version}/tools/osbuilder/image-builder
291292
%exclude %{osbuilder}/tools/osbuilder/rootfs-builder/ubuntu
292293

293294
%changelog
295+
* Mon Mar 10 2025 Manuel Huber <mahuber@microsoft.com> - 3.2.0.azl2-6
296+
- Add patch for CVE-2023-44487
297+
294298
* Wed Nov 27 2024 Aadhar Agarwal <aadagarwal@microsoft.com> - 3.2.0.azl2-5
295299
- Add patches for CVE-2023-45288, CVE-2023-39325 and CVE-2024-24786
296300

0 commit comments

Comments (0)