Skip to content

Commit 4663a70

Browse files
Kees Cook authored and Alex Shi committed
usercopy: remove page-spanning test for now
A custom allocator without __GFP_COMP that copies to userspace has been found in vmw_execbuf_process[1], so this disables the page-span checker by placing it behind a CONFIG for future work where such things can be tracked down later. [1] https://bugzilla.redhat.com/show_bug.cgi?id=1373326 Reported-by: Vinson Lee <vlee@freedesktop.org> Fixes: f5509cc18daa ("mm: Hardened usercopy") Signed-off-by: Kees Cook <keescook@chromium.org> (cherry picked from commit 8e1f74ea02cf4562404c48c6882214821552c13f) Signed-off-by: Alex Shi <alex.shi@linaro.org>
1 parent 455003e commit 4663a70

2 files changed

Lines changed: 46 additions & 26 deletions

File tree

mm/usercopy.c

Lines changed: 35 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -134,30 +134,15 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
134134
return NULL;
135135
}
136136

137-
static inline const char *check_heap_object(const void *ptr, unsigned long n,
138-
bool to_user)
137+
/* Checks for allocs that are marked in some way as spanning multiple pages. */
138+
static inline const char *check_page_span(const void *ptr, unsigned long n,
139+
struct page *page, bool to_user)
139140
{
140-
struct page *page, *endpage;
141+
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
141142
const void *end = ptr + n - 1;
143+
struct page *endpage;
142144
bool is_reserved, is_cma;
143145

144-
/*
145-
* Some architectures (arm64) return true for virt_addr_valid() on
146-
* vmalloced addresses. Work around this by checking for vmalloc
147-
* first.
148-
*/
149-
if (is_vmalloc_addr(ptr))
150-
return NULL;
151-
152-
if (!virt_addr_valid(ptr))
153-
return NULL;
154-
155-
page = virt_to_head_page(ptr);
156-
157-
/* Check slab allocator for flags and size. */
158-
if (PageSlab(page))
159-
return __check_heap_object(ptr, n, page);
160-
161146
/*
162147
* Sometimes the kernel data regions are not marked Reserved (see
163148
* check below). And sometimes [_sdata,_edata) does not cover
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
186171
((unsigned long)end & (unsigned long)PAGE_MASK)))
187172
return NULL;
188173

189-
/* Allow if start and end are inside the same compound page. */
174+
/* Allow if fully inside the same compound (__GFP_COMP) page. */
190175
endpage = virt_to_head_page(end);
191176
if (likely(endpage == page))
192177
return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
199184
is_reserved = PageReserved(page);
200185
is_cma = is_migrate_cma_page(page);
201186
if (!is_reserved && !is_cma)
202-
goto reject;
187+
return "<spans multiple pages>";
203188

204189
for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
205190
page = virt_to_head_page(ptr);
206191
if (is_reserved && !PageReserved(page))
207-
goto reject;
192+
return "<spans Reserved and non-Reserved pages>";
208193
if (is_cma && !is_migrate_cma_page(page))
209-
goto reject;
194+
return "<spans CMA and non-CMA pages>";
210195
}
196+
#endif
211197

212198
return NULL;
199+
}
200+
201+
static inline const char *check_heap_object(const void *ptr, unsigned long n,
202+
bool to_user)
203+
{
204+
struct page *page;
205+
206+
/*
207+
* Some architectures (arm64) return true for virt_addr_valid() on
208+
* vmalloced addresses. Work around this by checking for vmalloc
209+
* first.
210+
*/
211+
if (is_vmalloc_addr(ptr))
212+
return NULL;
213+
214+
if (!virt_addr_valid(ptr))
215+
return NULL;
216+
217+
page = virt_to_head_page(ptr);
218+
219+
/* Check slab allocator for flags and size. */
220+
if (PageSlab(page))
221+
return __check_heap_object(ptr, n, page);
213222

214-
reject:
215-
return "<spans multiple pages>";
223+
/* Verify object does not incorrectly span multiple pages. */
224+
return check_page_span(ptr, n, page, to_user);
216225
}
217226

218227
/*

security/Kconfig

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -146,6 +146,17 @@ config HARDENED_USERCOPY
146146
or are part of the kernel text. This kills entire classes
147147
of heap overflow exploits and similar kernel memory exposures.
148148

149+
config HARDENED_USERCOPY_PAGESPAN
150+
bool "Refuse to copy allocations that span multiple pages"
151+
depends on HARDENED_USERCOPY
152+
depends on !COMPILE_TEST
153+
help
154+
When a multi-page allocation is done without __GFP_COMP,
155+
hardened usercopy will reject attempts to copy it. There are,
156+
however, several cases of this in the kernel that have not all
157+
been removed. This config is intended to be used only while
158+
trying to find such users.
159+
149160
source security/selinux/Kconfig
150161
source security/smack/Kconfig
151162
source security/tomoyo/Kconfig

0 commit comments

Comments (0)