Skip to content

Commit 4f80bcb

Browse files
Kees Cook authored and Alex Shi committed
x86/uaccess: Enable hardened usercopy
Enables CONFIG_HARDENED_USERCOPY checks on x86. This is done both in
copy_*_user() and __copy_*_user() because copy_*_user() actually calls
down to _copy_*_user() and not __copy_*_user().

Based on code from PaX and grsecurity.

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
(cherry picked from commit 5b710f34e194c6b7710f69fdb5d798fdf35b98c1)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
1 parent 41a69b5 commit 4f80bcb

4 files changed

Lines changed: 11 additions & 4 deletions

File tree

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,7 @@ config X86
7777
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
7878
select HAVE_AOUT if X86_32
7979
select HAVE_ARCH_AUDITSYSCALL
80+
select HAVE_ARCH_HARDENED_USERCOPY
8081
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
8182
select HAVE_ARCH_JUMP_LABEL
8283
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP

arch/x86/include/asm/uaccess.h

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -731,9 +731,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
731731
* case, and do only runtime checking for non-constant sizes.
732732
*/
733733

734-
if (likely(sz < 0 || sz >= n))
734+
if (likely(sz < 0 || sz >= n)) {
735+
check_object_size(to, n, false);
735736
n = _copy_from_user(to, from, n);
736-
else if(__builtin_constant_p(n))
737+
} else if (__builtin_constant_p(n))
737738
copy_from_user_overflow();
738739
else
739740
__copy_from_user_overflow(sz, n);
@@ -749,9 +750,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
749750
might_fault();
750751

751752
/* See the comment in copy_from_user() above. */
752-
if (likely(sz < 0 || sz >= n))
753+
if (likely(sz < 0 || sz >= n)) {
754+
check_object_size(from, n, true);
753755
n = _copy_to_user(to, from, n);
754-
else if(__builtin_constant_p(n))
756+
} else if (__builtin_constant_p(n))
755757
copy_to_user_overflow();
756758
else
757759
__copy_to_user_overflow(sz, n);

arch/x86/include/asm/uaccess_32.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
3737
static __always_inline unsigned long __must_check
3838
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
3939
{
40+
check_object_size(from, n, true);
4041
return __copy_to_user_ll(to, from, n);
4142
}
4243

@@ -95,6 +96,7 @@ static __always_inline unsigned long
9596
__copy_from_user(void *to, const void __user *from, unsigned long n)
9697
{
9798
might_fault();
99+
check_object_size(to, n, false);
98100
if (__builtin_constant_p(n)) {
99101
unsigned long ret;
100102

arch/x86/include/asm/uaccess_64.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
5353
{
5454
int ret = 0;
5555

56+
check_object_size(dst, size, false);
5657
if (!__builtin_constant_p(size))
5758
return copy_user_generic(dst, (__force void *)src, size);
5859
switch (size) {
@@ -117,6 +118,7 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
117118
{
118119
int ret = 0;
119120

121+
check_object_size(src, size, true);
120122
if (!__builtin_constant_p(size))
121123
return copy_user_generic((__force void *)dst, src, size);
122124
switch (size) {

0 commit comments

Comments
 (0)