Skip to content

Commit 9a6d5a0

Browse files
torvalds authored and Alex Shi committed
x86: fix SMAP in 32-bit environments
In commit 11f1a4b9755f ("x86: reorganize SMAP handling in user space accesses") I changed how the stac/clac instructions were generated around the user space accesses, which then made it possible to do batched accesses efficiently for user string copies etc. However, in doing so, I completely spaced out, and didn't even think about the 32-bit case. And nobody really even seemed to notice, because SMAP doesn't even exist until modern Skylake processors, and you'd have to be crazy to run 32-bit kernels on a modern CPU. Which brings us to Andy Lutomirski. He actually tested the 32-bit kernel on new hardware, and noticed that it doesn't work. My bad. The trivial fix is to add the required uaccess begin/end markers around the raw accesses in <asm/uaccess_32.h>. I feel a bit bad about this patch, just because that header file really should be cleaned up to avoid all the duplicated code in it, and this commit just expands on the problem. But this just fixes the bug without any bigger cleanup surgery. Reported-and-tested-by: Andy Lutomirski <luto@kernel.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> (cherry picked from commit de9e478b9d49f3a0214310d921450cf5bb4a21e6) Signed-off-by: Alex Shi <alex.shi@linaro.org>
1 parent ea2e77f commit 9a6d5a0

1 file changed

Lines changed: 26 additions & 0 deletions

File tree

arch/x86/include/asm/uaccess_32.h

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
4848

4949
switch (n) {
5050
case 1:
51+
__uaccess_begin();
5152
__put_user_size(*(u8 *)from, (u8 __user *)to,
5253
1, ret, 1);
54+
__uaccess_end();
5355
return ret;
5456
case 2:
57+
__uaccess_begin();
5558
__put_user_size(*(u16 *)from, (u16 __user *)to,
5659
2, ret, 2);
60+
__uaccess_end();
5761
return ret;
5862
case 4:
63+
__uaccess_begin();
5964
__put_user_size(*(u32 *)from, (u32 __user *)to,
6065
4, ret, 4);
66+
__uaccess_end();
6167
return ret;
6268
case 8:
69+
__uaccess_begin();
6370
__put_user_size(*(u64 *)from, (u64 __user *)to,
6471
8, ret, 8);
72+
__uaccess_end();
6573
return ret;
6674
}
6775
}
@@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
103111

104112
switch (n) {
105113
case 1:
114+
__uaccess_begin();
106115
__get_user_size(*(u8 *)to, from, 1, ret, 1);
116+
__uaccess_end();
107117
return ret;
108118
case 2:
119+
__uaccess_begin();
109120
__get_user_size(*(u16 *)to, from, 2, ret, 2);
121+
__uaccess_end();
110122
return ret;
111123
case 4:
124+
__uaccess_begin();
112125
__get_user_size(*(u32 *)to, from, 4, ret, 4);
126+
__uaccess_end();
113127
return ret;
114128
}
115129
}
@@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
148162

149163
switch (n) {
150164
case 1:
165+
__uaccess_begin();
151166
__get_user_size(*(u8 *)to, from, 1, ret, 1);
167+
__uaccess_end();
152168
return ret;
153169
case 2:
170+
__uaccess_begin();
154171
__get_user_size(*(u16 *)to, from, 2, ret, 2);
172+
__uaccess_end();
155173
return ret;
156174
case 4:
175+
__uaccess_begin();
157176
__get_user_size(*(u32 *)to, from, 4, ret, 4);
177+
__uaccess_end();
158178
return ret;
159179
}
160180
}
@@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
170190

171191
switch (n) {
172192
case 1:
193+
__uaccess_begin();
173194
__get_user_size(*(u8 *)to, from, 1, ret, 1);
195+
__uaccess_end();
174196
return ret;
175197
case 2:
198+
__uaccess_begin();
176199
__get_user_size(*(u16 *)to, from, 2, ret, 2);
200+
__uaccess_end();
177201
return ret;
178202
case 4:
203+
__uaccess_begin();
179204
__get_user_size(*(u32 *)to, from, 4, ret, 4);
205+
__uaccess_end();
180206
return ret;
181207
}
182208
}

0 commit comments

Comments (0)