Commit 1408a46

Author: Alex Shi (committed)
Merge branch 'v4.4/topic/mm-kaslr' into linux-linaro-lsk-v4.4
2 parents: 96597f6 + 0f0c7c1

9 files changed: 56 additions & 44 deletions

arch/arm64/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -775,7 +775,7 @@ config RELOCATABLE
 
 config RANDOMIZE_BASE
 	bool "Randomize the address of the kernel image"
-	select ARM64_MODULE_PLTS
+	select ARM64_MODULE_PLTS if MODULES
 	select RELOCATABLE
 	help
 	  Randomizes the virtual address at which the kernel image is

arch/arm64/Kconfig.debug

Lines changed: 3 additions & 3 deletions
@@ -64,13 +64,13 @@ config DEBUG_SET_MODULE_RONX
 
 config DEBUG_RODATA
 	bool "Make kernel text and rodata read-only"
+	default y
 	help
 	  If this is set, kernel text and rodata will be made read-only. This
 	  is to help catch accidental or malicious attempts to change the
-	  kernel's executable code. Additionally splits rodata from kernel
-	  text so it can be made explicitly non-executable.
+	  kernel's executable code.
 
-          If in doubt, say Y
+	  If in doubt, say Y
 
 config DEBUG_ALIGN_RODATA
 	depends on DEBUG_RODATA

arch/arm64/Makefile

Lines changed: 3 additions & 1 deletion
@@ -60,7 +60,9 @@ head-y		:= arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+		 int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+		 rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
 else
 TEXT_OFFSET := 0x00080000
 endif
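
The new expression picks a random offset below 2 MiB that is always a multiple of the configured page size (2^CONFIG_ARM64_PAGE_SHIFT), where the old form hard-coded 4 KiB granularity. A minimal standalone C sketch of the same arithmetic, assuming PAGE_SHIFT = 12 (4 KiB pages) purely for illustration:

	/* Hedged C sketch of the awk expression above; PAGE_SHIFT = 12 is
	 * an assumed example value, not taken from the commit. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define SZ_2M		(2UL * 1024 * 1024)

	int main(void)
	{
		unsigned long slots = SZ_2M / PAGE_SIZE;	/* 512 for 4 KiB pages */
		unsigned long offset;

		srand(time(NULL));
		/* pick a random page index below 2 MiB, then scale back to bytes */
		offset = (rand() % slots) * PAGE_SIZE;
		printf("TEXT_OFFSET := 0x%06lx\n", offset);
		return 0;
	}

With 64 KiB pages (PAGE_SHIFT = 16) the same expression yields 32 slots of 0x10000 bytes, so the offset stays page-aligned, which the old "0x%03x000" format could not guarantee.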

arch/arm64/include/asm/module.h

Lines changed: 5 additions & 0 deletions
@@ -17,6 +17,7 @@
 #define __ASM_MODULE_H
 
 #include <asm-generic/module.h>
+#include <asm/memory.h>
 
 #define MODULE_ARCH_VERMAGIC	"aarch64"
 
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
 			  Elf64_Sym *sym);
 
 #ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
+#endif
 extern u64 module_alloc_base;
 #else
 #define module_alloc_base	((u64)_etext - MODULES_VSIZE)
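
With KASLR, the kernel's exported-symbol CRC entries are displaced by the randomization offset (kimage_vaddr - KIMAGE_VADDR), so CONFIG_MODVERSIONS comparisons must undo that displacement. Defining ARCH_RELOCATES_KCRCTAB opts in to the generic loader's correction. A hedged sketch of the consumer side, modelled from memory on kernel/module.c of this era (originally added for relocatable powerpc kernels); treat the details as an approximation, not the exact source:

	/* Hedged sketch: how the generic module loader is believed to use
	 * reloc_start when ARCH_RELOCATES_KCRCTAB is defined. */
	static unsigned long maybe_relocated(unsigned long crc,
					     const struct module *crc_owner)
	{
	#ifdef ARCH_RELOCATES_KCRCTAB
		/* CRCs exported by the core kernel (crc_owner == NULL) were
		 * shifted by the KASLR displacement; subtract it before the
		 * version check compares against the module's recorded CRC. */
		if (crc_owner == NULL)
			return crc - (unsigned long)reloc_start;
	#endif
		return crc;
	}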

arch/arm64/include/asm/spinlock.h

Lines changed: 14 additions & 3 deletions
@@ -31,19 +31,29 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 	unsigned int tmp;
 	arch_spinlock_t lockval;
 
+	/*
+	 * Ensure prior spin_lock operations to other locks have completed
+	 * on this CPU before we test whether "lock" is locked.
+	 */
+	smp_mb();
+
 	asm volatile(
 "	sevl\n"
 "1:	wfe\n"
 "2:	ldaxr	%w0, %2\n"
 "	eor	%w1, %w0, %w0, ror #16\n"
 "	cbnz	%w1, 1b\n"
+	/* Serialise against any concurrent lockers */
 	ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
 "	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 2b\n", /* Serialise against any concurrent lockers */
-	/* LSE atomics */
 "	nop\n"
-"	nop\n")
+"	nop\n",
+	/* LSE atomics */
+"	mov	%w1, %w0\n"
+"	cas	%w0, %w0, %2\n"
+"	eor	%w1, %w1, %w0\n")
+"	cbnz	%w1, 2b\n"
 	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
 	:
 	: "memory");
@@ -148,6 +158,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+	smp_mb(); /* See arch_spin_unlock_wait */
 	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
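
For context on the asm above: arm64 spinlocks of this vintage are ticket locks, which is why `eor %w1, %w0, %w0, ror #16` tests for "unlocked" (the two 16-bit halves are equal). A hedged C sketch of the value-level check named in the second hunk's context, arch_spin_value_unlocked; field order in the real header is endianness-dependent, simplified here:

	/* Hedged sketch of the ticket-lock layout the asm relies on: the
	 * 32-bit lock word holds "owner" (ticket being served) and "next"
	 * (next ticket to hand out); unlocked means owner == next, i.e.
	 * the word XORed with itself rotated by 16 bits is zero. */
	typedef struct {
		unsigned short owner;	/* low half on little-endian */
		unsigned short next;	/* high half on little-endian */
	} arch_spinlock_t;

	static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
	{
		return lock.owner == lock.next;
	}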

arch/arm64/kernel/head.S

Lines changed: 3 additions & 0 deletions
@@ -695,6 +695,9 @@ ENTRY(__enable_mmu)
 	isb
 	bl	__create_page_tables		// recreate kernel mapping
 
+	tlbi	vmalle1				// Remove any stale TLB entries
+	dsb	nsh
+
 	msr	sctlr_el1, x19			// re-enable the MMU
 	isb
 	ic	iallu				// flush instructions fetched
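
The two added instructions invalidate all EL1 TLB entries on the local CPU and wait for completion before the MMU is switched back on with the freshly rebuilt tables. A hedged sketch of the same pair as arm64 inline asm from C; the helper name is hypothetical, for illustration only:

	/* Hedged sketch (hypothetical helper name): drop stale EL1 TLB
	 * entries before re-enabling the MMU with new page tables. */
	static inline void flush_stale_el1_tlb(void)
	{
		asm volatile(
		"	tlbi	vmalle1\n"	/* invalidate all EL1 TLB entries */
		"	dsb	nsh\n"		/* complete before MMU re-enable */
		::: "memory");
	}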

arch/arm64/kernel/stacktrace.c

Lines changed: 4 additions & 1 deletion
@@ -43,6 +43,9 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	unsigned long fp = frame->fp;
 	unsigned long irq_stack_ptr;
 
+	if (!tsk)
+		tsk = current;
+
 	/*
 	 * Switching between stacks is valid when tracing current and in
 	 * non-preemptible context.
@@ -67,7 +70,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	frame->pc = *(unsigned long *)(fp + 8);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (tsk && tsk->ret_stack &&
+	if (tsk->ret_stack &&
 	    (frame->pc == (unsigned long)return_to_handler)) {
 		/*
 		 * This is a case where function graph tracer has
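
After this change, callers may pass a NULL task to mean "current", so the function-graph branch no longer needs its own NULL guard. A hedged usage sketch of the new convention; the loop shape is assumed, modelled on typical stack walkers around this code:

	/* Hedged usage sketch (loop shape assumed): walk the current
	 * task's stack with the new NULL-means-current convention. */
	struct stackframe frame = {
		.fp = (unsigned long)__builtin_frame_address(0),
		.sp = current_stack_pointer,
		.pc = (unsigned long)__builtin_return_address(0),
	};

	while (unwind_frame(NULL, &frame) == 0)	/* NULL: unwind "current" */
		print_ip_sym(frame.pc);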

arch/arm64/kernel/traps.c

Lines changed: 18 additions & 18 deletions
@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 
 	/*
 	 * We need to switch to kernel mode so that we can use __get_user
-	 * to safely read from kernel space. Note that we now dump the
-	 * code first, just in case the backtrace kills us.
+	 * to safely read from kernel space.
 	 */
 	fs = get_fs();
 	set_fs(KERNEL_DS);
@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
 	print_ip_sym(where);
 }
 
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
 {
 	unsigned long addr = instruction_pointer(regs);
-	mm_segment_t fs;
 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
 	int i;
 
-	/*
-	 * We need to switch to kernel mode so that we can use __get_user
-	 * to safely read from kernel space. Note that we now dump the
-	 * code first, just in case the backtrace kills us.
-	 */
-	fs = get_fs();
-	set_fs(KERNEL_DS);
-
 	for (i = -4; i < 1; i++) {
 		unsigned int val, bad;
 
@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 		}
 	}
 	printk("%sCode: %s\n", lvl, str);
+}
 
-	set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+	if (!user_mode(regs)) {
+		mm_segment_t fs = get_fs();
+		set_fs(KERNEL_DS);
+		__dump_instr(lvl, regs);
+		set_fs(fs);
+	} else {
+		__dump_instr(lvl, regs);
+	}
 }
 
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
@@ -149,6 +149,11 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	unsigned long irq_stack_ptr;
 	int skip;
 
+	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+	if (!tsk)
+		tsk = current;
+
 	/*
 	 * Switching between stacks is valid when tracing current and in
 	 * non-preemptible context.
@@ -158,11 +163,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	else
 		irq_stack_ptr = 0;
 
-	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
-	if (!tsk)
-		tsk = current;
-
 	if (tsk == current) {
 		frame.fp = (unsigned long)__builtin_frame_address(0);
 		frame.sp = current_stack_pointer;

arch/arm64/mm/hugetlbpage.c

Lines changed: 5 additions & 17 deletions
@@ -51,20 +51,8 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
 	*pgsize = PAGE_SIZE;
 	if (!pte_cont(pte))
 		return 1;
-	if (!pgd_present(*pgd)) {
-		VM_BUG_ON(!pgd_present(*pgd));
-		return 1;
-	}
 	pud = pud_offset(pgd, addr);
-	if (!pud_present(*pud)) {
-		VM_BUG_ON(!pud_present(*pud));
-		return 1;
-	}
 	pmd = pmd_offset(pud, addr);
-	if (!pmd_present(*pmd)) {
-		VM_BUG_ON(!pmd_present(*pmd));
-		return 1;
-	}
 	if ((pte_t *)pmd == ptep) {
 		*pgsize = PMD_SIZE;
 		return CONT_PMDS;
@@ -212,7 +200,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
 	/* save the 1st pte to return */
 	pte = ptep_get_and_clear(mm, addr, cpte);
-	for (i = 1; i < ncontig; ++i) {
+	for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
 		/*
 		 * If HW_AFDBM is enabled, then the HW could
 		 * turn on the dirty bit for any of the page
@@ -250,8 +238,8 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 		pfn = pte_pfn(*cpte);
 		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
 					  *cpte, &pgsize);
-		for (i = 0; i < ncontig; ++i, ++cpte) {
-			changed = ptep_set_access_flags(vma, addr, cpte,
+		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
+			changed |= ptep_set_access_flags(vma, addr, cpte,
 							pfn_pte(pfn,
 								hugeprot),
 							dirty);
@@ -273,7 +261,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
 		cpte = huge_pte_offset(mm, addr);
 		ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
-		for (i = 0; i < ncontig; ++i, ++cpte)
+		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
 			ptep_set_wrprotect(mm, addr, cpte);
 	} else {
 		ptep_set_wrprotect(mm, addr, ptep);
@@ -291,7 +279,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
 		cpte = huge_pte_offset(vma->vm_mm, addr);
 		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
 					  *cpte, &pgsize);
-		for (i = 0; i < ncontig; ++i, ++cpte)
+		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
 			ptep_clear_flush(vma, addr, cpte);
 	} else {
 		ptep_clear_flush(vma, addr, ptep);
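
The common thread in these hunks: loops over a contiguous-hint range advanced the pte pointer but never the virtual address, so every iteration operated on the first page's address; the access-flags loop additionally overwrote `changed` each pass instead of accumulating it. A standalone C sketch of the two bug shapes, with a hypothetical helper in plain user-space C:

	/* Standalone sketch of the two fixed bug shapes (hypothetical
	 * helper): the address must advance with the index, and the
	 * per-iteration results must be OR-accumulated, not overwritten. */
	#include <stdio.h>

	#define NCONTIG	4
	#define PGSIZE	0x1000UL

	static int update_page(unsigned long addr)
	{
		printf("updating 0x%lx\n", addr);
		return addr == 0x400000;	/* pretend only page 0 changed */
	}

	int main(void)
	{
		unsigned long addr = 0x400000;
		int i, changed = 0;

		/* fixed shape: addr += PGSIZE each pass, results OR-ed */
		for (i = 0; i < NCONTIG; ++i, addr += PGSIZE)
			changed |= update_page(addr);

		printf("changed = %d\n", changed);	/* 1, not just the last result */
		return 0;
	}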
