
Commit d0a12e9

Ard Biesheuvel authored and Alex Shi committed
arm64: add support for kernel ASLR
This adds support for KASLR, based on entropy provided by the bootloader
in the /chosen/kaslr-seed DT property. Depending on the size of the
address space (VA_BITS) and the page size, the entropy in the virtual
displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all 4
levels), with the sidenote that displacements that result in the kernel
image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB
granule kernels, respectively) are not allowed, and will be rounded up to
an acceptable value.

If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is
randomized independently from the core kernel. This makes it less likely
that the location of core kernel data structures can be determined by an
adversary, but causes all function calls from modules into the core
kernel to be resolved via entries in the module PLTs.

If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region
is randomized by choosing a page-aligned 128 MB region inside the
interval [_etext - 128 MB, _stext + 128 MB). This gives between 10 and
14 bits of entropy (depending on page size), independently of the kernel
randomization, but still guarantees that modules are within the range of
relative branch and jump instructions (with the caveat that, since the
module region is shared with other uses of the vmalloc area, modules may
need to be loaded further away if the module region is exhausted).

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit f80fb3a3d50843a401dac4b566b3b131da8077a2)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
1 parent 89328d4 commit d0a12e9

10 files changed: 329 additions & 22 deletions
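To make the entropy figures in the commit message concrete, the sketch below reproduces the displacement-mask arithmetic from the new kaslr.c for two representative configurations. It is an illustration only; the VA_BITS values are assumptions based on the usual arm64 page-size/translation-level combinations, not taken from this commit.

/*
 * Illustrative sketch: mirrors mask = ((1UL << (VA_BITS - 2)) - 1)
 * & ~(SZ_2M - 1) from kaslr_early_init(). Assumed VA_BITS values:
 * 16 KB granule / 2 levels -> 36, 4 KB granule / 4 levels -> 48.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_2M	(2UL << 20)

static unsigned int entropy_bits(unsigned int va_bits)
{
	uint64_t mask = ((1UL << (va_bits - 2)) - 1) & ~(SZ_2M - 1);

	/* random bits land in bits [21, va_bits - 2) of the offset */
	return __builtin_popcountll(mask);
}

int main(void)
{
	printf("16k/2 levels: %u bits\n", entropy_bits(36));	/* 13 */
	printf("4k/4 levels:  %u bits\n", entropy_bits(48));	/* 25 */
	return 0;
}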


arch/arm64/Kconfig

Lines changed: 29 additions & 0 deletions
@@ -748,6 +748,35 @@ config RELOCATABLE
 	  relocation pass at runtime even if the kernel is loaded at the
 	  same address it was linked at.
 
+config RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image"
+	select ARM64_MODULE_PLTS
+	select RELOCATABLE
+	help
+	  Randomizes the virtual address at which the kernel image is
+	  loaded, as a security feature that deters exploit attempts
+	  relying on knowledge of the location of kernel internals.
+
+	  It is the bootloader's job to provide entropy, by passing a
+	  random u64 value in /chosen/kaslr-seed at kernel entry.
+
+	  If unsure, say N.
+
+config RANDOMIZE_MODULE_REGION_FULL
+	bool "Randomize the module region independently from the core kernel"
+	depends on RANDOMIZE_BASE
+	default y
+	help
+	  Randomizes the location of the module region without considering the
+	  location of the core kernel. This way, it is impossible for modules
+	  to leak information about the location of core kernel data structures
+	  but it does imply that function calls between modules and the core
+	  kernel will need to be resolved via veneers in the module PLT.
+
+	  When this option is not set, the module region will be randomized over
+	  a limited range that contains the [_stext, _etext] interval of the
+	  core kernel, so branch relocations are always in range.
+
 endmenu
 
 menu "Boot options"
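For readers unfamiliar with the veneers the help text mentions: when a module is placed out of direct-branch range of the core kernel, each cross-range call is routed through a small trampoline emitted into the module's PLT (see module_emit_plt_entry() in asm/module.h below). The layout sketched here is a conceptual illustration, not the literal arm64 module-plts.c definition; it shows why such a veneer lifts the +/-128 MB limit of a direct branch.

#include <stdint.h>

/*
 * Conceptual shape of one PLT veneer (illustrative): four instruction
 * slots that build the full 64-bit target in a scratch register and
 * branch indirectly, so no +/-128 MB displacement limit applies.
 */
struct plt_veneer {
	uint32_t mov0;	/* movn x16, #target[15:0]           */
	uint32_t mov1;	/* movk x16, #target[31:16], lsl #16 */
	uint32_t mov2;	/* movk x16, #target[47:32], lsl #32 */
	uint32_t br;	/* br   x16                          */
};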

arch/arm64/include/asm/memory.h

Lines changed: 4 additions & 1 deletion
@@ -53,7 +53,7 @@
 #define KIMAGE_VADDR		(MODULES_END)
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
-#define MODULES_VSIZE		(SZ_64M)
+#define MODULES_VSIZE		(SZ_128M)
 #define PCI_IO_END		(PAGE_OFFSET - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
@@ -139,6 +139,9 @@ extern phys_addr_t memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
 
+/* the virtual base of the kernel image (minus TEXT_OFFSET) */
+extern u64			kimage_vaddr;
+
 /* the offset between the kernel virtual and physical mappings */
 extern u64			kimage_voffset;
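A note on how kimage_vaddr and kimage_voffset relate: head.S (below) stores kimage_vaddr minus the physical base into kimage_voffset, so any virtual address inside the kernel image translates to physical with a single subtraction, whatever displacement KASLR chose. A minimal sketch, with an assumed helper name that this commit does not introduce:

/* sketch only: assumes kernel context where kimage_voffset is set up */
static inline u64 kimage_va_to_pa(u64 va)
{
	return va - kimage_voffset;
}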

arch/arm64/include/asm/module.h

Lines changed: 6 additions & 0 deletions
@@ -31,4 +31,10 @@ struct mod_arch_specific {
 u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
			  Elf64_Sym *sym);
 
+#ifdef CONFIG_RANDOMIZE_BASE
+extern u64 module_alloc_base;
+#else
+#define module_alloc_base	((u64)_etext - MODULES_VSIZE)
+#endif
+
 #endif /* __ASM_MODULE_H */
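The main consumer of module_alloc_base is the arm64 module_alloc(), in one of the ten changed files not captured on this page; the sketch below is a hedged approximation of how the base plausibly bounds the allocation window, not a quote of the actual change.

/* sketch, kernel context assumed (linux/vmalloc.h, linux/moduleloader.h) */
void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, MODULE_ALIGN,
				    module_alloc_base,
				    module_alloc_base + MODULES_VSIZE,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				    NUMA_NO_NODE,
				    __builtin_return_address(0));
}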

arch/arm64/kernel/Makefile

Lines changed: 1 addition & 0 deletions
@@ -43,6 +43,7 @@ arm64-obj-$(CONFIG_PCI)			+= pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)	+= armv8_deprecated.o
 arm64-obj-$(CONFIG_ACPI)		+= acpi.o
 arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
+arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)

arch/arm64/kernel/head.S

Lines changed: 51 additions & 8 deletions
@@ -210,6 +210,7 @@ section_table:
 ENTRY(stext)
 	bl	preserve_boot_args
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
+	mov	x23, xzr			// KASLR offset, defaults to 0
 	adrp	x24, __PHYS_OFFSET
 	bl	set_cpu_boot_mode_flag
 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
@@ -313,7 +314,7 @@ ENDPROC(preserve_boot_args)
 __create_page_tables:
 	adrp	x25, idmap_pg_dir
 	adrp	x26, swapper_pg_dir
-	mov	x27, lr
+	mov	x28, lr
 
 	/*
 	 * Invalidate the idmap and swapper page tables to avoid potential
@@ -392,6 +393,7 @@ __create_page_tables:
 	 */
 	mov	x0, x26				// swapper_pg_dir
 	ldr	x5, =KIMAGE_VADDR
+	add	x5, x5, x23			// add KASLR displacement
 	create_pgd_entry x0, x5, x3, x6
 	ldr	w6, kernel_img_size
 	add	x6, x6, x5
@@ -408,8 +410,7 @@ __create_page_tables:
 	dmb	sy
 	bl	__inval_cache_range
 
-	mov	lr, x27
-	ret
+	ret	x28
 ENDPROC(__create_page_tables)
 
 kernel_img_size:
@@ -421,6 +422,7 @@ kernel_img_size:
 	 */
 	.set	initial_sp, init_thread_union + THREAD_START_SP
 __mmap_switched:
+	mov	x28, lr				// preserve LR
 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
 	isb
@@ -449,19 +451,26 @@ __mmap_switched:
 	ldr	x13, [x9, #-8]
 	cmp	w12, #R_AARCH64_RELATIVE
 	b.ne	1f
-	str	x13, [x11]
+	add	x13, x13, x23			// relocate
+	str	x13, [x11, x23]
 	b	0b
 
 1:	cmp	w12, #R_AARCH64_ABS64
 	b.ne	0b
 	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
 	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
+	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
 	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
+	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
+	add	x14, x15, x23			// relocate
+	csel	x15, x14, x15, ne
 	add	x15, x13, x15
-	str	x15, [x11]
+	str	x15, [x11, x23]
 	b	0b
 
-2:
+2:	adr_l	x8, kimage_vaddr		// make relocated kimage_vaddr
+	dc	cvac, x8			// value visible to secondaries
+	dsb	sy				// with MMU off
 #endif
 
 	adr_l	sp, initial_sp, x4
@@ -470,13 +479,23 @@ __mmap_switched:
 	msr	sp_el0, x4			// Save thread_info
 	str_l	x21, __fdt_pointer, x5		// Save FDT pointer
 
-	ldr	x4, =KIMAGE_VADDR		// Save the offset between
+	ldr_l	x4, kimage_vaddr		// Save the offset between
 	sub	x4, x4, x24			// the kernel virtual and
 	str_l	x4, kimage_voffset, x5		// physical mappings
 
 	mov	x29, #0
 #ifdef CONFIG_KASAN
 	bl	kasan_early_init
+#endif
+#ifdef CONFIG_RANDOMIZE_BASE
+	cbnz	x23, 0f				// already running randomized?
+	mov	x0, x21				// pass FDT address in x0
+	bl	kaslr_early_init		// parse FDT for KASLR options
+	cbz	x0, 0f				// KASLR disabled? just proceed
+	mov	x23, x0				// record KASLR offset
+	ret	x28				// we must enable KASLR, return
+						// to __enable_mmu()
+0:
 #endif
 	b	start_kernel
 ENDPROC(__mmap_switched)
@@ -486,6 +505,10 @@ ENDPROC(__mmap_switched)
  * hotplug and needs to have the same protections as the text region
  */
 	.section ".text","ax"
+
+ENTRY(kimage_vaddr)
+	.quad		_text - TEXT_OFFSET
+
 /*
  * If we're fortunate enough to boot at EL2, ensure that the world is
  * sane before dropping to EL1.
@@ -651,7 +674,7 @@ ENTRY(secondary_startup)
 	adrp	x26, swapper_pg_dir
 	bl	__cpu_setup			// initialise processor
 
-	ldr	x8, =KIMAGE_VADDR
+	ldr	x8, kimage_vaddr
 	ldr	w9, 0f
 	sub	x27, x8, w9, sxtw		// address to jump to after enabling the MMU
 	b	__enable_mmu
@@ -684,6 +707,7 @@ ENDPROC(__secondary_switched)
  */
 	.section ".idmap.text", "ax"
 __enable_mmu:
+	mrs	x18, sctlr_el1			// preserve old SCTLR_EL1 value
 	mrs	x1, ID_AA64MMFR0_EL1
 	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
 	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -701,6 +725,25 @@ __enable_mmu:
 	ic	iallu
 	dsb	nsh
 	isb
+#ifdef CONFIG_RANDOMIZE_BASE
+	mov	x19, x0				// preserve new SCTLR_EL1 value
+	blr	x27
+
+	/*
+	 * If we return here, we have a KASLR displacement in x23 which we need
+	 * to take into account by discarding the current kernel mapping and
+	 * creating a new one.
+	 */
+	msr	sctlr_el1, x18			// disable the MMU
+	isb
+	bl	__create_page_tables		// recreate kernel mapping
+
+	msr	sctlr_el1, x19			// re-enable the MMU
+	isb
+	ic	ialluis				// flush instructions fetched
+	isb					// via old mapping
+	add	x27, x27, x23			// relocated __mmap_switched
+#endif
 	br	x27
 ENDPROC(__enable_mmu)
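The relocation loop in __mmap_switched above is easier to follow in C. The sketch below is an illustration of the same logic, not code from the commit: R_AARCH64_RELATIVE entries get the KASLR displacement added to their addend, R_AARCH64_ABS64 entries get it added to the symbol value unless the symbol is absolute (SHN_ABS), and every result is stored through the displaced relocation target.

/* illustrative C equivalent of the head.S relocation loop */
#include <elf.h>
#include <stdint.h>

static void apply_relocations(Elf64_Rela *rela, Elf64_Rela *rela_end,
			      const Elf64_Sym *symtab, uint64_t kaslr_offset)
{
	for (; rela < rela_end; rela++) {
		uint64_t *place = (uint64_t *)(rela->r_offset + kaslr_offset);

		switch (ELF64_R_TYPE(rela->r_info)) {
		case R_AARCH64_RELATIVE:
			/* base-relative: displace the addend */
			*place = rela->r_addend + kaslr_offset;
			break;
		case R_AARCH64_ABS64: {
			const Elf64_Sym *sym = &symtab[ELF64_R_SYM(rela->r_info)];
			uint64_t val = sym->st_value;

			/* SHN_ABS symbol values are not displaced */
			if (sym->st_shndx != SHN_ABS)
				val += kaslr_offset;
			*place = rela->r_addend + val;
			break;
		}
		}
	}
}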

arch/arm64/kernel/kaslr.c

Lines changed: 173 additions & 0 deletions
New file:

/*
 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/crc32.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/sections.h>

u64 __read_mostly module_alloc_base;

static __init u64 get_kaslr_seed(void *fdt)
{
	int node, len;
	u64 *prop;
	u64 ret;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(u64))
		return 0;

	ret = fdt64_to_cpu(*prop);
	*prop = 0;
	return ret;
}

static __init const u8 *get_cmdline(void *fdt)
{
	static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;

	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
		int node;
		const u8 *prop;

		node = fdt_path_offset(fdt, "/chosen");
		if (node < 0)
			goto out;

		prop = fdt_getprop(fdt, node, "bootargs", NULL);
		if (!prop)
			goto out;
		return prop;
	}
out:
	return default_cmdline;
}

extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
				       pgprot_t prot);

/*
 * This routine will be executed with the kernel mapped at its default virtual
 * address, and if it returns successfully, the kernel will be remapped, and
 * start_kernel() will be executed from a randomized virtual offset. The
 * relocation will result in all absolute references (e.g., static variables
 * containing function pointers) to be reinitialized, and zero-initialized
 * .bss variables will be reset to 0.
 */
u64 __init kaslr_early_init(u64 dt_phys)
{
	void *fdt;
	u64 seed, offset, mask, module_range;
	const u8 *cmdline, *str;
	int size;

	/*
	 * Set a reasonable default for module_alloc_base in case
	 * we end up running with module randomization disabled.
	 */
	module_alloc_base = (u64)_etext - MODULES_VSIZE;

	/*
	 * Try to map the FDT early. If this fails, we simply bail,
	 * and proceed with KASLR disabled. We will make another
	 * attempt at mapping the FDT in setup_machine()
	 */
	early_fixmap_init();
	fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	if (!fdt)
		return 0;

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(fdt);
	if (!seed)
		return 0;

	/*
	 * Check if 'nokaslr' appears on the command line, and
	 * return 0 if that is the case.
	 */
	cmdline = get_cmdline(fdt);
	str = strstr(cmdline, "nokaslr");
	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
		return 0;

	/*
	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
	 * kernel image offset from the seed. Let's place the kernel in the
	 * lower half of the VMALLOC area (VA_BITS - 2).
	 * Even if we could randomize at page granularity for 16k and 64k pages,
	 * let's always round to 2 MB so we don't interfere with the ability to
	 * map using contiguous PTEs
	 */
	mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
	offset = seed & mask;

	/*
	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
	 * happens, increase the KASLR offset by the size of the kernel image.
	 */
	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
		offset = (offset + (u64)(_end - _text)) & mask;

	if (IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN does not expect the module region to intersect the
		 * vmalloc region, since shadow memory is allocated for each
		 * module at load time, whereas the vmalloc region is shadowed
		 * by KASAN zero pages. So keep modules out of the vmalloc
		 * region if KASAN is enabled.
		 */
		return offset;

	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
		/*
		 * Randomize the module region independently from the core
		 * kernel. This prevents modules from leaking any information
		 * about the address of the kernel itself, but results in
		 * branches between modules and the core kernel that are
		 * resolved via PLTs. (Branches between modules will be
		 * resolved normally.)
		 */
		module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE;
		module_alloc_base = VMALLOC_START;
	} else {
		/*
		 * Randomize the module region by setting module_alloc_base to
		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
		 * _stext). This guarantees that the resulting region still
		 * covers [_stext, _etext], and that all relative branches can
		 * be resolved without veneers.
		 */
		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
		module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
	}

	/* use the lower 21 bits to randomize the base of the module region */
	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
	module_alloc_base &= PAGE_MASK;

	return offset;
}
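One detail worth spelling out: the last two statements map the 21 seed bits onto module_range with a multiply-and-shift rather than a modulo, which spreads any module_range (not just powers of two) evenly across the seed values. A standalone demonstration with assumed example numbers:

/* demo of the multiply-and-shift scaling; all constants assumed */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t module_range = 96UL << 20;	/* e.g. 96 MB of slack  */
	uint64_t base = 0xffffff8000000000UL;	/* assumed window start */
	uint64_t seed = 0xdeadbeef12345678UL;

	/* scale seed bits [0, 21) linearly into [0, module_range) */
	base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
	base &= ~((uint64_t)0xfff);		/* PAGE_MASK, 4 KB assumed */

	printf("module_alloc_base = %#llx\n", (unsigned long long)base);
	return 0;
}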
