
Commit 8be7375

Ard Biesheuvel authored and committed
arm64: relocatable: deal with physically misaligned kernel images

When booting a relocatable kernel image, there is no practical reason to
refuse an image whose load address is not exactly TEXT_OFFSET bytes above
a 2 MB aligned base address, as long as the physical and virtual
misalignment with respect to the swapper block size are equal, and are
both aligned to THREAD_SIZE.

Since the virtual misalignment is under our control when we first enter
the kernel proper, we can simply choose its value to be equal to the
physical misalignment.

So treat the misalignment of the physical load address as the initial
KASLR offset, and fix up the remaining code to deal with that.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
(cherry picked from commit 08cdac619c81b3fa8cd73aeed2330ffe0a0b73ca)
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
1 parent f7b2bdb commit 8be7375

2 files changed: 9 additions & 6 deletions
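The central idea is to treat the load address's misalignment within a 2 MB
(MIN_KIMG_ALIGN) block as the initial KASLR offset. A minimal stand-alone C
sketch of that arithmetic follows; the physical address and the program
itself are illustrative only, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_KIMG_ALIGN  (2UL << 20)     /* 2 MB minimum image alignment */

    int main(void)
    {
        /* hypothetical physical base of the image mapping:
         * a 2 MB boundary plus 100 KB of slack */
        uint64_t phys_base = 0x40200000ULL + 0x19000ULL;

        /* equivalent of "and x23, x24, MIN_KIMG_ALIGN - 1" in stext */
        uint64_t kaslr_offset = phys_base & (MIN_KIMG_ALIGN - 1);

        printf("initial KASLR offset = 0x%llx\n",
               (unsigned long long)kaslr_offset);   /* prints 0x19000 */
        return 0;
    }

Because the virtual image is then shifted by the same amount, the physical
and virtual misalignments with respect to the swapper block size stay equal,
which is exactly the condition the commit message relies on.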


arch/arm64/kernel/head.S

Lines changed: 6 additions & 3 deletions
@@ -25,6 +25,7 @@
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/boot.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
@@ -211,8 +212,8 @@ efi_header_end:
 ENTRY(stext)
 	bl	preserve_boot_args
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
-	mov	x23, xzr			// KASLR offset, defaults to 0
 	adrp	x24, __PHYS_OFFSET
+	and	x23, x24, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
 	bl	set_cpu_boot_mode_flag
 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
 	/*
@@ -447,11 +448,13 @@ __primary_switched:
 	bl	kasan_early_init
 #endif
 #ifdef CONFIG_RANDOMIZE_BASE
-	cbnz	x23, 0f				// already running randomized?
+	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
+	b.ne	0f
 	mov	x0, x21				// pass FDT address in x0
+	mov	x1, x23				// pass modulo offset in x1
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
-	mov	x23, x0				// record KASLR offset
+	orr	x23, x23, x0			// record KASLR offset
 	ret	x28				// we must enable KASLR, return
 						// to __enable_mmu()
 0:
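In C terms, the new x23 bookkeeping in these hunks works out to roughly the
following. This is a sketch only: the assembly above is authoritative, the
helper names are made up for illustration, and it assumes that the offset
returned by kaslr_early_init() keeps the low 2 MB bits clear, so the OR
behaves like an addition of the two components:

    #include <stdbool.h>
    #include <stdint.h>

    #define MIN_KIMG_ALIGN  (2UL << 20)

    /* x23: low bits hold the physical misalignment ("modulo offset"),
     * high bits hold the randomized part once KASLR has run */
    static uint64_t kaslr_offset;       /* set up in stext */

    /* "tst x23, ~(MIN_KIMG_ALIGN - 1); b.ne 0f" — if any bit above the
     * 2 MB misalignment is set, a random offset was already applied */
    static bool already_randomized(void)
    {
        return (kaslr_offset & ~(MIN_KIMG_ALIGN - 1)) != 0;
    }

    /* "orr x23, x23, x0" — merge the random offset returned by
     * kaslr_early_init() into x23; the low "modulo" bits are preserved */
    static void record_kaslr_offset(uint64_t random_offset)
    {
        kaslr_offset |= random_offset;
    }

Splitting x23 into a low modulo part and a high randomized part is what lets
a single register carry both the physical misalignment and the KASLR
displacement through the early boot path.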

arch/arm64/kernel/kaslr.c

Lines changed: 3 additions & 3 deletions
@@ -74,7 +74,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys)
+u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;
@@ -132,8 +132,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
 	 * happens, increase the KASLR offset by the size of the kernel image.
 	 */
-	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
 		offset = (offset + (u64)(_end - _text)) & mask;
 
 	if (IS_ENABLED(CONFIG_KASAN))
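The reason modulo_offset has to participate in this check: the image is
ultimately mapped at _text + offset + modulo_offset, so the physical
misalignment can introduce a swapper-table boundary crossing even when
offset alone looks safe. A small stand-alone illustration with hypothetical
addresses (taking SWAPPER_TABLE_SHIFT == 30, i.e. 1 GB blocks for a 4 KB
granule kernel, as an assumption):

    #include <stdint.h>
    #include <stdio.h>

    #define SWAPPER_TABLE_SHIFT 30  /* assumed: 1 GB blocks, 4 KB granule */

    /* nonzero if the mapped image [text, end) straddles a swapper table boundary */
    static int crosses_boundary(uint64_t text, uint64_t end,
                                uint64_t offset, uint64_t modulo_offset)
    {
        return ((text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
               ((end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT);
    }

    int main(void)
    {
        uint64_t text          = 0x3fe00000ULL;         /* 2 MB below a 1 GB boundary */
        uint64_t end           = text + 0x1f8000ULL;    /* image ends 32 KB short of it */
        uint64_t offset        = 0;                     /* random component (hypothetical) */
        uint64_t modulo_offset = 0x19000ULL;            /* 100 KB physical misalignment */

        /* the check on offset alone sees no crossing ... */
        printf("without modulo offset: %d\n",
               crosses_boundary(text, end, offset, 0));                /* 0 */
        /* ... but the image as actually placed does cross the boundary */
        printf("with modulo offset:    %d\n",
               crosses_boundary(text, end, offset, modulo_offset));    /* 1 */
        return 0;
    }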
