Skip to content

Commit ac248a8

Browse files
committed
Merge remote-tracking branch 'lsk/v4.4/topic/mm-kaslr' into linux-linaro-lsk-v4.4
2 parents bd06e78 + 5808d4c commit ac248a8

10 files changed

Lines changed: 168 additions & 114 deletions

File tree

arch/arm64/Kconfig.debug

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -73,7 +73,7 @@ config DEBUG_RODATA
7373
If in doubt, say Y
7474

7575
config DEBUG_ALIGN_RODATA
76-
depends on DEBUG_RODATA && ARM64_4K_PAGES
76+
depends on DEBUG_RODATA
7777
bool "Align linker sections up to SECTION_SIZE"
7878
help
7979
If this option is enabled, sections that may potentially be marked as

arch/arm64/include/asm/assembler.h

Lines changed: 20 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -233,4 +233,24 @@ lr .req x30 // link register
233233
.long \sym\()_hi32
234234
.endm
235235

236+
/*
237+
* mov_q - move an immediate constant into a 64-bit register using
238+
* between 2 and 4 movz/movk instructions (depending on the
239+
* magnitude and sign of the operand)
240+
*/
241+
.macro mov_q, reg, val
242+
.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
243+
movz \reg, :abs_g1_s:\val
244+
.else
245+
.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
246+
movz \reg, :abs_g2_s:\val
247+
.else
248+
movz \reg, :abs_g3:\val
249+
movk \reg, :abs_g2_nc:\val
250+
.endif
251+
movk \reg, :abs_g1_nc:\val
252+
.endif
253+
movk \reg, :abs_g0_nc:\val
254+
.endm
255+
236256
#endif /* __ASM_ASSEMBLER_H */

arch/arm64/kernel/efi-entry.S

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -61,7 +61,7 @@ ENTRY(entry)
6161
*/
6262
mov x20, x0 // DTB address
6363
ldr x0, [sp, #16] // relocated _text address
64-
movz x21, #:abs_g0:stext_offset
64+
ldr w21, =stext_offset
6565
add x21, x0, x21
6666

6767
/*

arch/arm64/kernel/head.S

Lines changed: 84 additions & 76 deletions
Original file line number | Diff line number | Diff line change
@@ -25,6 +25,7 @@
2525
#include <linux/irqchip/arm-gic-v3.h>
2626

2727
#include <asm/assembler.h>
28+
#include <asm/boot.h>
2829
#include <asm/ptrace.h>
2930
#include <asm/asm-offsets.h>
3031
#include <asm/cache.h>
@@ -100,8 +101,6 @@ _head:
100101
#endif
101102

102103
#ifdef CONFIG_EFI
103-
.globl __efistub_stext_offset
104-
.set __efistub_stext_offset, stext - _head
105104
.align 3
106105
pe_header:
107106
.ascii "PE"
@@ -121,11 +120,11 @@ optional_header:
121120
.short 0x20b // PE32+ format
122121
.byte 0x02 // MajorLinkerVersion
123122
.byte 0x14 // MinorLinkerVersion
124-
.long _end - stext // SizeOfCode
123+
.long _end - efi_header_end // SizeOfCode
125124
.long 0 // SizeOfInitializedData
126125
.long 0 // SizeOfUninitializedData
127126
.long __efistub_entry - _head // AddressOfEntryPoint
128-
.long __efistub_stext_offset // BaseOfCode
127+
.long efi_header_end - _head // BaseOfCode
129128

130129
extra_header_fields:
131130
.quad 0 // ImageBase
@@ -142,7 +141,7 @@ extra_header_fields:
142141
.long _end - _head // SizeOfImage
143142

144143
// Everything before the kernel image is considered part of the header
145-
.long __efistub_stext_offset // SizeOfHeaders
144+
.long efi_header_end - _head // SizeOfHeaders
146145
.long 0 // CheckSum
147146
.short 0xa // Subsystem (EFI application)
148147
.short 0 // DllCharacteristics
@@ -186,10 +185,10 @@ section_table:
186185
.byte 0
187186
.byte 0
188187
.byte 0 // end of 0 padding of section name
189-
.long _end - stext // VirtualSize
190-
.long __efistub_stext_offset // VirtualAddress
191-
.long _edata - stext // SizeOfRawData
192-
.long __efistub_stext_offset // PointerToRawData
188+
.long _end - efi_header_end // VirtualSize
189+
.long efi_header_end - _head // VirtualAddress
190+
.long _edata - efi_header_end // SizeOfRawData
191+
.long efi_header_end - _head // PointerToRawData
193192

194193
.long 0 // PointerToRelocations (0 for executables)
195194
.long 0 // PointerToLineNumbers (0 for executables)
@@ -198,20 +197,23 @@ section_table:
198197
.long 0xe0500020 // Characteristics (section flags)
199198

200199
/*
201-
* EFI will load stext onwards at the 4k section alignment
200+
* EFI will load .text onwards at the 4k section alignment
202201
* described in the PE/COFF header. To ensure that instruction
203202
* sequences using an adrp and a :lo12: immediate will function
204-
* correctly at this alignment, we must ensure that stext is
203+
* correctly at this alignment, we must ensure that .text is
205204
* placed at a 4k boundary in the Image to begin with.
206205
*/
207206
.align 12
207+
efi_header_end:
208208
#endif
209209

210+
__INIT
211+
210212
ENTRY(stext)
211213
bl preserve_boot_args
212214
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
213-
mov x23, xzr // KASLR offset, defaults to 0
214215
adrp x24, __PHYS_OFFSET
216+
and x23, x24, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
215217
bl set_cpu_boot_mode_flag
216218
bl __create_page_tables // x25=TTBR0, x26=TTBR1
217219
/*
@@ -220,13 +222,11 @@ ENTRY(stext)
220222
* On return, the CPU will be ready for the MMU to be turned on and
221223
* the TCR will have been set.
222224
*/
223-
ldr x27, 0f // address to jump to after
225+
bl __cpu_setup // initialise processor
226+
adr_l x27, __primary_switch // address to jump to after
224227
// MMU has been enabled
225-
adr_l lr, __enable_mmu // return (PIC) address
226-
b __cpu_setup // initialise processor
228+
b __enable_mmu
227229
ENDPROC(stext)
228-
.align 3
229-
0: .quad __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
230230

231231
/*
232232
* Preserve the arguments passed by the bootloader in x0 .. x3
@@ -336,7 +336,7 @@ __create_page_tables:
336336
cmp x0, x6
337337
b.lo 1b
338338

339-
ldr x7, =SWAPPER_MM_MMUFLAGS
339+
mov x7, SWAPPER_MM_MMUFLAGS
340340

341341
/*
342342
* Create the identity mapping.
@@ -392,12 +392,13 @@ __create_page_tables:
392392
* Map the kernel image (starting with PHYS_OFFSET).
393393
*/
394394
mov x0, x26 // swapper_pg_dir
395-
ldr x5, =KIMAGE_VADDR
395+
mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
396396
add x5, x5, x23 // add KASLR displacement
397397
create_pgd_entry x0, x5, x3, x6
398-
ldr w6, kernel_img_size
399-
add x6, x6, x5
400-
mov x3, x24 // phys offset
398+
adrp x6, _end // runtime __pa(_end)
399+
adrp x3, _text // runtime __pa(_text)
400+
sub x6, x6, x3 // _end - _text
401+
add x6, x6, x5 // runtime __va(_end)
401402
create_block_map x0, x7, x3, x5, x6
402403

403404
/*
@@ -412,16 +413,13 @@ __create_page_tables:
412413

413414
ret x28
414415
ENDPROC(__create_page_tables)
415-
416-
kernel_img_size:
417-
.long _end - (_head - TEXT_OFFSET)
418416
.ltorg
419417

420418
/*
421419
* The following fragment of code is executed with the MMU enabled.
422420
*/
423421
.set initial_sp, init_thread_union + THREAD_START_SP
424-
__mmap_switched:
422+
__primary_switched:
425423
mov x28, lr // preserve LR
426424
adr_l x8, vectors // load VBAR_EL1 with virtual
427425
msr vbar_el1, x8 // vector table address
@@ -435,44 +433,6 @@ __mmap_switched:
435433
bl __pi_memset
436434
dsb ishst // Make zero page visible to PTW
437435

438-
#ifdef CONFIG_RELOCATABLE
439-
440-
/*
441-
* Iterate over each entry in the relocation table, and apply the
442-
* relocations in place.
443-
*/
444-
adr_l x8, __dynsym_start // start of symbol table
445-
adr_l x9, __reloc_start // start of reloc table
446-
adr_l x10, __reloc_end // end of reloc table
447-
448-
0: cmp x9, x10
449-
b.hs 2f
450-
ldp x11, x12, [x9], #24
451-
ldr x13, [x9, #-8]
452-
cmp w12, #R_AARCH64_RELATIVE
453-
b.ne 1f
454-
add x13, x13, x23 // relocate
455-
str x13, [x11, x23]
456-
b 0b
457-
458-
1: cmp w12, #R_AARCH64_ABS64
459-
b.ne 0b
460-
add x12, x12, x12, lsl #1 // symtab offset: 24x top word
461-
add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
462-
ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
463-
ldr x15, [x12, #8] // Elf64_Sym::st_value
464-
cmp w14, #-0xf // SHN_ABS (0xfff1) ?
465-
add x14, x15, x23 // relocate
466-
csel x15, x14, x15, ne
467-
add x15, x13, x15
468-
str x15, [x11, x23]
469-
b 0b
470-
471-
2: adr_l x8, kimage_vaddr // make relocated kimage_vaddr
472-
dc cvac, x8 // value visible to secondaries
473-
dsb sy // with MMU off
474-
#endif
475-
476436
adr_l sp, initial_sp, x4
477437
mov x4, sp
478438
and x4, x4, #~(THREAD_SIZE - 1)
@@ -488,17 +448,19 @@ __mmap_switched:
488448
bl kasan_early_init
489449
#endif
490450
#ifdef CONFIG_RANDOMIZE_BASE
491-
cbnz x23, 0f // already running randomized?
451+
tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
452+
b.ne 0f
492453
mov x0, x21 // pass FDT address in x0
454+
mov x1, x23 // pass modulo offset in x1
493455
bl kaslr_early_init // parse FDT for KASLR options
494456
cbz x0, 0f // KASLR disabled? just proceed
495-
mov x23, x0 // record KASLR offset
457+
orr x23, x23, x0 // record KASLR offset
496458
ret x28 // we must enable KASLR, return
497459
// to __enable_mmu()
498460
0:
499461
#endif
500462
b start_kernel
501-
ENDPROC(__mmap_switched)
463+
ENDPROC(__primary_switched)
502464

503465
/*
504466
* end early head section, begin head code that is also used for
@@ -613,7 +575,7 @@ ENDPROC(el2_setup)
613575
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
614576
* in x20. See arch/arm64/include/asm/virt.h for more info.
615577
*/
616-
ENTRY(set_cpu_boot_mode_flag)
578+
set_cpu_boot_mode_flag:
617579
adr_l x1, __boot_cpu_mode
618580
cmp w20, #BOOT_CPU_MODE_EL2
619581
b.ne 1f
@@ -646,7 +608,7 @@ ENTRY(secondary_holding_pen)
646608
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
647609
bl set_cpu_boot_mode_flag
648610
mrs x0, mpidr_el1
649-
ldr x1, =MPIDR_HWID_BITMASK
611+
mov_q x1, MPIDR_HWID_BITMASK
650612
and x0, x0, x1
651613
adr_l x3, secondary_holding_pen_release
652614
pen: ldr x4, [x3]
@@ -666,22 +628,19 @@ ENTRY(secondary_entry)
666628
b secondary_startup
667629
ENDPROC(secondary_entry)
668630

669-
ENTRY(secondary_startup)
631+
secondary_startup:
670632
/*
671633
* Common entry point for secondary CPUs.
672634
*/
673635
adrp x25, idmap_pg_dir
674636
adrp x26, swapper_pg_dir
675637
bl __cpu_setup // initialise processor
676638

677-
ldr x8, kimage_vaddr
678-
ldr w9, 0f
679-
sub x27, x8, w9, sxtw // address to jump to after enabling the MMU
639+
adr_l x27, __secondary_switch // address to jump to after enabling the MMU
680640
b __enable_mmu
681641
ENDPROC(secondary_startup)
682-
0: .long (_text - TEXT_OFFSET) - __secondary_switched
683642

684-
ENTRY(__secondary_switched)
643+
__secondary_switched:
685644
adr_l x5, vectors
686645
msr vbar_el1, x5
687646
isb
@@ -743,7 +702,6 @@ __enable_mmu:
743702
ic iallu // flush instructions fetched
744703
dsb nsh // via old mapping
745704
isb
746-
add x27, x27, x23 // relocated __mmap_switched
747705
#endif
748706
br x27
749707
ENDPROC(__enable_mmu)
@@ -752,3 +710,53 @@ __no_granule_support:
752710
wfe
753711
b __no_granule_support
754712
ENDPROC(__no_granule_support)
713+
714+
__primary_switch:
715+
#ifdef CONFIG_RELOCATABLE
716+
/*
717+
* Iterate over each entry in the relocation table, and apply the
718+
* relocations in place.
719+
*/
720+
ldr w8, =__dynsym_offset // offset to symbol table
721+
ldr w9, =__rela_offset // offset to reloc table
722+
ldr w10, =__rela_size // size of reloc table
723+
724+
mov_q x11, KIMAGE_VADDR // default virtual offset
725+
add x11, x11, x23 // actual virtual offset
726+
add x8, x8, x11 // __va(.dynsym)
727+
add x9, x9, x11 // __va(.rela)
728+
add x10, x9, x10 // __va(.rela) + sizeof(.rela)
729+
730+
0: cmp x9, x10
731+
b.hs 2f
732+
ldp x11, x12, [x9], #24
733+
ldr x13, [x9, #-8]
734+
cmp w12, #R_AARCH64_RELATIVE
735+
b.ne 1f
736+
add x13, x13, x23 // relocate
737+
str x13, [x11, x23]
738+
b 0b
739+
740+
1: cmp w12, #R_AARCH64_ABS64
741+
b.ne 0b
742+
add x12, x12, x12, lsl #1 // symtab offset: 24x top word
743+
add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
744+
ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
745+
ldr x15, [x12, #8] // Elf64_Sym::st_value
746+
cmp w14, #-0xf // SHN_ABS (0xfff1) ?
747+
add x14, x15, x23 // relocate
748+
csel x15, x14, x15, ne
749+
add x15, x13, x15
750+
str x15, [x11, x23]
751+
b 0b
752+
753+
2:
754+
#endif
755+
ldr x8, =__primary_switched
756+
br x8
757+
ENDPROC(__primary_switch)
758+
759+
__secondary_switch:
760+
ldr x8, =__secondary_switched
761+
br x8
762+
ENDPROC(__secondary_switch)

arch/arm64/kernel/image.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -73,6 +73,8 @@
7373

7474
#ifdef CONFIG_EFI
7575

76+
__efistub_stext_offset = stext - _text;
77+
7678
/*
7779
* Prevent the symbol aliases below from being emitted into the kallsyms
7880
* table, by forcing them to be absolute symbols (which are conveniently

arch/arm64/kernel/kaslr.c

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -74,7 +74,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
7474
* containing function pointers) to be reinitialized, and zero-initialized
7575
* .bss variables will be reset to 0.
7676
*/
77-
u64 __init kaslr_early_init(u64 dt_phys)
77+
u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
7878
{
7979
void *fdt;
8080
u64 seed, offset, mask, module_range;
@@ -132,8 +132,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
132132
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
133133
* happens, increase the KASLR offset by the size of the kernel image.
134134
*/
135-
if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
136-
(((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
135+
if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
136+
(((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
137137
offset = (offset + (u64)(_end - _text)) & mask;
138138

139139
if (IS_ENABLED(CONFIG_KASAN))

0 commit comments

Comments (0)