Skip to content

Commit 09fa0a8

Browse files
Ard Biesheuvel authored and Alex Shi committed
arm64: kvm: deal with kernel symbols outside of linear mapping
KVM on arm64 uses a fixed offset between the linear mapping at EL1 and the HYP mapping at EL2. Before we can move the kernel virtual mapping out of the linear mapping, we have to make sure that references to kernel symbols that are accessed via the HYP mapping are translated to their linear equivalent. Reviewed-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> (cherry picked from commit a0bf9776cd0be4490d4675d4108e13379849fc7f) Signed-off-by: Alex Shi <alex.shi@linaro.org> Conflicts: arch/arm64/kvm/hyp.S
1 parent 8163c58 commit 09fa0a8

5 files changed

Lines changed: 32 additions & 9 deletions

File tree

arch/arm/include/asm/kvm_asm.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,8 @@
7979
#define rr_lo_hi(a1, a2) a1, a2
8080
#endif
8181

82+
#define kvm_ksym_ref(kva) (kva)
83+
8284
#ifndef __ASSEMBLY__
8385
struct kvm;
8486
struct kvm_vcpu;

arch/arm/kvm/arm.c

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -979,7 +979,7 @@ static void cpu_init_hyp_mode(void *dummy)
979979
pgd_ptr = kvm_mmu_get_httbr();
980980
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
981981
hyp_stack_ptr = stack_page + PAGE_SIZE;
982-
vector_ptr = (unsigned long)__kvm_hyp_vector;
982+
vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
983983

984984
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
985985
__cpu_init_stage2();
@@ -1072,13 +1072,15 @@ static int init_hyp_mode(void)
10721072
/*
10731073
* Map the Hyp-code called directly from the host
10741074
*/
1075-
err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
1075+
err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start),
1076+
kvm_ksym_ref(__kvm_hyp_code_end));
10761077
if (err) {
10771078
kvm_err("Cannot map world-switch code\n");
10781079
goto out_free_mappings;
10791080
}
10801081

1081-
err = create_hyp_mappings(__start_rodata, __end_rodata);
1082+
err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
1083+
kvm_ksym_ref(__end_rodata));
10821084
if (err) {
10831085
kvm_err("Cannot map rodata section\n");
10841086
goto out_free_mappings;

arch/arm64/include/asm/kvm_asm.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,24 @@
2626
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
2727
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
2828

29+
#define kvm_ksym_ref(sym) ((void *)&sym + kvm_ksym_shift)
30+
2931
#ifndef __ASSEMBLY__
32+
#if __GNUC__ > 4
33+
#define kvm_ksym_shift (PAGE_OFFSET - KIMAGE_VADDR)
34+
#else
35+
/*
36+
* GCC versions 4.9 and older will fold the constant below into the addend of
37+
* the reference to 'sym' above if kvm_ksym_shift is declared static or if the
38+
* constant is used directly. However, since we use the small code model for
39+
* the core kernel, the reference to 'sym' will be emitted as a adrp/add pair,
40+
* with a +/- 4 GB range, resulting in linker relocation errors if the shift
41+
* is sufficiently large. So prevent the compiler from folding the shift into
42+
* the addend, by making the shift a variable with external linkage.
43+
*/
44+
__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
45+
#endif
46+
3047
struct kvm;
3148
struct kvm_vcpu;
3249

arch/arm64/include/asm/kvm_host.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -301,7 +301,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
301301
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
302302
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
303303

304-
u64 kvm_call_hyp(void *hypfn, ...);
304+
u64 __kvm_call_hyp(void *hypfn, ...);
305305
void force_vm_exit(const cpumask_t *mask);
306306
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
307307

@@ -322,8 +322,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
322322
* Call initialization code, and switch to the full blown
323323
* HYP code.
324324
*/
325-
kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
326-
hyp_stack_ptr, vector_ptr);
325+
__kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
326+
hyp_stack_ptr, vector_ptr);
327327
}
328328

329329
static inline void __cpu_init_stage2(void)
@@ -341,4 +341,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
341341
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
342342
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
343343

344+
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
345+
344346
#endif /* __ARM64_KVM_HOST_H__ */

arch/arm64/kvm/hyp.S

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
#include <asm/cpufeature.h>
2323

2424
/*
25-
* u64 kvm_call_hyp(void *hypfn, ...);
25+
* u64 __kvm_call_hyp(void *hypfn, ...);
2626
*
2727
* This is not really a variadic function in the classic C-way and care must
2828
* be taken when calling this to ensure parameters are passed in registers
@@ -39,12 +39,12 @@
3939
* used to implement __hyp_get_vectors in the same way as in
4040
* arch/arm64/kernel/hyp_stub.S.
4141
*/
42-
ENTRY(kvm_call_hyp)
42+
ENTRY(__kvm_call_hyp)
4343
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
4444
hvc #0
4545
ret
4646
alternative_else
4747
b __vhe_hyp_call
4848
nop
4949
alternative_endif
50-
ENDPROC(kvm_call_hyp)
50+
ENDPROC(__kvm_call_hyp)

0 commit comments

Comments
 (0)