Skip to content

Commit 4cf193b

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini: "Bug fixes for all architectures. Nothing really stands out" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (21 commits) KVM: nVMX: remove incorrect vpid check in nested invvpid emulation arm64: kvm: report original PAR_EL1 upon panic arm64: kvm: avoid %p in __kvm_hyp_panic KVM: arm/arm64: vgic: Trust the LR state for HW IRQs KVM: arm/arm64: arch_timer: Preserve physical dist. active state on LR.active KVM: arm/arm64: Fix preemptible timer active state crazyness arm64: KVM: Add workaround for Cortex-A57 erratum 834220 arm64: KVM: Fix AArch32 to AArch64 register mapping ARM/arm64: KVM: test properly for a PTE's uncachedness KVM: s390: fix wrong lookup of VCPUs by array index KVM: s390: avoid memory overwrites on emergency signal injection KVM: Provide function for VCPU lookup by id KVM: s390: fix pfmf intercept handler KVM: s390: enable SIMD only when no VCPUs were created KVM: x86: request interrupt window when IRQ chip is split KVM: x86: set KVM_REQ_EVENT on local interrupt request from user space KVM: x86: split kvm_vcpu_ready_for_interrupt_injection out of dm_request_for_irq_injection KVM: x86: fix interrupt window handling in split IRQ chip case MIPS: KVM: Uninit VCPU in vcpu_create error path MIPS: KVM: Fix CACHE immediate offset sign extension ...
2 parents 6ffeba9 + b2467e7 commit 4cf193b

21 files changed

Lines changed: 171 additions & 111 deletions

File tree

arch/arm/kvm/arm.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -563,18 +563,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
563563
if (vcpu->arch.power_off || vcpu->arch.pause)
564564
vcpu_sleep(vcpu);
565565

566-
/*
567-
* Disarming the background timer must be done in a
568-
* preemptible context, as this call may sleep.
569-
*/
570-
kvm_timer_flush_hwstate(vcpu);
571-
572566
/*
573567
* Preparing the interrupts to be injected also
574568
* involves poking the GIC, which must be done in a
575569
* non-preemptible context.
576570
*/
577571
preempt_disable();
572+
kvm_timer_flush_hwstate(vcpu);
578573
kvm_vgic_flush_hwstate(vcpu);
579574

580575
local_irq_disable();

arch/arm/kvm/mmu.c

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
9898
__kvm_flush_dcache_pud(pud);
9999
}
100100

101+
static bool kvm_is_device_pfn(unsigned long pfn)
102+
{
103+
return !pfn_valid(pfn);
104+
}
105+
101106
/**
102107
* stage2_dissolve_pmd() - clear and flush huge PMD entry
103108
* @kvm: pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
213218
kvm_tlb_flush_vmid_ipa(kvm, addr);
214219

215220
/* No need to invalidate the cache for device mappings */
216-
if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
221+
if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
217222
kvm_flush_dcache_pte(old_pte);
218223

219224
put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
305310

306311
pte = pte_offset_kernel(pmd, addr);
307312
do {
308-
if (!pte_none(*pte) &&
309-
(pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
313+
if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
310314
kvm_flush_dcache_pte(*pte);
311315
} while (pte++, addr += PAGE_SIZE, addr != end);
312316
}
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
10371041
return kvm_vcpu_dabt_iswrite(vcpu);
10381042
}
10391043

1040-
static bool kvm_is_device_pfn(unsigned long pfn)
1041-
{
1042-
return !pfn_valid(pfn);
1043-
}
1044-
10451044
/**
10461045
* stage2_wp_ptes - write protect PMD range
10471046
* @pmd: pointer to pmd entry

arch/arm64/Kconfig

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
316316

317317
If unsure, say Y.
318318

319+
config ARM64_ERRATUM_834220
320+
bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
321+
depends on KVM
322+
default y
323+
help
324+
This option adds an alternative code sequence to work around ARM
325+
erratum 834220 on Cortex-A57 parts up to r1p2.
326+
327+
Affected Cortex-A57 parts might report a Stage 2 translation
328+
fault as the result of a Stage 1 fault for load crossing a
329+
page boundary when there is a permission or device memory
330+
alignment fault at Stage 1 and a translation fault at Stage 2.
331+
332+
The workaround is to verify that the Stage 1 translation
333+
doesn't generate a fault before handling the Stage 2 fault.
334+
Please note that this does not necessarily enable the workaround,
335+
as it depends on the alternative framework, which will only patch
336+
the kernel if an affected CPU is detected.
337+
338+
If unsure, say Y.
339+
319340
config ARM64_ERRATUM_845719
320341
bool "Cortex-A53: 845719: a load might read incorrect data"
321342
depends on COMPAT

arch/arm64/include/asm/cpufeature.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,9 @@
2929
#define ARM64_HAS_PAN 4
3030
#define ARM64_HAS_LSE_ATOMICS 5
3131
#define ARM64_WORKAROUND_CAVIUM_23154 6
32+
#define ARM64_WORKAROUND_834220 7
3233

33-
#define ARM64_NCAPS 7
34+
#define ARM64_NCAPS 8
3435

3536
#ifndef __ASSEMBLY__
3637

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
9999
*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
100100
}
101101

102+
/*
103+
* vcpu_reg should always be passed a register number coming from a
104+
* read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
105+
* with banked registers.
106+
*/
102107
static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
103108
{
104-
if (vcpu_mode_is_32bit(vcpu))
105-
return vcpu_reg32(vcpu, reg_num);
106-
107109
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
108110
}
109111

arch/arm64/kernel/cpu_errata.c

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
7575
(1 << MIDR_VARIANT_SHIFT) | 2),
7676
},
7777
#endif
78+
#ifdef CONFIG_ARM64_ERRATUM_834220
79+
{
80+
/* Cortex-A57 r0p0 - r1p2 */
81+
.desc = "ARM erratum 834220",
82+
.capability = ARM64_WORKAROUND_834220,
83+
MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
84+
(1 << MIDR_VARIANT_SHIFT) | 2),
85+
},
86+
#endif
7887
#ifdef CONFIG_ARM64_ERRATUM_845719
7988
{
8089
/* Cortex-A53 r0p[01234] */

arch/arm64/kvm/hyp.S

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
864864
ENDPROC(__kvm_flush_vm_context)
865865

866866
__kvm_hyp_panic:
867+
// Stash PAR_EL1 before corrupting it in __restore_sysregs
868+
mrs x0, par_el1
869+
push x0, xzr
870+
867871
// Guess the context by looking at VTTBR:
868872
// If zero, then we're already a host.
869873
// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
898902
mrs x3, esr_el2
899903
mrs x4, far_el2
900904
mrs x5, hpfar_el2
901-
mrs x6, par_el1
905+
pop x6, xzr // active context PAR_EL1
902906
mrs x7, tpidr_el2
903907

904908
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
914918
ENDPROC(__kvm_hyp_panic)
915919

916920
__hyp_panic_str:
917-
.ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
921+
.ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
918922

919923
.align 2
920924

@@ -1015,9 +1019,15 @@ el1_trap:
10151019
b.ne 1f // Not an abort we care about
10161020

10171021
/* This is an abort. Check for permission fault */
1022+
alternative_if_not ARM64_WORKAROUND_834220
10181023
and x2, x1, #ESR_ELx_FSC_TYPE
10191024
cmp x2, #FSC_PERM
10201025
b.ne 1f // Not a permission fault
1026+
alternative_else
1027+
nop // Use the permission fault path to
1028+
nop // check for a valid S1 translation,
1029+
nop // regardless of the ESR value.
1030+
alternative_endif
10211031

10221032
/*
10231033
* Check for Stage-1 page table walk, which is guaranteed

arch/arm64/kvm/inject_fault.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
4848

4949
/* Note: These now point to the banked copies */
5050
*vcpu_spsr(vcpu) = new_spsr_value;
51-
*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
51+
*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
5252

5353
/* Branch to exception vector */
5454
if (sctlr & (1 << 13))

arch/mips/kvm/emulate.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
15811581

15821582
base = (inst >> 21) & 0x1f;
15831583
op_inst = (inst >> 16) & 0x1f;
1584-
offset = inst & 0xffff;
1584+
offset = (int16_t)inst;
15851585
cache = (inst >> 16) & 0x3;
15861586
op = (inst >> 18) & 0x7;
15871587

arch/mips/kvm/locore.S

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run)
157157

158158
FEXPORT(__kvm_mips_load_asid)
159159
/* Set the ASID for the Guest Kernel */
160-
INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
161-
/* addresses shift to 0x80000000 */
162-
bltz t0, 1f /* If kernel */
160+
PTR_L t0, VCPU_COP0(k1)
161+
LONG_L t0, COP0_STATUS(t0)
162+
andi t0, KSU_USER | ST0_ERL | ST0_EXL
163+
xori t0, KSU_USER
164+
bnez t0, 1f /* If kernel */
163165
INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
164166
INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
165167
1:
@@ -474,9 +476,11 @@ __kvm_mips_return_to_guest:
474476
mtc0 t0, CP0_EPC
475477

476478
/* Set the ASID for the Guest Kernel */
477-
INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
478-
/* addresses shift to 0x80000000 */
479-
bltz t0, 1f /* If kernel */
479+
PTR_L t0, VCPU_COP0(k1)
480+
LONG_L t0, COP0_STATUS(t0)
481+
andi t0, KSU_USER | ST0_ERL | ST0_EXL
482+
xori t0, KSU_USER
483+
bnez t0, 1f /* If kernel */
480484
INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
481485
INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
482486
1:

0 commit comments

Comments (0)