
Commit edc55b7

ryanhrob authored and ctmarinas committed
arm64: mm: Implicitly invalidate user ASID based on TLBI operation
When kpti is enabled, separate ASIDs are used for userspace and
kernelspace, requiring ASID-qualified TLB invalidation by virtual
address to invalidate both of them.

Push the logic for invalidating the two ASIDs down into the low-level
tlbi-op-specific functions and remove the burden from the caller to
handle the kpti-specific behaviour.

Co-developed-by: Will Deacon <will@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
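For context (not shown in the diff below), the user-ASID handling that the tlbi-op helpers now absorb lives in the pre-existing __tlbi_user() macro defined earlier in tlbflush.h. A minimal sketch of its shape, assuming the long-standing kpti definition rather than quoting it verbatim, looks like this:

/* Sketch only: repeat the invalidation for the userspace ASID when
 * kpti has split the kernel/user ASIDs; a no-op otherwise. See the
 * upstream tlbflush.h for the authoritative definition. */
#define __tlbi_user(op, arg) do {					\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi(op, (arg) | USER_ASID_FLAG);			\
} while (0)

Because the macro degenerates to nothing unless kpti is active, calling it unconditionally from vae1is(), vale1(), rvae1is() and the other helpers keeps the non-kpti path unchanged while making the user-ASID invalidation implicit for every caller; the #undef __tlbi_user added at the end of the header presumably keeps the macro private to these helpers from now on.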
1 parent d2bf322 commit edc55b7

1 file changed: arch/arm64/include/asm/tlbflush.h

Lines changed: 13 additions & 17 deletions
@@ -102,6 +102,7 @@ typedef void (*tlbi_op)(u64 arg);
 static __always_inline void vae1is(u64 arg)
 {
 	__tlbi(vae1is, arg);
+	__tlbi_user(vae1is, arg);
 }
 
 static __always_inline void vae2is(u64 arg)
@@ -112,11 +113,13 @@ static __always_inline void vae2is(u64 arg)
 static __always_inline void vale1(u64 arg)
 {
 	__tlbi(vale1, arg);
+	__tlbi_user(vale1, arg);
 }
 
 static __always_inline void vale1is(u64 arg)
 {
 	__tlbi(vale1is, arg);
+	__tlbi_user(vale1is, arg);
 }
 
 static __always_inline void vale2is(u64 arg)
@@ -152,11 +155,6 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
 	op(arg);
 }
 
-#define __tlbi_user_level(op, arg, level) do {				\
-	if (arm64_kernel_unmapped_at_el0())				\
-		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
-} while (0)
-
 /*
  * This macro creates a properly formatted VA operand for the TLB RANGE. The
  * value bit assignments are:
@@ -444,8 +442,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * @stride: Flush granularity
  * @asid: The ASID of the task (0 for IPA instructions)
  * @tlb_level: Translation Table level hint, if known
- * @tlbi_user: If 'true', call an additional __tlbi_user()
- *              (typically for user ASIDs). 'flase' for IPA instructions
  * @lpa2: If 'true', the lpa2 scheme is used as set out below
  *
  * When the CPU does not support TLB range operations, flush the TLB
@@ -471,16 +467,19 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 static __always_inline void rvae1is(u64 arg)
 {
 	__tlbi(rvae1is, arg);
+	__tlbi_user(rvae1is, arg);
 }
 
 static __always_inline void rvale1(u64 arg)
 {
 	__tlbi(rvale1, arg);
+	__tlbi_user(rvale1, arg);
 }
 
 static __always_inline void rvale1is(u64 arg)
 {
 	__tlbi(rvale1is, arg);
+	__tlbi_user(rvale1is, arg);
 }
 
 static __always_inline void rvaale1is(u64 arg)
@@ -499,7 +498,7 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 arg)
 }
 
 #define __flush_tlb_range_op(op, start, pages, stride,			\
-				asid, tlb_level, tlbi_user, lpa2)	\
+				asid, tlb_level, lpa2)			\
 do {									\
 	typeof(start) __flush_start = start;				\
 	typeof(pages) __flush_pages = pages;				\
@@ -514,8 +513,6 @@ do { \
 		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
 			addr = __TLBI_VADDR(__flush_start, asid);	\
 			__tlbi_level(op, addr, tlb_level);		\
-			if (tlbi_user)					\
-				__tlbi_user_level(op, addr, tlb_level);	\
 			__flush_start += stride;			\
 			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
@@ -526,8 +523,6 @@ do { \
 		addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid,	\
 					  scale, num, tlb_level);	\
 		__tlbi_range(r##op, addr);				\
-		if (tlbi_user)						\
-			__tlbi_user(r##op, addr);			\
 		__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
 		__flush_pages -= __TLBI_RANGE_PAGES(num, scale);	\
 	}								\
@@ -536,7 +531,7 @@ do { \
 } while (0)
 
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
-	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
+	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled());
 
 static inline bool __flush_tlb_range_limit_excess(unsigned long start,
 		unsigned long end, unsigned long pages, unsigned long stride)
@@ -576,10 +571,10 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 
 	if (last_level)
 		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
-				     tlb_level, true, lpa2_is_enabled());
+				     tlb_level, lpa2_is_enabled());
 	else
 		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
-				     tlb_level, true, lpa2_is_enabled());
+				     tlb_level, lpa2_is_enabled());
 
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
@@ -604,7 +599,7 @@ static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
 	dsb(nshst);
 	asid = ASID(vma->vm_mm);
 	__flush_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid,
-			     3, true, lpa2_is_enabled());
+			     3, lpa2_is_enabled());
 	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, addr,
 						    addr + CONT_PTE_SIZE);
 	dsb(nsh);
@@ -638,7 +633,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 
 	dsb(ishst);
 	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
-			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
+			     TLBI_TTL_UNKNOWN, lpa2_is_enabled());
 	__tlbi_sync_s1ish();
 	isb();
 }
@@ -689,6 +684,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
 }
 #define huge_pmd_needs_flush huge_pmd_needs_flush
 
+#undef __tlbi_user
 #endif
 
 #endif
