Skip to content

Commit 7d13203

Browse files
Laura Abbott (labbott) authored and Alex Shi committed
arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC
ARCH_SUPPORTS_DEBUG_PAGEALLOC provides a hook to map and unmap pages for debugging purposes. This requires memory be mapped with PAGE_SIZE mappings since breaking down larger mappings at runtime will lead to TLB conflicts. Check if debug_pagealloc is enabled at runtime and if so, map everything with PAGE_SIZE pages. Implement the functions to actually map/unmap the pages at runtime. Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Reviewed-by: Mark Rutland <mark.rutland@arm.com> Tested-by: Mark Rutland <mark.rutland@arm.com> Signed-off-by: Laura Abbott <labbott@fedoraproject.org> [catalin.marinas@arm.com: static annotation block_mappings_allowed() and #ifdef] Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> (cherry picked from commit 83863f25e4b8214e994ef8b5647aad614d74b45d) Signed-off-by: Alex Shi <alex.shi@linaro.org>
1 parent e1e5e10 commit 7d13203

3 files changed

Lines changed: 63 additions & 12 deletions

File tree

arch/arm64/Kconfig

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -530,6 +530,9 @@ config HOTPLUG_CPU
530530
source kernel/Kconfig.preempt
531531
source kernel/Kconfig.hz
532532

533+
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
534+
def_bool y
535+
533536
config ARCH_HAS_HOLES_MEMORYMODEL
534537
def_bool y if SPARSEMEM
535538

arch/arm64/mm/mmu.c

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -149,6 +149,26 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
149149
} while (pmd++, i++, i < PTRS_PER_PMD);
150150
}
151151

152+
#ifdef CONFIG_DEBUG_PAGEALLOC
153+
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
154+
{
155+
156+
/*
157+
* If debug_page_alloc is enabled we must map the linear map
158+
* using pages. However, other mappings created by
159+
* create_mapping_noalloc must use sections in some cases. Allow
160+
* sections to be used in those cases, where no pgtable_alloc
161+
* function is provided.
162+
*/
163+
return !pgtable_alloc || !debug_pagealloc_enabled();
164+
}
165+
#else
166+
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
167+
{
168+
return true;
169+
}
170+
#endif
171+
152172
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
153173
phys_addr_t phys, pgprot_t prot,
154174
phys_addr_t (*pgtable_alloc)(void))
@@ -181,7 +201,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
181201
do {
182202
next = pmd_addr_end(addr, end);
183203
/* try section mapping first */
184-
if (((addr | next | phys) & ~SECTION_MASK) == 0) {
204+
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
205+
block_mappings_allowed(pgtable_alloc)) {
185206
pmd_t old_pmd =*pmd;
186207
set_pmd(pmd, __pmd(phys |
187208
pgprot_val(mk_sect_prot(prot))));
@@ -241,7 +262,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
241262
/*
242263
* For 4K granule only, attempt to put down a 1GB block
243264
*/
244-
if (use_1G_block(addr, next, phys)) {
265+
if (use_1G_block(addr, next, phys) &&
266+
block_mappings_allowed(pgtable_alloc)) {
245267
pud_t old_pud = *pud;
246268
set_pud(pud, __pud(phys |
247269
pgprot_val(mk_sect_prot(prot))));

arch/arm64/mm/pageattr.c

Lines changed: 36 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -37,14 +37,31 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
3737
return 0;
3838
}
3939

40+
/*
41+
* This function assumes that the range is mapped with PAGE_SIZE pages.
42+
*/
43+
static int __change_memory_common(unsigned long start, unsigned long size,
44+
pgprot_t set_mask, pgprot_t clear_mask)
45+
{
46+
struct page_change_data data;
47+
int ret;
48+
49+
data.set_mask = set_mask;
50+
data.clear_mask = clear_mask;
51+
52+
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
53+
&data);
54+
55+
flush_tlb_kernel_range(start, start + size);
56+
return ret;
57+
}
58+
4059
static int change_memory_common(unsigned long addr, int numpages,
4160
pgprot_t set_mask, pgprot_t clear_mask)
4261
{
4362
unsigned long start = addr;
4463
unsigned long size = PAGE_SIZE*numpages;
4564
unsigned long end = start + size;
46-
int ret;
47-
struct page_change_data data;
4865
struct vm_struct *area;
4966

5067
if (!PAGE_ALIGNED(addr)) {
@@ -75,14 +92,7 @@ static int change_memory_common(unsigned long addr, int numpages,
7592
if (!numpages)
7693
return 0;
7794

78-
data.set_mask = set_mask;
79-
data.clear_mask = clear_mask;
80-
81-
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
82-
&data);
83-
84-
flush_tlb_kernel_range(start, end);
85-
return ret;
95+
return __change_memory_common(start, size, set_mask, clear_mask);
8696
}
8797

8898
int set_memory_ro(unsigned long addr, int numpages)
@@ -114,3 +124,19 @@ int set_memory_x(unsigned long addr, int numpages)
114124
__pgprot(PTE_PXN));
115125
}
116126
EXPORT_SYMBOL_GPL(set_memory_x);
127+
128+
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * debug_pagealloc hook: (un)map @numpages pages starting at @page in the
 * linear mapping by toggling PTE_VALID. Relies on the linear map having
 * been built with PAGE_SIZE entries when debug_pagealloc is enabled.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t set_mask = __pgprot(enable ? PTE_VALID : 0);
	pgprot_t clear_mask = __pgprot(enable ? 0 : PTE_VALID);

	__change_memory_common(addr, PAGE_SIZE * numpages,
			       set_mask, clear_mask);
}
#endif

0 commit comments

Comments
 (0)