Skip to content

Commit 4cef048

Browse files
Joonsoo Kim authored and AKASHI Takahiro committed
mm/slab: clean up DEBUG_PAGEALLOC processing code
Currently, open code for checking DEBUG_PAGEALLOC cache is spread to some
sites. It makes code unreadable and hard to change.

This patch cleans up this code. The following patch will change the
criteria for DEBUG_PAGEALLOC cache so this clean-up will help it, too.

[akpm@linux-foundation.org: fix build with CONFIG_DEBUG_PAGEALLOC=n]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent f7cd74f commit 4cef048

2 files changed

Lines changed: 57 additions & 52 deletions

File tree

include/linux/mm.h

Lines changed: 8 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -2148,14 +2148,18 @@ kernel_map_pages(struct page *page, int numpages, int enable)
21482148
}
21492149
#ifdef CONFIG_HIBERNATION
21502150
extern bool kernel_page_present(struct page *page);
2151-
#endif /* CONFIG_HIBERNATION */
2152-
#else
2151+
#endif /* CONFIG_HIBERNATION */
2152+
#else /* CONFIG_DEBUG_PAGEALLOC */
21532153
static inline void
21542154
kernel_map_pages(struct page *page, int numpages, int enable) {}
21552155
#ifdef CONFIG_HIBERNATION
21562156
static inline bool kernel_page_present(struct page *page) { return true; }
2157-
#endif /* CONFIG_HIBERNATION */
2158-
#endif
2157+
#endif /* CONFIG_HIBERNATION */
2158+
static inline bool debug_pagealloc_enabled(void)
2159+
{
2160+
return false;
2161+
}
2162+
#endif /* CONFIG_DEBUG_PAGEALLOC */
21592163

21602164
#ifdef __HAVE_ARCH_GATE_AREA
21612165
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);

mm/slab.c

Lines changed: 49 additions & 48 deletions
Original file line number | Diff line number | Diff line change
@@ -1670,6 +1670,14 @@ static void kmem_rcu_free(struct rcu_head *head)
16701670
}
16711671

16721672
#if DEBUG
1673+
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1674+
{
1675+
if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1676+
(cachep->size % PAGE_SIZE) == 0)
1677+
return true;
1678+
1679+
return false;
1680+
}
16731681

16741682
#ifdef CONFIG_DEBUG_PAGEALLOC
16751683
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
@@ -1703,6 +1711,23 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
17031711
}
17041712
*addr++ = 0x87654321;
17051713
}
1714+
1715+
static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1716+
int map, unsigned long caller)
1717+
{
1718+
if (!is_debug_pagealloc_cache(cachep))
1719+
return;
1720+
1721+
if (caller)
1722+
store_stackinfo(cachep, objp, caller);
1723+
1724+
kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1725+
}
1726+
1727+
#else
1728+
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1729+
int map, unsigned long caller) {}
1730+
17061731
#endif
17071732

17081733
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
@@ -1781,6 +1806,9 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
17811806
int size, i;
17821807
int lines = 0;
17831808

1809+
if (is_debug_pagealloc_cache(cachep))
1810+
return;
1811+
17841812
realobj = (char *)objp + obj_offset(cachep);
17851813
size = cachep->object_size;
17861814

@@ -1846,17 +1874,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
18461874
void *objp = index_to_obj(cachep, page, i);
18471875

18481876
if (cachep->flags & SLAB_POISON) {
1849-
#ifdef CONFIG_DEBUG_PAGEALLOC
1850-
if (debug_pagealloc_enabled() &&
1851-
cachep->size % PAGE_SIZE == 0 &&
1852-
OFF_SLAB(cachep))
1853-
kernel_map_pages(virt_to_page(objp),
1854-
cachep->size / PAGE_SIZE, 1);
1855-
else
1856-
check_poison_obj(cachep, objp);
1857-
#else
18581877
check_poison_obj(cachep, objp);
1859-
#endif
1878+
slab_kernel_map(cachep, objp, 1, 0);
18601879
}
18611880
if (cachep->flags & SLAB_RED_ZONE) {
18621881
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2237,16 +2256,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
22372256
if (flags & CFLGS_OFF_SLAB) {
22382257
/* really off slab. No need for manual alignment */
22392258
freelist_size = calculate_freelist_size(cachep->num, 0);
2240-
2241-
#ifdef CONFIG_PAGE_POISONING
2242-
/* If we're going to use the generic kernel_map_pages()
2243-
* poisoning, then it's going to smash the contents of
2244-
* the redzone and userword anyhow, so switch them off.
2245-
*/
2246-
if (debug_pagealloc_enabled() &&
2247-
size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2248-
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2249-
#endif
22502259
}
22512260

22522261
cachep->colour_off = cache_line_size();
@@ -2262,7 +2271,19 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
22622271
cachep->size = size;
22632272
cachep->reciprocal_buffer_size = reciprocal_value(size);
22642273

2265-
if (flags & CFLGS_OFF_SLAB) {
2274+
#if DEBUG
2275+
/*
2276+
* If we're going to use the generic kernel_map_pages()
2277+
* poisoning, then it's going to smash the contents of
2278+
* the redzone and userword anyhow, so switch them off.
2279+
*/
2280+
if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2281+
(cachep->flags & SLAB_POISON) &&
2282+
is_debug_pagealloc_cache(cachep))
2283+
cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2284+
#endif
2285+
2286+
if (OFF_SLAB(cachep)) {
22662287
cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
22672288
/*
22682289
* This is a possibility for one of the kmalloc_{dma,}_caches.
@@ -2489,9 +2510,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
24892510
for (i = 0; i < cachep->num; i++) {
24902511
void *objp = index_to_obj(cachep, page, i);
24912512
#if DEBUG
2492-
/* need to poison the objs? */
2493-
if (cachep->flags & SLAB_POISON)
2494-
poison_obj(cachep, objp, POISON_FREE);
24952513
if (cachep->flags & SLAB_STORE_USER)
24962514
*dbg_userword(cachep, objp) = NULL;
24972515

@@ -2515,10 +2533,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
25152533
slab_error(cachep, "constructor overwrote the"
25162534
" start of an object");
25172535
}
2518-
if ((cachep->size % PAGE_SIZE) == 0 &&
2519-
OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2520-
kernel_map_pages(virt_to_page(objp),
2521-
cachep->size / PAGE_SIZE, 0);
2536+
/* need to poison the objs? */
2537+
if (cachep->flags & SLAB_POISON) {
2538+
poison_obj(cachep, objp, POISON_FREE);
2539+
slab_kernel_map(cachep, objp, 0, 0);
2540+
}
25222541
#else
25232542
if (cachep->ctor)
25242543
cachep->ctor(objp);
@@ -2737,18 +2756,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
27372756

27382757
set_obj_status(page, objnr, OBJECT_FREE);
27392758
if (cachep->flags & SLAB_POISON) {
2740-
#ifdef CONFIG_DEBUG_PAGEALLOC
2741-
if (debug_pagealloc_enabled() &&
2742-
(cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
2743-
store_stackinfo(cachep, objp, caller);
2744-
kernel_map_pages(virt_to_page(objp),
2745-
cachep->size / PAGE_SIZE, 0);
2746-
} else {
2747-
poison_obj(cachep, objp, POISON_FREE);
2748-
}
2749-
#else
27502759
poison_obj(cachep, objp, POISON_FREE);
2751-
#endif
2760+
slab_kernel_map(cachep, objp, 0, caller);
27522761
}
27532762
return objp;
27542763
}
@@ -2875,16 +2884,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
28752884
if (!objp)
28762885
return objp;
28772886
if (cachep->flags & SLAB_POISON) {
2878-
#ifdef CONFIG_DEBUG_PAGEALLOC
2879-
if (debug_pagealloc_enabled() &&
2880-
(cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
2881-
kernel_map_pages(virt_to_page(objp),
2882-
cachep->size / PAGE_SIZE, 1);
2883-
else
2884-
check_poison_obj(cachep, objp);
2885-
#else
28862887
check_poison_obj(cachep, objp);
2887-
#endif
2888+
slab_kernel_map(cachep, objp, 1, 0);
28882889
poison_obj(cachep, objp, POISON_INUSE);
28892890
}
28902891
if (cachep->flags & SLAB_STORE_USER)

0 commit comments

Comments
 (0)