Commit 9452a80

JoonsooKim authored and AKASHI Takahiro committed
mm/slab: activate debug_pagealloc in SLAB when it is actually enabled
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 3cd7d78 commit 9452a80

1 file changed: mm/slab.c

Lines changed: 10 additions & 5 deletions
@@ -1847,7 +1847,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if (cachep->size % PAGE_SIZE == 0 &&
+		if (debug_pagealloc_enabled() &&
+			cachep->size % PAGE_SIZE == 0 &&
 					OFF_SLAB(cachep))
 			kernel_map_pages(virt_to_page(objp),
 					cachep->size / PAGE_SIZE, 1);
@@ -2187,7 +2188,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * to check size >= 256. It guarantees that all necessary small
 	 * sized slab is initialized in current slab initialization sequence.
 	 */
-	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+	if (debug_pagealloc_enabled() &&
+		!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
 	    size >= 256 && cachep->object_size > cache_line_size() &&
 	    ALIGN(size, cachep->align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
@@ -2243,7 +2245,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * poisoning, then it's going to smash the contents of
 	 * the redzone and userword anyhow, so switch them off.
 	 */
-	if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+	if (debug_pagealloc_enabled() &&
+		size % PAGE_SIZE == 0 && flags & SLAB_POISON)
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 #endif
 	}
@@ -2737,7 +2740,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	set_obj_status(page, objnr, OBJECT_FREE);
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+		if (debug_pagealloc_enabled() &&
+			(cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
 			store_stackinfo(cachep, objp, caller);
 			kernel_map_pages(virt_to_page(objp),
 					 cachep->size / PAGE_SIZE, 0);
@@ -2874,7 +2878,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+		if (debug_pagealloc_enabled() &&
+			(cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
 			kernel_map_pages(virt_to_page(objp),
 					 cachep->size / PAGE_SIZE, 1);
 		else
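
Every hunk above applies the same guard: the CONFIG_DEBUG_PAGEALLOC-only paths in SLAB used to run whenever the option was compiled in, and after this patch they also check debug_pagealloc_enabled() at runtime before doing page-table work through kernel_map_pages(). What follows is a minimal, self-contained userspace sketch of that decision logic, not code from the patch; debug_pagealloc_enabled(), off_slab, and should_map_pages() are stand-in stubs chosen here for illustration.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stub standing in for the kernel's debug_pagealloc_enabled(): true only
 * when debug_pagealloc is both built in and requested at boot. */
static bool debug_pagealloc_enabled(void)
{
	return false;	/* built in, but not enabled on the command line */
}

/* Stub standing in for OFF_SLAB(cachep): object management is kept off the
 * slab page, so the whole page can be unmapped for poisoning checks. */
static bool off_slab = true;

/* Mirrors the patched condition in cache_alloc_debugcheck_after(): page
 * mappings are only changed when debug_pagealloc is actually active. */
static bool should_map_pages(unsigned long obj_size)
{
	return debug_pagealloc_enabled() &&
	       (obj_size % PAGE_SIZE) == 0 && off_slab;
}

int main(void)
{
	printf("map pages for %lu-byte objects? %s\n", PAGE_SIZE,
	       should_map_pages(PAGE_SIZE) ? "yes" : "no");
	return 0;
}

With debug_pagealloc compiled in but left disabled, the guard now evaluates to false and the map/unmap work is skipped; before the patch the size and OFF_SLAB checks alone decided.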
