
Commit dacbe1f

Joonsoo Kim authored and Alex Shi committed
mm/slub: support left redzone
SLUB already has a redzone debugging feature. But it is only placed at the end of the object (the right redzone), so it cannot catch a left out-of-bounds (OOB) access. Although the current object's right redzone acts as the left redzone of the next object, the first object in a slab cannot take advantage of this effect. This patch explicitly adds a left red zone to each object to detect left OOB more precisely.

Background: someone complained to me that a left OOB is not caught even when KASAN, which does page allocation debugging, is enabled. The page to the left is out of our control, so it may well be allocated when the left OOB happens, and in that case we cannot detect the OOB. Moreover, the SLUB debugging feature can be enabled without page allocator debugging, and in that case we will also miss the OOB.

Before trying to implement this I expected the changes would be too complex, but it does not look that complex to me now. Most changes apply to debug-specific functions, so I feel okay about it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit d86bd1bece6fc41d59253002db5441fe960a37f6)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
1 parent 3ad78ba commit dacbe1f

2 files changed

Lines changed: 72 additions & 29 deletions
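
For orientation, here is a minimal layout sketch (ordinary userspace C, with assumed names and sizes that are illustrative only, not taken from slub.c). It shows the per-slot layout the patch creates when SLAB_RED_ZONE is set: a left red-zone pad precedes every object, and the pointer handed to callers is the slab base advanced past that pad, which is the role fixup_red_left() plays in the diff below.

/*
 * Illustrative sketch only -- assumed sizes, userspace C, not kernel code.
 * With left redzoning, each slot in a slab looks like:
 *   [ left redzone ][ object ][ right redzone / metadata ]
 */
#include <stdio.h>
#include <stdint.h>

#define OBJECT_SIZE   24u   /* assumed s->object_size */
#define RED_LEFT_PAD   8u   /* assumed: sizeof(void *) aligned to s->align */
#define RED_RIGHT_PAD  8u   /* the pre-existing right redzone word */

int main(void)
{
	size_t slot_size = RED_LEFT_PAD + OBJECT_SIZE + RED_RIGHT_PAD;
	uint8_t slab[3 * (RED_LEFT_PAD + OBJECT_SIZE + RED_RIGHT_PAD)];

	for (size_t i = 0; i < 3; i++) {
		uint8_t *slot = slab + i * slot_size;    /* slot start = left redzone */
		uint8_t *object = slot + RED_LEFT_PAD;   /* what callers receive */

		printf("slot %zu: left redzone at offset %td, object at offset %td\n",
		       i, slot - slab, object - slab);
	}
	return 0;
}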

File tree

include/linux/slub_def.h

Lines changed: 1 addition & 0 deletions
@@ -81,6 +81,7 @@ struct kmem_cache {
 	int reserved;		/* Reserved bytes at the end of slabs */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
+	int red_left_pad;	/* Left redzone padding size */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif

mm/slub.c

Lines changed: 71 additions & 29 deletions
@@ -124,6 +124,14 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+{
+	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+		p += s->red_left_pad;
+
+	return p;
+}
+
 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -224,24 +232,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
  * Core slab cache functions
  *******************************************************************/
 
-/* Verify that a pointer has an address that is valid within a slab page */
-static inline int check_valid_pointer(struct kmem_cache *s,
-				struct page *page, const void *object)
-{
-	void *base;
-
-	if (!object)
-		return 1;
-
-	base = page_address(page);
-	if (object < base || object >= base + page->objects * s->size ||
-		(object - base) % s->size) {
-		return 0;
-	}
-
-	return 1;
-}
-
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
@@ -271,12 +261,14 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 
 /* Loop over all objects in a slab */
 #define for_each_object(__p, __s, __addr, __objects) \
-	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
-			__p += (__s)->size)
+	for (__p = fixup_red_left(__s, __addr); \
+		__p < (__addr) + (__objects) * (__s)->size; \
+		__p += (__s)->size)
 
 #define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-	for (__p = (__addr), __idx = 1; __idx <= __objects;\
-			__p += (__s)->size, __idx++)
+	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
+		__idx <= __objects; \
+		__p += (__s)->size, __idx++)
 
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -456,6 +448,22 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
 		set_bit(slab_index(p, s, addr), map);
 }
 
+static inline int size_from_object(struct kmem_cache *s)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		return s->size - s->red_left_pad;
+
+	return s->size;
+}
+
+static inline void *restore_red_left(struct kmem_cache *s, void *p)
+{
+	if (s->flags & SLAB_RED_ZONE)
+		p -= s->red_left_pad;
+
+	return p;
+}
+
 /*
  * Debug settings:
  */
@@ -489,6 +497,26 @@ static inline void metadata_access_disable(void)
 /*
  * Object debugging
  */
+
+/* Verify that a pointer has an address that is valid within a slab page */
+static inline int check_valid_pointer(struct kmem_cache *s,
+				struct page *page, void *object)
+{
+	void *base;
+
+	if (!object)
+		return 1;
+
+	base = page_address(page);
+	object = restore_red_left(s, object);
+	if (object < base || object >= base + page->objects * s->size ||
+		(object - base) % s->size) {
+		return 0;
+	}
+
+	return 1;
+}
+
 static void print_section(char *text, u8 *addr, unsigned int length)
 {
 	metadata_access_enable();
@@ -628,7 +656,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
 	       p, p - addr, get_freepointer(s, p));
 
-	if (p > addr + 16)
+	if (s->flags & SLAB_RED_ZONE)
+		print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+	else if (p > addr + 16)
 		print_section("Bytes b4 ", p - 16, 16);
 
 	print_section("Object ", p, min_t(unsigned long, s->object_size,
@@ -645,9 +675,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (s->flags & SLAB_STORE_USER)
 		off += 2 * sizeof(struct track);
 
-	if (off != s->size)
+	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, s->size - off);
+		print_section("Padding ", p + off, size_from_object(s) - off);
 
 	dump_stack();
 }
@@ -677,6 +707,9 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
 	u8 *p = object;
 
+	if (s->flags & SLAB_RED_ZONE)
+		memset(p - s->red_left_pad, val, s->red_left_pad);
+
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->object_size - 1);
 		p[s->object_size - 1] = POISON_END;
@@ -769,11 +802,11 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 		/* We also have user information there */
 		off += 2 * sizeof(struct track);
 
-	if (s->size == off)
+	if (size_from_object(s) == off)
 		return 1;
 
 	return check_bytes_and_report(s, page, p, "Object padding",
-		p + off, POISON_INUSE, s->size - off);
+			p + off, POISON_INUSE, size_from_object(s) - off);
 }
 
 /* Check the pad bytes at the end of a slab page */
@@ -817,6 +850,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	u8 *endobject = object + s->object_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
+		if (!check_bytes_and_report(s, page, object, "Redzone",
+			object - s->red_left_pad, val, s->red_left_pad))
+			return 0;
+
 		if (!check_bytes_and_report(s, page, object, "Redzone",
 			endobject, val, s->inuse - s->object_size))
 			return 0;
@@ -1468,7 +1505,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		set_freepointer(s, p, NULL);
 	}
 
-	page->freelist = start;
+	page->freelist = fixup_red_left(s, start);
 	page->inuse = page->objects;
 	page->frozen = 1;
 
@@ -3283,7 +3320,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 */
 		size += 2 * sizeof(struct track);
 
-	if (flags & SLAB_RED_ZONE)
+	if (flags & SLAB_RED_ZONE) {
 		/*
 		 * Add some empty padding so that we can catch
 		 * overwrites from earlier objects rather than let
@@ -3292,6 +3329,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 * of the object.
 		 */
 		size += sizeof(void *);
+
+		s->red_left_pad = sizeof(void *);
+		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
+		size += s->red_left_pad;
+	}
 #endif
 
 	/*
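
As a quick worked example of the sizing logic in this last hunk (the numbers are assumptions for illustration, not stated in the commit): on a 64-bit build with s->align == 8, red_left_pad becomes ALIGN(8, 8) == 8, so each object grows by 8 bytes on the left in addition to the existing right-side word; a cache aligned to 64 bytes would grow by 64. A standalone sketch of just that arithmetic:

/*
 * Standalone sketch of the red_left_pad arithmetic from calculate_sizes().
 * Assumed inputs only; the real function also accounts for the free pointer,
 * poisoning and allocation tracking.
 */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long aligns[] = { 8, 16, 64 };   /* assumed s->align values */

	for (int i = 0; i < 3; i++) {
		unsigned long red_left_pad = ALIGN(sizeof(void *), aligns[i]);
		printf("align %lu -> red_left_pad %lu bytes per object\n",
		       aligns[i], red_left_pad);
	}
	return 0;
}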
