@@ -1065,18 +1065,25 @@ static noinline int alloc_debug_processing(struct kmem_cache *s,
 	return 0;
 }
 
+/* Supports checking bulk free of a constructed freelist */
 static noinline struct kmem_cache_node *free_debug_processing(
-	struct kmem_cache *s, struct page *page, void *object,
+	struct kmem_cache *s, struct page *page,
+	void *head, void *tail, int bulk_cnt,
 	unsigned long addr, unsigned long *flags)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+	void *object = head;
+	int cnt = 0;
 
 	spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
 
 	if (!check_slab(s, page))
 		goto fail;
 
+next_object:
+	cnt++;
+
 	if (!check_valid_pointer(s, page, object)) {
 		slab_err(s, page, "Invalid object pointer 0x%p", object);
 		goto fail;
@@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing(
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
+	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
 	init_object(s, object, SLUB_RED_INACTIVE);
+
+	/* Reached end of constructed freelist yet? */
+	if (object != tail) {
+		object = get_freepointer(s, object);
+		goto next_object;
+	}
 out:
+	if (cnt != bulk_cnt)
+		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
+			 bulk_cnt, cnt);
+
 	slab_unlock(page);
 	/*
 	 * Keep node_lock to preserve integrity
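
The two hunks above turn free_debug_processing() into a loop over a caller-constructed freelist: starting at head, each object is validated, the walk follows the object's embedded free pointer, and it stops once tail is reached; the number of objects seen is then cross-checked against bulk_cnt. A minimal userspace sketch of that walk-and-count pattern (illustrative struct layout and names, not kernel code):

#include <stdio.h>
#include <stddef.h>

struct obj {
	void *free_ptr;		/* stand-in for the SLUB free pointer at s->offset */
	char payload[56];
};

/* Walk head..tail via the embedded pointer, returning how many objects were seen. */
static int walk_freelist(struct obj *head, struct obj *tail)
{
	struct obj *o = head;
	int cnt = 0;

	for (;;) {
		cnt++;			/* corresponds to "next_object: cnt++" above */
		if (o == tail)
			break;		/* reached end of constructed freelist */
		o = o->free_ptr;	/* corresponds to get_freepointer() */
	}
	return cnt;
}

int main(void)
{
	struct obj a, b, c;
	int cnt;

	/* Build a three-object freelist: a -> b -> c */
	a.free_ptr = &b;
	b.free_ptr = &c;
	c.free_ptr = NULL;

	cnt = walk_freelist(&a, &c);
	if (cnt != 3)
		printf("Bulk freelist count(%d) invalid(%d)\n", 3, cnt);
	else
		printf("verified freelist of %d objects\n", cnt);
	return 0;
}
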
@@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline struct kmem_cache_node *free_debug_processing(
-	struct kmem_cache *s, struct page *page, void *object,
+	struct kmem_cache *s, struct page *page,
+	void *head, void *tail, int bulk_cnt,
 	unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
@@ -1308,6 +1327,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	kasan_slab_free(s, x);
 }
 
+static inline void slab_free_freelist_hook(struct kmem_cache *s,
+					   void *head, void *tail)
+{
+/*
+ * Compiler cannot detect this function can be removed if slab_free_hook()
+ * evaluates to nothing.  Thus, catch all relevant config debug options here.
+ */
+#if defined(CONFIG_KMEMCHECK) ||		\
+	defined(CONFIG_LOCKDEP)	||		\
+	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
+	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
+	defined(CONFIG_KASAN)
+
+	void *object = head;
+	void *tail_obj = tail ? : head;
+
+	do {
+		slab_free_hook(s, object);
+	} while ((object != tail_obj) &&
+		 (object = get_freepointer(s, object)));
+#endif
+}
+
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
@@ -2583,10 +2625,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr)
+			void *head, void *tail, int cnt,
+			unsigned long addr)
+
 {
 	void *prior;
-	void **object = (void *)x;
 	int was_frozen;
 	struct page new;
 	unsigned long counters;
@@ -2596,7 +2639,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 
 	if (kmem_cache_debug(s) &&
-	    !(n = free_debug_processing(s, page, x, addr, &flags)))
+	    !(n = free_debug_processing(s, page, head, tail, cnt,
+					addr, &flags)))
 		return;
 
 	do {
@@ -2606,10 +2650,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	}
 	prior = page->freelist;
 	counters = page->counters;
-	set_freepointer(s, object, prior);
+	set_freepointer(s, tail, prior);
 	new.counters = counters;
 	was_frozen = new.frozen;
-	new.inuse--;
+	new.inuse -= cnt;
 	if ((!new.inuse || !prior) && !was_frozen) {
 
 		if (kmem_cache_has_cpu_partial(s) && !prior) {
@@ -2640,7 +2684,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	} while (!cmpxchg_double_slab(s, page,
 		prior, counters,
-		object, new.counters,
+		head, new.counters,
 		"__slab_free"));
 
 	if (likely(!n)) {
@@ -2705,15 +2749,20 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
  *
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
+ *
+ * Bulk free of a freelist with several objects (all pointing to the
+ * same page) possible by specifying head and tail ptr, plus objects
+ * count (cnt). Bulk free indicated by tail pointer being set.
  */
-static __always_inline void slab_free(struct kmem_cache *s,
-	struct page *page, void *x, unsigned long addr)
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+				      void *head, void *tail, int cnt,
+				      unsigned long addr)
 {
-	void **object = (void *)x;
+	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	slab_free_hook(s, x);
+	slab_free_freelist_hook(s, head, tail);
 
 redo:
 	/*
@@ -2732,19 +2781,19 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	barrier();
 
 	if (likely(page == c->page)) {
-		set_freepointer(s, object, c->freelist);
+		set_freepointer(s, tail_obj, c->freelist);
 
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
-				object, next_tid(tid)))) {
+				head, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
 		stat(s, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr);
+		__slab_free(s, page, head, tail_obj, cnt, addr);
 
 }
 
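As the comment in the hunk above states, a bulk free is signalled by a non-NULL tail: the caller links objects from the same page through their free pointers and passes head, tail and the object count, while a single free passes (object, NULL, 1). A small userspace sketch of that calling convention (helper name and layout are made up, not kernel code):

#include <stdio.h>
#include <stddef.h>

struct obj {
	void *free_ptr;			/* stand-in for the SLUB free pointer */
};

/* Mimics slab_free()'s head/tail/cnt convention; tail == NULL means a single object. */
static void fake_slab_free(struct obj *head, struct obj *tail, int cnt)
{
	struct obj *tail_obj = tail ? tail : head;	/* mirrors "tail ?: head" */

	printf("freeing %d object(s): head=%p tail=%p (%s free)\n",
	       cnt, (void *)head, (void *)tail_obj, tail ? "bulk" : "single");
	/* the real code splices head..tail_obj onto the freelist in one cmpxchg */
}

int main(void)
{
	struct obj objs[3];

	/* Bulk path: chain objs[0] -> objs[1] -> objs[2], then free all three at once. */
	objs[0].free_ptr = &objs[1];
	objs[1].free_ptr = &objs[2];
	objs[2].free_ptr = NULL;
	fake_slab_free(&objs[0], &objs[2], 3);

	/* Single-object path, as used by kmem_cache_free() and kfree(). */
	fake_slab_free(&objs[0], NULL, 1);
	return 0;
}
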
@@ -2753,7 +2802,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	s = cache_from_obj(s, x);
 	if (!s)
 		return;
-	slab_free(s, virt_to_head_page(x), x, _RET_IP_);
+	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -2788,7 +2837,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 			c->tid = next_tid(c->tid);
 			local_irq_enable();
 			/* Slowpath: overhead locked cmpxchg_double_slab */
-			__slab_free(s, page, object, _RET_IP_);
+			__slab_free(s, page, object, object, 1, _RET_IP_);
 			local_irq_disable();
 			c = this_cpu_ptr(s->cpu_slab);
 		}
@@ -3523,7 +3572,7 @@ void kfree(const void *x)
 		__free_kmem_pages(page, compound_order(page));
 		return;
 	}
-	slab_free(page->slab_cache, page, object, _RET_IP_);
+	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 