Commit aaaf9e5

hnaz authored and Alex Shi committed
mm: page_alloc: generalize the dirty balance reserve
The dirty balance reserve that dirty throttling has to consider is
merely memory not available to userspace allocations. There is nothing
writeback-specific about it. Generalize the name so that it's reusable
outside of that context.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit a8d0143730d7b42c9fe6d1435d92ecce6863a62a)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
1 parent 83595f5 commit aaaf9e5

4 files changed

Lines changed: 18 additions & 24 deletions
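In effect, dirty throttling now subtracts the same reserve that page reclaim maintains: pages the kernel keeps back are not available to userspace, so they must not be counted as dirtyable either. Below is a minimal standalone sketch of the resulting calculation, with made-up page counts standing in for the kernel's global_page_state() lookups; it only mirrors the shape of global_dirtyable_memory() after the patch, it is not the kernel code itself.

#include <stdio.h>

/* Hypothetical page counts; in the kernel these come from vmstat counters. */
static unsigned long nr_free_pages      = 50000;
static unsigned long nr_inactive_file   = 20000;
static unsigned long nr_active_file     = 10000;
static unsigned long totalreserve_pages = 8000;  /* pages kept back for the kernel */

/* Same shape as global_dirtyable_memory() after this patch. */
static unsigned long dirtyable_memory(void)
{
        unsigned long x = nr_free_pages;

        /* min() clamp: never subtract more than is actually free. */
        x -= x < totalreserve_pages ? x : totalreserve_pages;
        x += nr_inactive_file;
        x += nr_active_file;
        return x;
}

int main(void)
{
        printf("dirtyable pages: %lu\n", dirtyable_memory());
        return 0;
}

With these numbers, 50000 free pages minus the 8000-page reserve plus 30000 file pages gives 72000 dirtyable pages.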


include/linux/mmzone.h

Lines changed: 3 additions & 3 deletions
@@ -361,10 +361,10 @@ struct zone {
         struct per_cpu_pageset __percpu *pageset;
 
         /*
-         * This is a per-zone reserve of pages that should not be
-         * considered dirtyable memory.
+         * This is a per-zone reserve of pages that are not available
+         * to userspace allocations.
          */
-        unsigned long dirty_balance_reserve;
+        unsigned long totalreserve_pages;
 
 #ifndef CONFIG_SPARSEMEM
         /*

include/linux/swap.h

Lines changed: 0 additions & 1 deletion
@@ -289,7 +289,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
-extern unsigned long dirty_balance_reserve;
 extern unsigned long nr_free_buffer_pages(void);
 extern unsigned long nr_free_pagecache_pages(void);
 

mm/page-writeback.c

Lines changed: 12 additions & 2 deletions
@@ -278,7 +278,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
         unsigned long nr_pages;
 
         nr_pages = zone_page_state(zone, NR_FREE_PAGES);
-        nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+        /*
+         * Pages reserved for the kernel should not be considered
+         * dirtyable, to prevent a situation where reclaim has to
+         * clean pages in order to balance the zones.
+         */
+        nr_pages -= min(nr_pages, zone->totalreserve_pages);
 
         nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
         nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);

@@ -332,7 +337,12 @@ static unsigned long global_dirtyable_memory(void)
         unsigned long x;
 
         x = global_page_state(NR_FREE_PAGES);
-        x -= min(x, dirty_balance_reserve);
+        /*
+         * Pages reserved for the kernel should not be considered
+         * dirtyable, to prevent a situation where reclaim has to
+         * clean pages in order to balance the zones.
+         */
+        x -= min(x, totalreserve_pages);
 
         x += global_page_state(NR_INACTIVE_FILE);
         x += global_page_state(NR_ACTIVE_FILE);
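The min() in both hunks is doing real work: free pages can transiently drop below the reserve, and with unsigned arithmetic a bare subtraction would wrap around to an enormous dirtyable count. A tiny illustration with hypothetical values:

#include <stdio.h>

int main(void)
{
        unsigned long free_pages = 3000;   /* hypothetical: below the reserve */
        unsigned long reserve    = 8000;

        unsigned long unguarded = free_pages - reserve;  /* wraps around */
        unsigned long guarded   = free_pages -
                (free_pages < reserve ? free_pages : reserve);

        printf("without min(): %lu\n", unguarded);  /* huge wrapped value */
        printf("with min():    %lu\n", guarded);    /* 0 */
        return 0;
}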

mm/page_alloc.c

Lines changed: 3 additions & 18 deletions
@@ -114,13 +114,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
-/*
- * When calculating the number of globally allowed dirty pages, there
- * is a certain number of per-zone reserves that should not be
- * considered dirtyable memory. This is the sum of those reserves
- * over all existing zones that contribute dirtyable memory.
- */
-unsigned long dirty_balance_reserve __read_mostly;
 
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

@@ -5978,20 +5971,12 @@ static void calculate_totalreserve_pages(void)
 
                         if (max > zone->managed_pages)
                                 max = zone->managed_pages;
+
+                        zone->totalreserve_pages = max;
+
                         reserve_pages += max;
-                        /*
-                         * Lowmem reserves are not available to
-                         * GFP_HIGHUSER page cache allocations and
-                         * kswapd tries to balance zones to their high
-                         * watermark. As a result, neither should be
-                         * regarded as dirtyable memory, to prevent a
-                         * situation where reclaim has to clean pages
-                         * in order to balance the zones.
-                         */
-                        zone->dirty_balance_reserve = max;
                 }
         }
-        dirty_balance_reserve = reserve_pages;
         totalreserve_pages = reserve_pages;
 }
 
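For context, this hunk sits inside calculate_totalreserve_pages(), which walks every zone, takes the largest applicable lowmem_reserve entry plus the zone's high watermark, caps the result at the zone's managed pages, and sums the per-zone values into the global total. A simplified standalone sketch of that accumulation follows; the struct and zone data below are illustrative stand-ins, not the kernel's types.

#include <stdio.h>

#define NR_ZONES 2

/* Stand-in for struct zone: only the fields this calculation touches. */
struct zone {
        unsigned long lowmem_reserve[NR_ZONES]; /* pages held back from lower-zone fallback */
        unsigned long high_wmark;               /* high_wmark_pages(zone) */
        unsigned long managed_pages;
        unsigned long totalreserve_pages;
};

static unsigned long totalreserve_pages;

static void calculate_totalreserve_pages(struct zone *zones, int nr)
{
        unsigned long reserve_pages = 0;

        for (int i = 0; i < nr; i++) {
                struct zone *zone = &zones[i];
                unsigned long max = 0;

                /* Largest lowmem reserve this zone holds against higher zones. */
                for (int j = i; j < nr; j++)
                        if (zone->lowmem_reserve[j] > max)
                                max = zone->lowmem_reserve[j];

                /* kswapd balances zones to their high watermark, so those
                 * pages are effectively reserved as well. */
                max += zone->high_wmark;

                /* A zone cannot reserve more pages than it manages. */
                if (max > zone->managed_pages)
                        max = zone->managed_pages;

                zone->totalreserve_pages = max;
                reserve_pages += max;
        }
        totalreserve_pages = reserve_pages;
}

int main(void)
{
        struct zone zones[NR_ZONES] = {
                { .lowmem_reserve = { 0, 2000 }, .high_wmark = 1500,
                  .managed_pages = 100000 },
                { .lowmem_reserve = { 0, 0 },    .high_wmark = 3000,
                  .managed_pages = 400000 },
        };

        calculate_totalreserve_pages(zones, NR_ZONES);
        printf("totalreserve_pages = %lu\n", totalreserve_pages);  /* 6500 */
        return 0;
}

The real function additionally loops over all online NUMA nodes; the sketch keeps only the per-zone arithmetic that the hunk above changes.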
