author	Joshua Hahn <joshua.hahnjy@gmail.com>	2025-10-14 07:50:09 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2025-11-16 17:28:04 -0800
commit	fc4b909c368f3a7b08c895dd5926476b58e85312 (patch)
tree	59f623d636cc57d710d3dcc2d6fa50fd914e5998 /mm/page_alloc.c
parent	0acc67c4030c39f39ac90413cc5d0abddd3a9527 (diff)
mm/page_alloc: batch page freeing in decay_pcp_high
It is possible for pcp->count - pcp->high to exceed pcp->batch by a lot.
When this happens, we should perform batching to ensure that
free_pcppages_bulk isn't called with too many pages to free at once and
starve out other threads that need the pcp or zone lock.

Since we are still only freeing the difference between the initial
pcp->count and pcp->high values, there should be no change to how many
pages are freed.

Link: https://lkml.kernel.org/r/20251014145011.3427205-3-joshua.hahnjy@gmail.com
Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
Suggested-by: Chris Mason <clm@fb.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Co-developed-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
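[Editor's note: the following is a minimal, self-contained userspace C sketch of the batching pattern the patch adopts, not kernel code. The names drain_batched() and free_bulk() are hypothetical; free_bulk() stands in for free_pcppages_bulk(). It drains count - high items in batch-sized chunks, dropping the lock between chunks so that waiters can take it.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for free_pcppages_bulk(): free 'n' items in one go. */
static void free_bulk(int n)
{
	printf("freeing %d pages\n", n);
}

/*
 * Drain (count - high) items in chunks of at most 'batch', releasing
 * the lock between chunks so other threads are not starved. The total
 * number of items freed is unchanged; only the longest single lock
 * hold is bounded.
 */
static void drain_batched(int count, int high, int batch)
{
	int to_drain = count - high;

	while (to_drain > 0) {
		int chunk = to_drain < batch ? to_drain : batch;

		pthread_mutex_lock(&lock);
		free_bulk(chunk);
		pthread_mutex_unlock(&lock);

		to_drain -= chunk;
	}
}

int main(void)
{
	drain_batched(1000, 100, 64);	/* frees 900 items in chunks of <= 64 */
	return 0;
}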
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f057ce5ea7da..52480f513ba2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2559,7 +2559,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
  */
 bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
 {
-	int high_min, to_drain, batch;
+	int high_min, to_drain, to_drain_batched, batch;
 	bool todo = false;
 
 	high_min = READ_ONCE(pcp->high_min);
@@ -2577,11 +2577,14 @@ bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
 	}
 
 	to_drain = pcp->count - pcp->high;
-	if (to_drain > 0) {
+	while (to_drain > 0) {
+		to_drain_batched = min(to_drain, batch);
 		spin_lock(&pcp->lock);
-		free_pcppages_bulk(zone, to_drain, pcp, 0);
+		free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
 		spin_unlock(&pcp->lock);
 		todo = true;
+
+		to_drain -= to_drain_batched;
 	}
 
 	return todo;