author	Shakeel Butt <shakeel.butt@linux.dev>	2025-11-10 15:20:07 -0800
committer	Andrew Morton <akpm@linux-foundation.org>	2025-11-24 15:08:54 -0800
commit	5b3eb779a20cf30d74bb346d2a1e525bc9072685
tree	ad2fe8074542a18b86b9e96739271edab5cc88e3
parent	469241fe7657dbec9e2948287ab7412955d8b73a
memcg: remove __mod_lruvec_state
__mod_lruvec_state() is already safe against irqs, so there is no need to have a separate interface (i.e. mod_lruvec_state) which wraps calls to it with irq disabling and re-enabling.  Let's rename __mod_lruvec_state() to mod_lruvec_state().

Link: https://lkml.kernel.org/r/20251110232008.1352063-4-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
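For context, the wrapper this patch makes redundant followed the usual vmstat pattern: the plain-named interface disabled interrupts around the double-underscore one. A minimal sketch, reconstructed from the commit message rather than taken from this diff (the exact pre-patch definition may differ):

	/*
	 * Sketch of the pre-patch wrapper described above; for
	 * illustration only, not part of this diff.
	 */
	static inline void mod_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
	{
		unsigned long flags;

		/* Guard the formerly irq-unsafe variant against interrupts. */
		local_irq_save(flags);
		__mod_lruvec_state(lruvec, idx, val);
		local_irq_restore(flags);
	}

With __mod_lruvec_state() now irq-safe on its own, the wrapper buys nothing; the rename below collapses the pair into a single mod_lruvec_state() and updates all callers in mm/.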
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	 8
-rw-r--r--	mm/migrate.c	20
-rw-r--r--	mm/vmscan.c	 4
3 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ae154f51931e..9a659f16af77 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -757,7 +757,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
}
/**
- * __mod_lruvec_state - update lruvec memory statistics
+ * mod_lruvec_state - update lruvec memory statistics
* @lruvec: the lruvec
* @idx: the stat item
* @val: delta to add to the counter, can be negative
@@ -766,7 +766,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
* function updates all three counters that are affected by a
* change of state at this level: per-node, per-cgroup, per-lruvec.
*/
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val)
{
/* Update node */
@@ -794,7 +794,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
}
lruvec = mem_cgroup_lruvec(memcg, pgdat);
- __mod_lruvec_state(lruvec, idx, val);
+ mod_lruvec_state(lruvec, idx, val);
rcu_read_unlock();
}
EXPORT_SYMBOL(__lruvec_stat_mod_folio);
@@ -818,7 +818,7 @@ void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
mod_node_page_state(pgdat, idx, val);
} else {
lruvec = mem_cgroup_lruvec(memcg, pgdat);
- __mod_lruvec_state(lruvec, idx, val);
+ mod_lruvec_state(lruvec, idx, val);
}
rcu_read_unlock();
}
diff --git a/mm/migrate.c b/mm/migrate.c
index b2ad78bf85d5..5169f9717f60 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -675,27 +675,27 @@ static int __folio_migrate_mapping(struct address_space *mapping,
old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
- __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
- __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
+ mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+ mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
- __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
- __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
+ mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+ mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
if (folio_test_pmd_mappable(folio)) {
- __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
- __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
+ mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
+ mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
}
}
#ifdef CONFIG_SWAP
if (folio_test_swapcache(folio)) {
- __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
- __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
+ mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
+ mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
}
#endif
if (dirty && mapping_can_writeback(mapping)) {
- __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+ mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
- __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+ mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
}
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 51ffd32e6e01..720772baf2a7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2018,7 +2018,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
spin_lock_irq(&lruvec->lru_lock);
move_folios_to_lru(lruvec, &folio_list);
- __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+ mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
stat.nr_demoted);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
@@ -4744,7 +4744,7 @@ retry:
reset_batch_size(walk);
}
- __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+ mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
stat.nr_demoted);
item = PGSTEAL_KSWAPD + reclaimer_offset(sc);