path: root/mm/slub.c
author     Matthew Wilcox (Oracle) <willy@infradead.org>  2025-11-13 00:09:17 +0000
committer  Vlastimil Babka <vbabka@suse.cz>  2025-11-13 11:01:08 +0100
commit     ea4702b1708ee3df8da06f07ce41fea84e6ed81d (patch)
tree       f562e7126fc3c31a0fe9042a03517933c0307581 /mm/slub.c
parent     ee1ee8abc4197e21594ca29348629ccbfff4daec (diff)
slab: Remove folio references in memcg_slab_post_charge()
This allows us to skip the compound_head() call for large kmalloc
objects as the virt_to_page() call will always give us the head page
for the large kmalloc case.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20251113000932.1589073-4-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
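A minimal sketch of the two lookup paths, illustrative only and not part of the patch (lookup_via_folio() and lookup_direct() are hypothetical names; virt_to_folio(), folio_page() and virt_to_page() are the standard mm helpers):

    static struct page *lookup_via_folio(void *p)
    {
            /* virt_to_folio() is virt_to_page() plus a compound_head() walk */
            struct folio *folio = virt_to_folio(p);

            return folio_page(folio, 0);
    }

    static struct page *lookup_direct(void *p)
    {
            /*
             * A large kmalloc object starts at the first page of its
             * allocation, so virt_to_page(p) is already the head page
             * and the compound_head() walk above is redundant.
             */
            return virt_to_page(p);
    }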
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  23
1 file changed, 12 insertions, 11 deletions
diff --git a/mm/slub.c b/mm/slub.c
index d4367f25b20d..a7c0662f89c6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2369,33 +2369,34 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
 {
         struct slabobj_ext *slab_exts;
         struct kmem_cache *s;
-        struct folio *folio;
+        struct page *page;
         struct slab *slab;
         unsigned long off;
 
-        folio = virt_to_folio(p);
-        if (!folio_test_slab(folio)) {
+        page = virt_to_page(p);
+        if (PageLargeKmalloc(page)) {
+                unsigned int order;
                 int size;
 
-                if (folio_memcg_kmem(folio))
+                if (PageMemcgKmem(page))
                         return true;
 
-                if (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
-                                             folio_order(folio)))
+                order = large_kmalloc_order(page);
+                if (__memcg_kmem_charge_page(page, flags, order))
                         return false;
 
                 /*
-                 * This folio has already been accounted in the global stats but
+                 * This page has already been accounted in the global stats but
                  * not in the memcg stats. So, subtract from the global and use
                  * the interface which adds to both global and memcg stats.
                  */
-                size = folio_size(folio);
-                node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size);
-                lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size);
+                size = PAGE_SIZE << order;
+                mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size);
+                mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size);
                 return true;
         }
 
-        slab = folio_slab(folio);
+        slab = page_slab(page);
         s = slab->slab_cache;
 
         /*
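For readability, here is how the large-kmalloc branch of memcg_slab_post_charge() reads with this patch applied (reconstructed directly from the hunk above; the declarations and the regular slab path are elided):

            page = virt_to_page(p);
            if (PageLargeKmalloc(page)) {
                    unsigned int order;
                    int size;

                    if (PageMemcgKmem(page))
                            return true;

                    order = large_kmalloc_order(page);
                    if (__memcg_kmem_charge_page(page, flags, order))
                            return false;

                    /*
                     * This page has already been accounted in the global stats but
                     * not in the memcg stats. So, subtract from the global and use
                     * the interface which adds to both global and memcg stats.
                     */
                    size = PAGE_SIZE << order;
                    mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, -size);
                    mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, size);
                    return true;
            }

Net effect of the two stat calls: the node-only interface subtracts the allocation size from the global NR_SLAB_UNRECLAIMABLE_B counter, and the lruvec interface re-adds it to both the global and the memcg counters, so the global count is unchanged while the memcg is charged exactly once.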