summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
author	Matthew Wilcox (Oracle) <willy@infradead.org>	2025-11-13 00:09:20 +0000
committer	Vlastimil Babka <vbabka@suse.cz>	2025-11-13 11:01:08 +0100
commit	0bdfdd6a05aa51fa66bae15af79dba977eeaffe9 (patch)
tree	ad21e2c9f178de932f8cc6b9019c3abe1b75f095 /mm
parent	f9395bf5db450ccbf58eb737c227485df6aab26c (diff)
slab: Remove folio references from free_large_kmalloc()
There's no need to use folio APIs here; just use a page directly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20251113000932.1589073-7-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	20	
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 458e58e37480..79b71ee47f63 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6742,12 +6742,12 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
}
EXPORT_SYMBOL(kmem_cache_free);
-static void free_large_kmalloc(struct folio *folio, void *object)
+static void free_large_kmalloc(struct page *page, void *object)
{
- unsigned int order = folio_order(folio);
+ unsigned int order = compound_order(page);
- if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
- dump_page(&folio->page, "Not a kmalloc allocation");
+ if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
+ dump_page(page, "Not a kmalloc allocation");
return;
}
@@ -6758,10 +6758,10 @@ static void free_large_kmalloc(struct folio *folio, void *object)
kasan_kfree_large(object);
kmsan_kfree_large(object);
- lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+ mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-(PAGE_SIZE << order));
- __folio_clear_large_kmalloc(folio);
- free_frozen_pages(&folio->page, order);
+ __ClearPageLargeKmalloc(page);
+ free_frozen_pages(page, order);
}
/*
@@ -6789,7 +6789,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
* consider folio order
*/
obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
- free_large_kmalloc(folio, obj);
+ free_large_kmalloc(&folio->page, obj);
return;
}
@@ -6829,7 +6829,7 @@ void kfree(const void *object)
folio = virt_to_folio(object);
if (unlikely(!folio_test_slab(folio))) {
- free_large_kmalloc(folio, (void *)object);
+ free_large_kmalloc(&folio->page, (void *)object);
return;
}
@@ -7253,7 +7253,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
if (!s) {
/* Handle kalloc'ed objects */
if (unlikely(!folio_test_slab(folio))) {
- free_large_kmalloc(folio, object);
+ free_large_kmalloc(&folio->page, object);
df->slab = NULL;
return size;
}