author    Matthew Wilcox (Oracle) <willy@infradead.org>   2025-11-13 00:09:21 +0000
committer Vlastimil Babka <vbabka@suse.cz>                2025-11-13 11:01:08 +0100
commit    f262cfd75d52eb285d696d0c7357dc853d7bc7ea (patch)
tree      72ea0de95d9b1e5bdcaf4a70a33b4c6ba8cab1b1 /mm/slub.c
parent    0bdfdd6a05aa51fa66bae15af79dba977eeaffe9 (diff)
slab: Remove folio references from kvfree_rcu_cb()
Remove conversions from folio to page and folio to slab. This is
preparation for separately allocated struct slab from struct page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20251113000932.1589073-8-willy@infradead.org
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
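In short, the object-to-slab lookup in kvfree_rcu_cb() stops going through
struct folio: virt_to_folio()/folio_test_slab()/folio_slab() become
virt_to_page() plus page_slab(), which yields NULL for non-slab pages. A
condensed before/after sketch assembled from the hunks below (vmalloc,
PAGE_ALIGN_DOWN and kfence handling elided; not the complete function body):

	/* Before: classify the address via struct folio. */
	folio = virt_to_folio(obj);
	if (!folio_test_slab(folio)) {
		free_large_kmalloc(&folio->page, obj);	/* large kmalloc, no slab backing */
		return;
	}
	slab = folio_slab(folio);
	slab_addr = folio_address(folio);

	/* After: ask for the slab directly; a NULL return means "not a slab page". */
	page = virt_to_page(obj);
	slab = page_slab(page);
	if (!slab) {
		free_large_kmalloc(page, obj);		/* large kmalloc, no slab backing */
		return;
	}
	slab_addr = slab_address(slab);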
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 14 +++++++-------
1 file changed, 7 insertions, 7 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 79b71ee47f63..56c7ddff43fa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6771,7 +6771,7 @@ static void free_large_kmalloc(struct page *page, void *object)
 void kvfree_rcu_cb(struct rcu_head *head)
 {
 	void *obj = head;
-	struct folio *folio;
+	struct page *page;
 	struct slab *slab;
 	struct kmem_cache *s;
 	void *slab_addr;
@@ -6782,20 +6782,20 @@ void kvfree_rcu_cb(struct rcu_head *head)
 		return;
 	}
 
-	folio = virt_to_folio(obj);
-	if (!folio_test_slab(folio)) {
+	page = virt_to_page(obj);
+	slab = page_slab(page);
+	if (!slab) {
 		/*
 		 * rcu_head offset can be only less than page size so no need to
-		 * consider folio order
+		 * consider allocation order
 		 */
 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
-		free_large_kmalloc(&folio->page, obj);
+		free_large_kmalloc(page, obj);
 		return;
 	}
 
-	slab = folio_slab(folio);
 	s = slab->slab_cache;
-	slab_addr = folio_address(folio);
+	slab_addr = slab_address(slab);
 
 	if (is_kfence_address(obj)) {
 		obj = kfence_object_start(obj);