author		Matthew Wilcox (Oracle) <willy@infradead.org>	2025-11-13 00:09:16 +0000
committer	Vlastimil Babka <vbabka@suse.cz>	2025-11-13 11:01:08 +0100
commit		ee1ee8abc4197e21594ca29348629ccbfff4daec (patch)
tree		048c8a84ca69d70b217c2b311654ddb56e0fcee9
parent		2bcd3800f2da1be13b972858f63c66d035b1ec6d (diff)
slab: Remove folio references from __ksize()
In the future, we will separate slab, folio and page from each other and calling virt_to_folio() on an address allocated from slab will return NULL. Delay the conversion from struct page to struct slab until we know we're not dealing with a large kmalloc allocation. There's a minor win for large kmalloc allocations as we avoid the compound_head() hidden in virt_to_folio().

This deprecates calling ksize() on memory allocated by alloc_pages(). Today it becomes a warning and support will be removed entirely in the future.

Introduce large_kmalloc_size() to abstract how we represent the size of a large kmalloc allocation. For now, this is the same as page_size(), but it will change with separately allocated memdescs.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20251113000932.1589073-3-willy@infradead.org
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
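For illustration only (not part of the patch), here is a minimal sketch contrasting the caller pattern this commit deprecates with the one that remains supported. The function name, sizes and order below are hypothetical, and the large-kmalloc behaviour assumes 4K pages with SLUB's usual KMALLOC_MAX_CACHE_SIZE of two pages:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical example, not part of the patch. */
static void ksize_example(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* order-2, 16K */
	void *obj = kmalloc(12 * 1024, GFP_KERNEL);	/* rounds up to 16K */

	/*
	 * Deprecated: ksize() on alloc_pages() memory. After this commit
	 * it trips the WARN_ON(!slab) in __ksize() and falls back to
	 * page_size(); the fallback is slated for removal.
	 */
	if (page)
		pr_info("deprecated: ksize=%zu\n", ksize(page_address(page)));

	/*
	 * Supported: a large kmalloc allocation is recognised via
	 * PageLargeKmalloc() and __ksize() returns large_kmalloc_size(),
	 * i.e. PAGE_SIZE << order (16384 here).
	 */
	if (obj)
		pr_info("supported: ksize=%zu\n", ksize(obj));

	kfree(obj);
	if (page)
		__free_pages(page, 2);
}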
-rw-r--r--	include/linux/page-flags.h	2
-rw-r--r--	mm/slab.h	10
-rw-r--r--	mm/slab_common.c	23
3 files changed, 23 insertions, 12 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6d5e44968eab..f7a0e4af0c73 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1064,7 +1064,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
  * Serialized with zone lock.
  */
 PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
-FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
 
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
diff --git a/mm/slab.h b/mm/slab.h
index a64b9b2c8731..31ccf0f6d3a1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -605,6 +605,16 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	return s->size;
 }
 
+static inline unsigned int large_kmalloc_order(const struct page *page)
+{
+	return page[1].flags.f & 0xff;
+}
+
+static inline size_t large_kmalloc_size(const struct page *page)
+{
+	return PAGE_SIZE << large_kmalloc_order(page);
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 void dump_unreclaimable_slab(void);
 #else
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 932d13ada36c..67ad2328276e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -997,26 +997,27 @@ void __init create_kmalloc_caches(void)
  */
 size_t __ksize(const void *object)
 {
-	struct folio *folio;
+	const struct page *page;
+	const struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
 
-	folio = virt_to_folio(object);
+	page = virt_to_page(object);
 
-	if (unlikely(!folio_test_slab(folio))) {
-		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
-			return 0;
-		if (WARN_ON(object != folio_address(folio)))
-			return 0;
-		return folio_size(folio);
-	}
+	if (unlikely(PageLargeKmalloc(page)))
+		return large_kmalloc_size(page);
+
+	slab = page_slab(page);
+	/* Delete this after we're sure there are no users */
+	if (WARN_ON(!slab))
+		return page_size(page);
 
 #ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+	skip_orig_size_check(slab->slab_cache, object);
 #endif
 
-	return slab_ksize(folio_slab(folio)->slab_cache);
+	return slab_ksize(slab->slab_cache);
 }
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
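As a rough sanity check of the helpers added to mm/slab.h above, a hedged sketch (the function name is hypothetical, and it assumes, per the hunk above, that the allocation order lives in the low byte of the first tail page's flags):

/*
 * Hypothetical: for a large kmalloc page, large_kmalloc_size() and
 * page_size() agree today, since both compute PAGE_SIZE << order.
 * They are expected to diverge once memdescs are allocated separately.
 */
static void large_kmalloc_size_check(struct page *page)
{
	unsigned int order = large_kmalloc_order(page);	/* page[1].flags.f & 0xff */

	WARN_ON(large_kmalloc_size(page) != (PAGE_SIZE << order));
	WARN_ON(large_kmalloc_size(page) != page_size(page));
}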