Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 458e58e37480..79b71ee47f63 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6742,12 +6742,12 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-static void free_large_kmalloc(struct folio *folio, void *object)
+static void free_large_kmalloc(struct page *page, void *object)
 {
-	unsigned int order = folio_order(folio);
+	unsigned int order = compound_order(page);
 
-	if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
-		dump_page(&folio->page, "Not a kmalloc allocation");
+	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
+		dump_page(page, "Not a kmalloc allocation");
 		return;
 	}
@@ -6758,10 +6758,10 @@ static void free_large_kmalloc(struct folio *folio, void *object)
 	kasan_kfree_large(object);
 	kmsan_kfree_large(object);
 
-	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
+	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
 			      -(PAGE_SIZE << order));
-	__folio_clear_large_kmalloc(folio);
-	free_frozen_pages(&folio->page, order);
+	__ClearPageLargeKmalloc(page);
+	free_frozen_pages(page, order);
 }
 
 /*
@@ -6789,7 +6789,7 @@ void kvfree_rcu_cb(struct rcu_head *head)
 		 * consider folio order
 		 */
 		obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
-		free_large_kmalloc(folio, obj);
+		free_large_kmalloc(&folio->page, obj);
 		return;
 	}
 
@@ -6829,7 +6829,7 @@ void kfree(const void *object)
 
 	folio = virt_to_folio(object);
 	if (unlikely(!folio_test_slab(folio))) {
-		free_large_kmalloc(folio, (void *)object);
+		free_large_kmalloc(&folio->page, (void *)object);
 		return;
 	}
@@ -7253,7 +7253,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
-			free_large_kmalloc(folio, object);
+			free_large_kmalloc(&folio->page, object);
 			df->slab = NULL;
 			return size;
 		}
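
A minimal sketch (not part of the patch) of how free_large_kmalloc() reads after the conversion, reconstructed only from the hunks above; the lines between the two function hunks are not shown in the diff and are elided here:

static void free_large_kmalloc(struct page *page, void *object)
{
	unsigned int order = compound_order(page);

	if (WARN_ON_ONCE(!PageLargeKmalloc(page))) {
		dump_page(page, "Not a kmalloc allocation");
		return;
	}

	/* ... lines between the hunks not shown in this diff ... */

	kasan_kfree_large(object);
	kmsan_kfree_large(object);

	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	__ClearPageLargeKmalloc(page);
	free_frozen_pages(page, order);
}

Call sites that hold a folio (kvfree_rcu_cb(), kfree(), build_detached_freelist()) now pass the head page explicitly via &folio->page, as the remaining hunks show.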