Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  26
1 file changed, 13 insertions, 13 deletions
diff --git a/mm/slub.c b/mm/slub.c
index a7c0662f89c6..112222eacdcb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3045,24 +3045,24 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
struct kmem_cache_order_objects oo,
bool allow_spin)
{
- struct folio *folio;
+ struct page *page;
struct slab *slab;
unsigned int order = oo_order(oo);
if (unlikely(!allow_spin))
- folio = (struct folio *)alloc_frozen_pages_nolock(0/* __GFP_COMP is implied */,
+ page = alloc_frozen_pages_nolock(0/* __GFP_COMP is implied */,
node, order);
else if (node == NUMA_NO_NODE)
- folio = (struct folio *)alloc_frozen_pages(flags, order);
+ page = alloc_frozen_pages(flags, order);
else
- folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL);
+ page = __alloc_frozen_pages(flags, order, node, NULL);
- if (!folio)
+ if (!page)
return NULL;
- slab = folio_slab(folio);
- __folio_set_slab(folio);
- if (folio_is_pfmemalloc(folio))
+ __SetPageSlab(page);
+ slab = page_slab(page);
+ if (page_is_pfmemalloc(page))
slab_set_pfmemalloc(slab);
return slab;
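
For readability, the allocation path after this hunk applies would read roughly as in the sketch below. It is reconstructed only from the lines shown in the hunk above; the blank lines, closing brace, and explanatory comments are editorial additions, not part of the commit.

static inline struct slab *alloc_slab_page(gfp_t flags, int node,
		struct kmem_cache_order_objects oo,
		bool allow_spin)
{
	struct page *page;	/* was: struct folio *folio */
	struct slab *slab;
	unsigned int order = oo_order(oo);

	/* The frozen-page allocators return struct page directly,
	 * so the (struct folio *) casts are dropped. */
	if (unlikely(!allow_spin))
		page = alloc_frozen_pages_nolock(0 /* __GFP_COMP is implied */,
						 node, order);
	else if (node == NUMA_NO_NODE)
		page = alloc_frozen_pages(flags, order);
	else
		page = __alloc_frozen_pages(flags, order, node, NULL);

	if (!page)
		return NULL;

	/* Flag the page as a slab page before converting it to a slab;
	 * note the order is swapped relative to the folio version. */
	__SetPageSlab(page);
	slab = page_slab(page);
	if (page_is_pfmemalloc(page))
		slab_set_pfmemalloc(slab);

	return slab;
}
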
@@ -3286,16 +3286,16 @@ static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
static void __free_slab(struct kmem_cache *s, struct slab *slab)
{
- struct folio *folio = slab_folio(slab);
- int order = folio_order(folio);
+ struct page *page = slab_page(slab);
+ int order = compound_order(page);
int pages = 1 << order;
__slab_clear_pfmemalloc(slab);
- folio->mapping = NULL;
- __folio_clear_slab(folio);
+ page->mapping = NULL;
+ __ClearPageSlab(page);
mm_account_reclaimed_pages(pages);
unaccount_slab(slab, order, s);
- free_frozen_pages(&folio->page, order);
+ free_frozen_pages(page, order);
}
static void rcu_free_slab(struct rcu_head *h)
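
Similarly, the free path after the second hunk would read roughly as below. Again this is a reconstruction from the hunk only; blank lines and the trailing comments are editorial, not from the commit.

static void __free_slab(struct kmem_cache *s, struct slab *slab)
{
	struct page *page = slab_page(slab);	/* was: slab_folio() */
	int order = compound_order(page);	/* was: folio_order() */
	int pages = 1 << order;

	__slab_clear_pfmemalloc(slab);
	page->mapping = NULL;			/* was: folio->mapping */
	__ClearPageSlab(page);			/* was: __folio_clear_slab() */
	mm_account_reclaimed_pages(pages);
	unaccount_slab(slab, order, s);
	free_frozen_pages(page, order);		/* was: &folio->page */
}
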