author    Vlastimil Babka <vbabka@suse.cz>    2025-11-25 14:35:33 +0100
committer Vlastimil Babka <vbabka@suse.cz>    2025-11-25 14:35:33 +0100
commit    ed80cc758b784a1ed297f9130625de217a904ba5 (patch)
tree      b23adc4646526bb611f81234526c6872e4330e10 /mm
parent    e5d7764e1372925c27fc574c4552122a8c3c9272 (diff)
parent    c33196c9429a1db5bc6cded27b6286f341ad6be0 (diff)
Merge branch 'slab/for-6.19/freelist_aba_t_cleanups' into slab/for-next
Merge series "slab: cmpxchg cleanups enabled by -fms-extensions"

From the cover letter [1]:

After learning about -fms-extensions being enabled for 6.19, I realized there is some cleanup potential in slub code by extending the definition and usage of freelist_aba_t, as it can now become an unnamed member of struct slab. This series performs the cleanup, with no functional changes intended.

Additionally we turn freelist_aba_t into struct freelist_counters as it doesn't meet any criteria for being a typedef, per Documentation/process/coding-style.rst.

Based on the tag kbuild-ms-extensions-6.19 from git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux

Link: https://lore.kernel.org/all/20251107-slab-fms-cleanup-v1-0-650b1491ac9e@suse.cz/#t [1]
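For illustration, a minimal sketch of the -fms-extensions feature the series builds on: a previously defined struct can be placed in another aggregate as an unnamed member, making its fields directly accessible. The names here are hypothetical, not from the series.

struct point {
	int x, y;
};

struct labeled_point {
	struct point;		/* unnamed member; needs -fms-extensions */
	const char *label;
};

static inline void move_right(struct labeled_point *p)
{
	p->x += 1;		/* struct point's fields, accessed directly */
}

struct slab in the diff below embeds struct freelist_counters in exactly this way, so existing accesses such as slab->freelist and slab->counters compile unchanged.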
Diffstat (limited to 'mm')
-rw-r--r--    mm/slab.h    52
-rw-r--r--    mm/slub.c    155
2 files changed, 93 insertions, 114 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 8430e24bba3b..f730e012553c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -40,13 +40,29 @@ typedef u64 freelist_full_t;
* Freelist pointer and counter to cmpxchg together, avoids the typical ABA
* problems with cmpxchg of just a pointer.
*/
-typedef union {
- struct {
- void *freelist;
- unsigned long counter;
+struct freelist_counters {
+ union {
+ struct {
+ void *freelist;
+ union {
+ unsigned long counters;
+ struct {
+ unsigned inuse:16;
+ unsigned objects:15;
+ /*
+ * If slab debugging is enabled then the
+ * frozen bit can be reused to indicate
+ * that the slab was corrupted
+ */
+ unsigned frozen:1;
+ };
+ };
+ };
+#ifdef system_has_freelist_aba
+ freelist_full_t freelist_counters;
+#endif
};
- freelist_full_t full;
-} freelist_aba_t;
+};
/* Reuses the bits in struct page */
struct slab {
@@ -69,27 +85,7 @@ struct slab {
#endif
};
/* Double-word boundary */
- union {
- struct {
- void *freelist; /* first free object */
- union {
- unsigned long counters;
- struct {
- unsigned inuse:16;
- unsigned objects:15;
- /*
- * If slab debugging is enabled then the
- * frozen bit can be reused to indicate
- * that the slab was corrupted
- */
- unsigned frozen:1;
- };
- };
- };
-#ifdef system_has_freelist_aba
- freelist_aba_t freelist_counter;
-#endif
- };
+ struct freelist_counters;
};
struct rcu_head rcu_head;
};
@@ -114,7 +110,7 @@ SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
-static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
+static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
#endif
/**
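An aside on the comment above ("avoids the typical ABA problems with cmpxchg of just a pointer"): a minimal illustration of the hazard and the counter fix, using C11 atomics and hypothetical names rather than the kernel's try_cmpxchg_freelist().

#include <stdatomic.h>
#include <stdbool.h>

union head {
	struct {			/* anonymous struct, standard C11 */
		void *ptr;		/* e.g. first free object */
		unsigned long counter;	/* bumped on every update */
	};
	__int128 full;			/* double-word image for cmpxchg */
};

static bool update_head(_Atomic __int128 *loc, union head old, union head new)
{
	/*
	 * With a bare pointer, the list changing A -> B -> A between the
	 * read and the cmpxchg would go unnoticed and the cmpxchg would
	 * succeed on stale state; bumping the counter on every update
	 * makes the stale snapshot compare unequal.
	 */
	new.counter = old.counter + 1;
	return atomic_compare_exchange_strong(loc, &old.full, new.full);
}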
diff --git a/mm/slub.c b/mm/slub.c
index 4de428d9e76d..785e25a14999 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -410,18 +410,22 @@ enum stat_item {
NR_SLUB_STAT_ITEMS
};
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
+struct freelist_tid {
union {
struct {
- void **freelist; /* Pointer to next available object */
+ void *freelist; /* Pointer to next available object */
unsigned long tid; /* Globally unique transaction id */
};
- freelist_aba_t freelist_tid;
+ freelist_full_t freelist_tid;
};
+};
+
+/*
+ * When changing the layout, make sure freelist and tid are still compatible
+ * with this_cpu_cmpxchg_double() alignment requirements.
+ */
+struct kmem_cache_cpu {
+ struct freelist_tid;
struct slab *slab; /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
struct slab *partial; /* Partially allocated slabs */
@@ -756,32 +760,29 @@ static __always_inline void slab_unlock(struct slab *slab)
}
static inline bool
-__update_freelist_fast(struct slab *slab,
- void *freelist_old, unsigned long counters_old,
- void *freelist_new, unsigned long counters_new)
+__update_freelist_fast(struct slab *slab, struct freelist_counters *old,
+ struct freelist_counters *new)
{
#ifdef system_has_freelist_aba
- freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
- freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
-
- return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
+ return try_cmpxchg_freelist(&slab->freelist_counters,
+ &old->freelist_counters,
+ new->freelist_counters);
#else
return false;
#endif
}
static inline bool
-__update_freelist_slow(struct slab *slab,
- void *freelist_old, unsigned long counters_old,
- void *freelist_new, unsigned long counters_new)
+__update_freelist_slow(struct slab *slab, struct freelist_counters *old,
+ struct freelist_counters *new)
{
bool ret = false;
slab_lock(slab);
- if (slab->freelist == freelist_old &&
- slab->counters == counters_old) {
- slab->freelist = freelist_new;
- slab->counters = counters_new;
+ if (slab->freelist == old->freelist &&
+ slab->counters == old->counters) {
+ slab->freelist = new->freelist;
+ slab->counters = new->counters;
ret = true;
}
slab_unlock(slab);
@@ -797,22 +798,18 @@ __update_freelist_slow(struct slab *slab,
* interrupt the operation.
*/
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
- void *freelist_old, unsigned long counters_old,
- void *freelist_new, unsigned long counters_new,
- const char *n)
+ struct freelist_counters *old, struct freelist_counters *new, const char *n)
{
bool ret;
if (USE_LOCKLESS_FAST_PATH())
lockdep_assert_irqs_disabled();
- if (s->flags & __CMPXCHG_DOUBLE) {
- ret = __update_freelist_fast(slab, freelist_old, counters_old,
- freelist_new, counters_new);
- } else {
- ret = __update_freelist_slow(slab, freelist_old, counters_old,
- freelist_new, counters_new);
- }
+ if (s->flags & __CMPXCHG_DOUBLE)
+ ret = __update_freelist_fast(slab, old, new);
+ else
+ ret = __update_freelist_slow(slab, old, new);
+
if (likely(ret))
return true;
@@ -827,21 +824,17 @@ static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *sla
}
static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
- void *freelist_old, unsigned long counters_old,
- void *freelist_new, unsigned long counters_new,
- const char *n)
+ struct freelist_counters *old, struct freelist_counters *new, const char *n)
{
bool ret;
if (s->flags & __CMPXCHG_DOUBLE) {
- ret = __update_freelist_fast(slab, freelist_old, counters_old,
- freelist_new, counters_new);
+ ret = __update_freelist_fast(slab, old, new);
} else {
unsigned long flags;
local_irq_save(flags);
- ret = __update_freelist_slow(slab, freelist_old, counters_old,
- freelist_new, counters_new);
+ ret = __update_freelist_slow(slab, old, new);
local_irq_restore(flags);
}
if (likely(ret))
@@ -3740,8 +3733,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
void *nextfree, *freelist_iter, *freelist_tail;
int tail = DEACTIVATE_TO_HEAD;
unsigned long flags = 0;
- struct slab new;
- struct slab old;
+ struct freelist_counters old, new;
if (READ_ONCE(slab->freelist)) {
stat(s, DEACTIVATE_REMOTE_FREES);
@@ -3790,10 +3782,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
} else {
new.freelist = old.freelist;
}
- } while (!slab_update_freelist(s, slab,
- old.freelist, old.counters,
- new.freelist, new.counters,
- "unfreezing slab"));
+ } while (!slab_update_freelist(s, slab, &old, &new, "unfreezing slab"));
/*
* Stage three: Manipulate the slab list based on the updated state.
@@ -4381,11 +4370,11 @@ __update_cpu_freelist_fast(struct kmem_cache *s,
void *freelist_old, void *freelist_new,
unsigned long tid)
{
- freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
- freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
+ struct freelist_tid old = { .freelist = freelist_old, .tid = tid };
+ struct freelist_tid new = { .freelist = freelist_new, .tid = next_tid(tid) };
- return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
- &old.full, new.full);
+ return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid,
+ &old.freelist_tid, new.freelist_tid);
}
/*
@@ -4398,27 +4387,24 @@ __update_cpu_freelist_fast(struct kmem_cache *s,
*/
static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
{
- struct slab new;
- unsigned long counters;
- void *freelist;
+ struct freelist_counters old, new;
lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
do {
- freelist = slab->freelist;
- counters = slab->counters;
+ old.freelist = slab->freelist;
+ old.counters = slab->counters;
+
+ new.freelist = NULL;
+ new.counters = old.counters;
- new.counters = counters;
+ new.inuse = old.objects;
+ new.frozen = old.freelist != NULL;
- new.inuse = slab->objects;
- new.frozen = freelist != NULL;
- } while (!__slab_update_freelist(s, slab,
- freelist, counters,
- NULL, new.counters,
- "get_freelist"));
+ } while (!__slab_update_freelist(s, slab, &old, &new, "get_freelist"));
- return freelist;
+ return old.freelist;
}
/*
@@ -4426,26 +4412,22 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
*/
static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
{
- struct slab new;
- unsigned long counters;
- void *freelist;
+ struct freelist_counters old, new;
do {
- freelist = slab->freelist;
- counters = slab->counters;
+ old.freelist = slab->freelist;
+ old.counters = slab->counters;
- new.counters = counters;
+ new.freelist = NULL;
+ new.counters = old.counters;
VM_BUG_ON(new.frozen);
- new.inuse = slab->objects;
+ new.inuse = old.objects;
new.frozen = 1;
- } while (!slab_update_freelist(s, slab,
- freelist, counters,
- NULL, new.counters,
- "freeze_slab"));
+ } while (!slab_update_freelist(s, slab, &old, &new, "freeze_slab"));
- return freelist;
+ return old.freelist;
}
/*
@@ -5877,10 +5859,8 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
unsigned long addr)
{
- void *old_head;
bool was_frozen, was_full;
- struct slab new;
- unsigned long counters;
+ struct freelist_counters old, new;
struct kmem_cache_node *n = NULL;
unsigned long flags;
bool on_node_partial;
@@ -5904,13 +5884,19 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
spin_unlock_irqrestore(&n->list_lock, flags);
n = NULL;
}
- old_head = slab->freelist;
- counters = slab->counters;
- set_freepointer(s, tail, old_head);
- new.counters = counters;
- was_frozen = !!new.frozen;
- was_full = (old_head == NULL);
+
+ old.freelist = slab->freelist;
+ old.counters = slab->counters;
+
+ was_full = (old.freelist == NULL);
+ was_frozen = old.frozen;
+
+ set_freepointer(s, tail, old.freelist);
+
+ new.freelist = head;
+ new.counters = old.counters;
new.inuse -= cnt;
+
/*
* Might need to be taken off (due to becoming empty) or added
* to (due to not being full anymore) the partial list.
@@ -5939,10 +5925,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
}
}
- } while (!slab_update_freelist(s, slab,
- old_head, counters,
- head, new.counters,
- "__slab_free"));
+ } while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
if (likely(!n)) {
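Taken together, every converted site in this diff follows the same lockless shape. Condensed from the freeze_slab() hunk above as a reference sketch (s and slab assumed in scope, as in the hunks):

	struct freelist_counters old, new;

	do {
		/* snapshot both words of the slab's freelist/counters pair */
		old.freelist = slab->freelist;
		old.counters = slab->counters;

		/* derive the new state from the snapshot only, never from *slab */
		new.freelist = NULL;
		new.counters = old.counters;
		new.inuse = old.objects;
		new.frozen = 1;
	} while (!slab_update_freelist(s, slab, &old, &new, "freeze_slab"));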