author	Valentin Schneider <valentin.schneider@arm.com>	2025-07-12 03:33:44 +0000
committer	Peter Zijlstra <peterz@infradead.org>	2025-07-14 17:16:32 +0200
commit	a4f0b6fef4b08e9928449206390133e48ac185a7 (patch)
tree	2797d4f803c8036e99df543520a6da633889a18e /kernel
parent	44e4e0297c3c01987399bb9973f4d22a096a62c2 (diff)
locking/mutex: Add p->blocked_on wrappers for correctness checks
This lets us assert mutex::wait_lock is held whenever we access
p->blocked_on, as well as warn us for unexpected state changes.

[fix conflicts, call in more places]
[jstultz: tweaked commit subject, reworked a good bit]

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Connor O'Brien <connoro@google.com>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lkml.kernel.org/r/20250712033407.2383110-4-jstultz@google.com
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/mutex-debug.c	4
-rw-r--r--	kernel/locking/mutex.c	32
-rw-r--r--	kernel/locking/ww_mutex.h	8
3 files changed, 19 insertions, 25 deletions
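
The wrapper helpers themselves are introduced outside the kernel/ subtree
(this view is limited to 'kernel'), so their definitions do not appear in
the hunks below. As a hedged sketch only, reconstructed from the call
sites and from the warnings this patch removes, the underscore-prefixed
helpers would look roughly like this (names come from the diff; the exact
bodies in the real header may differ):

/*
 * Illustrative sketch only -- the real definitions land in a header
 * outside this diffstat. Reconstructed from the call sites below.
 */
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
	struct mutex *m = p->blocked_on;

	/* blocked_on is serialized by mutex::wait_lock */
	if (m)
		lockdep_assert_held_once(&m->wait_lock);
	return m;
}

static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	WARN_ON_ONCE(!m);
	/* The task should only mark itself as blocked */
	WARN_ON_ONCE(p != current);
	lockdep_assert_held_once(&m->wait_lock);
	/* Re-setting the same lock is fine; switching locks is not */
	WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
	p->blocked_on = m;
}

static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	lockdep_assert_held_once(&m->wait_lock);
	/*
	 * Unlock wakeups can happen in parallel (optimistic spinners can
	 * steal and release the lock), so blocked_on may already be NULL
	 * here; only a *different* mutex would indicate a bug.
	 */
	WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
	p->blocked_on = NULL;
}

Centralizing the checks this way is what lets the parallel-wakeup comment
in __mutex_unlock_slowpath() below go away: the tolerant warning lives in
the helper instead of being open-coded at each call site.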
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 758b7a6792b0..949103fd8e9b 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -54,13 +54,13 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 	lockdep_assert_held(&lock->wait_lock);
 
 	/* Current thread can't be already blocked (since it's executing!) */
-	DEBUG_LOCKS_WARN_ON(task->blocked_on);
+	DEBUG_LOCKS_WARN_ON(__get_task_blocked_on(task));
 }
 
 void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 			       struct task_struct *task)
 {
-	struct mutex *blocked_on = READ_ONCE(task->blocked_on);
+	struct mutex *blocked_on = __get_task_blocked_on(task);
 
 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
 	DEBUG_LOCKS_WARN_ON(waiter->task != task);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index e2f59863a866..80d778fedd60 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -644,8 +644,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 		goto err_early_kill;
 	}
 
-	WARN_ON(current->blocked_on);
-	current->blocked_on = lock;
+	__set_task_blocked_on(current, lock);
 	set_current_state(state);
 	trace_contention_begin(lock, LCB_F_MUTEX);
 	for (;;) {
@@ -685,9 +684,9 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 		/*
 		 * As we likely have been woken up by task
 		 * that has cleared our blocked_on state, re-set
-		 * it to the lock we are trying to aquire.
+		 * it to the lock we are trying to acquire.
 		 */
-		current->blocked_on = lock;
+		set_task_blocked_on(current, lock);
 		set_current_state(state);
 		/*
 		 * Here we order against unlock; we must either see it change
@@ -699,11 +698,15 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 
 		if (first) {
 			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
-			/* clear blocked_on as mutex_optimistic_spin may schedule() */
-			current->blocked_on = NULL;
+			/*
+			 * mutex_optimistic_spin() can call schedule(), so
+			 * clear blocked_on so we don't become unselectable
+			 * to run.
+			 */
+			clear_task_blocked_on(current, lock);
 			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
 				break;
-			current->blocked_on = lock;
+			set_task_blocked_on(current, lock);
 			trace_contention_begin(lock, LCB_F_MUTEX);
 		}
 
@@ -711,7 +714,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 	}
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 acquired:
-	current->blocked_on = NULL;
+	__clear_task_blocked_on(current, lock);
 	__set_current_state(TASK_RUNNING);
 
 	if (ww_ctx) {
@@ -741,11 +744,11 @@ skip_wait:
 	return 0;
 
 err:
-	current->blocked_on = NULL;
+	__clear_task_blocked_on(current, lock);
 	__set_current_state(TASK_RUNNING);
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
-	WARN_ON(current->blocked_on);
+	WARN_ON(__get_task_blocked_on(current));
 	trace_contention_end(lock, ret);
 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	debug_mutex_free_waiter(&waiter);
@@ -956,14 +959,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 		next = waiter->task;
 
 		debug_mutex_wake_waiter(lock, waiter);
-		/*
-		 * Unlock wakeups can be happening in parallel
-		 * (when optimistic spinners steal and release
-		 * the lock), so blocked_on may already be
-		 * cleared here.
-		 */
-		WARN_ON(next->blocked_on && next->blocked_on != lock);
-		next->blocked_on = NULL;
+		__clear_task_blocked_on(next, lock);
 		wake_q_add(&wake_q, next);
 	}
 
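
Note the asymmetry in the call sites above: inside the wait loop, after
mutex::wait_lock has been dropped, the patch calls set_task_blocked_on()
and clear_task_blocked_on() without the leading underscores, while sites
that already hold wait_lock use the __-prefixed forms. Presumably the
non-underscore variants take wait_lock themselves; a minimal sketch under
that assumption (not part of this diff):

/*
 * Assumed locked variants: acquire mutex::wait_lock around the
 * __-prefixed helper, for callers that do not already hold it.
 */
static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&m->wait_lock, flags);
	__set_task_blocked_on(p, m);
	raw_spin_unlock_irqrestore(&m->wait_lock, flags);
}

static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&m->wait_lock, flags);
	__clear_task_blocked_on(p, m);
	raw_spin_unlock_irqrestore(&m->wait_lock, flags);
}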
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 45fe05e51db1..086fd5487ca7 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -283,15 +283,13 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
 	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
 #ifndef WW_RT
 		debug_mutex_wake_waiter(lock, waiter);
+#endif
 		/*
 		 * When waking up the task to die, be sure to clear the
 		 * blocked_on pointer. Otherwise we can see circular
 		 * blocked_on relationships that can't resolve.
 		 */
-		WARN_ON(waiter->task->blocked_on &&
-			waiter->task->blocked_on != lock);
-#endif
-		waiter->task->blocked_on = NULL;
+		__clear_task_blocked_on(waiter->task, lock);
 		wake_q_add(wake_q, waiter->task);
 	}
 
@@ -345,7 +343,7 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
 		 * blocked_on pointer. Otherwise we can see circular
 		 * blocked_on relationships that can't resolve.
 		 */
-		owner->blocked_on = NULL;
+		__clear_task_blocked_on(owner, lock);
 		wake_q_add(wake_q, owner);
 	}
 	return true;