Diffstat (limited to 'kernel/locking/mutex.c')
 -rw-r--r--  kernel/locking/mutex.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a39ecccbd106..e2f59863a866 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -644,6 +644,8 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 			goto err_early_kill;
 	}
 
+	WARN_ON(current->blocked_on);
+	current->blocked_on = lock;
 	set_current_state(state);
 	trace_contention_begin(lock, LCB_F_MUTEX);
 	for (;;) {
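
For context: the field being set above lives in struct task_struct. A minimal sketch of the assumed declaration follows; the exact placement and serialization rules are assumptions, not shown in this diff.

struct mutex;

/* Sketch (assumed, not part of this diff): the task_struct field the
 * lock/unlock paths manipulate. In this scheme blocked_on is only set
 * by the task itself, and cleared either by the task or by its waker,
 * so a non-NULL value on entry to __mutex_lock_common() indicates a
 * bookkeeping bug, which the WARN_ON() above catches.
 */
struct task_struct {
        /* ... existing members elided ... */
        struct mutex    *blocked_on;    /* mutex this task is blocked on, or NULL */
};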
@@ -680,6 +682,12 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 
 		first = __mutex_waiter_is_first(lock, &waiter);
 
+		/*
+		 * As we have likely been woken up by the
+		 * task that cleared our blocked_on state,
+		 * re-set it to the lock we are trying to acquire.
+		 */
+		current->blocked_on = lock;
 		set_current_state(state);
 		/*
 		 * Here we order against unlock; we must either see it change
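
Why the re-set is needed: the unlock slowpath (final hunk below) clears the waiter's blocked_on before queueing the wakeup, so a waiter that then loses the race for the lock must restore it before sleeping again. An illustrative interleaving, assuming the unlock-side clearing shown at the end of this patch:

/*
 * Illustrative interleaving (for exposition only):
 *
 *   waiter                              unlocker
 *   ------                              --------
 *   current->blocked_on = lock;
 *   schedule_preempt_disabled();
 *                                       next->blocked_on = NULL;
 *                                       wake_q_add(&wake_q, next);
 *   (wakes; a spinner stole the lock)
 *   current->blocked_on = lock;         <- the re-set above
 *   schedule_preempt_disabled();
 */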
@@ -691,8 +699,11 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 
 		if (first) {
 			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
+			/* clear blocked_on as mutex_optimistic_spin may schedule() */
+			current->blocked_on = NULL;
 			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
 				break;
+			current->blocked_on = lock;
 			trace_contention_begin(lock, LCB_F_MUTEX);
 		}
 
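
The clear/re-set pair around mutex_optimistic_spin() preserves one rule: blocked_on is non-NULL only while the task may block on this mutex, never while it might sleep inside the spin path for some other reason. A hypothetical consumer that would depend on that rule, e.g. a proxy-execution style chain walk; the helper name below is illustrative and not part of this patch:

/* Hypothetical sketch (not in this patch): map a blocked task to the
 * owner of the mutex it is waiting on. This is only sound because
 * blocked_on is guaranteed NULL whenever the task can sleep for any
 * reason other than waiting on the mutex itself.
 */
static struct task_struct *blocked_on_owner(struct task_struct *p)
{
        struct mutex *m = p->blocked_on;

        return m ? __mutex_owner(m) : NULL;
}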
@@ -700,6 +711,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 	}
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 acquired:
+	current->blocked_on = NULL;
 	__set_current_state(TASK_RUNNING);
 
 	if (ww_ctx) {
@@ -729,9 +741,11 @@ skip_wait:
 	return 0;
 
 err:
+	current->blocked_on = NULL;
 	__set_current_state(TASK_RUNNING);
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
+	WARN_ON(current->blocked_on);
 	trace_contention_end(lock, ret);
 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	debug_mutex_free_waiter(&waiter);
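
Taken together, the acquired:, err:, and err_early_kill: changes maintain an exit invariant: no task leaves __mutex_lock_common() with blocked_on still set. err_early_kill is reached before blocked_on is first assigned, which is why that path only asserts NULL rather than clearing. A hypothetical assertion helper expressing the invariant; the name and config guard are assumptions:

/* Hypothetical (not in this patch): one place to assert the exit
 * invariant instead of open-coding WARN_ON() at each exit label.
 */
static inline void assert_not_blocked_on(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_MUTEXES
        WARN_ON_ONCE(p->blocked_on);
#endif
}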
@@ -942,6 +956,14 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 		next = waiter->task;
 
 		debug_mutex_wake_waiter(lock, waiter);
+		/*
+		 * Unlock wakeups can happen in parallel (when
+		 * optimistic spinners steal and then release
+		 * the lock), so blocked_on may already be
+		 * cleared here.
+		 */
+		WARN_ON(next->blocked_on && next->blocked_on != lock);
+		next->blocked_on = NULL;
 		wake_q_add(&wake_q, next);
 	}
 
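
A possible follow-up, offered as an assumption rather than part of this patch: wrap the raw assignments in helpers so the WARN_ON() invariants live in one place. Names below are illustrative.

/* Hypothetical wrappers (not in this patch) consolidating the
 * open-coded assignments above; names are illustrative.
 */
static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
        /* allow re-setting the same lock after a wakeup cleared it */
        WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
        p->blocked_on = m;
}

static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
        /* may race with a parallel unlock wakeup, so allow NULL */
        WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
        p->blocked_on = NULL;
}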