1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * kernel/locking/mutex.c
4 *
5 * Mutexes: blocking mutual exclusion locks
6 *
7 * Started by Ingo Molnar:
8 *
9 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 *
11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12 * David Howells for suggestions and improvements.
13 *
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
16 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17 * and Sven Dietrich.
18 *
19 * Also see Documentation/locking/mutex-design.rst.
20 */
21#include <linux/mutex.h>
22#include <linux/ww_mutex.h>
23#include <linux/sched/signal.h>
24#include <linux/sched/rt.h>
25#include <linux/sched/wake_q.h>
26#include <linux/sched/debug.h>
27#include <linux/export.h>
28#include <linux/spinlock.h>
29#include <linux/interrupt.h>
30#include <linux/debug_locks.h>
31#include <linux/osq_lock.h>
32
33#define CREATE_TRACE_POINTS
34#include <trace/events/lock.h>
35
36#ifndef CONFIG_PREEMPT_RT
37#include "mutex.h"
38
39#ifdef CONFIG_DEBUG_MUTEXES
40# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
41#else
42# define MUTEX_WARN_ON(cond)
43#endif
44
45void
46__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
47{
48 atomic_long_set(&lock->owner, 0);
49 raw_spin_lock_init(&lock->wait_lock);
50 INIT_LIST_HEAD(&lock->wait_list);
51#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
52 osq_lock_init(&lock->osq);
53#endif
54
55 debug_mutex_init(lock, name, key);
56}
57EXPORT_SYMBOL(__mutex_init);
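
/*
 * Illustrative usage sketch (not part of this file): callers normally reach
 * __mutex_init() through the mutex_init() wrapper from <linux/mutex.h>, which
 * supplies the lock_class_key. The structure and function below are
 * hypothetical.
 *
 *	struct example_dev {
 *		struct mutex lock;	// protects @state
 *		int state;
 *	};
 *
 *	static void example_dev_setup(struct example_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *		dev->state = 0;
 *	}
 */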
58
59static inline struct task_struct *__owner_task(unsigned long owner)
60{
61 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
62}
63
64bool mutex_is_locked(struct mutex *lock)
65{
66 return __mutex_owner(lock) != NULL;
67}
68EXPORT_SYMBOL(mutex_is_locked);
69
70static inline unsigned long __owner_flags(unsigned long owner)
71{
72 return owner & MUTEX_FLAGS;
73}
74
75/*
76 * Returns: __mutex_owner(lock) on failure or NULL on success.
77 */
78static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
79{
80 unsigned long owner, curr = (unsigned long)current;
81
82 owner = atomic_long_read(&lock->owner);
83 for (;;) { /* must loop, can race against a flag */
84 unsigned long flags = __owner_flags(owner);
85 unsigned long task = owner & ~MUTEX_FLAGS;
86
87 if (task) {
88 if (flags & MUTEX_FLAG_PICKUP) {
89 if (task != curr)
90 break;
91 flags &= ~MUTEX_FLAG_PICKUP;
92 } else if (handoff) {
93 if (flags & MUTEX_FLAG_HANDOFF)
94 break;
95 flags |= MUTEX_FLAG_HANDOFF;
96 } else {
97 break;
98 }
99 } else {
100 MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
101 task = curr;
102 }
103
104 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
105 if (task == curr)
106 return NULL;
107 break;
108 }
109 }
110
111 return __owner_task(owner);
112}
113
114/*
115 * Trylock or set HANDOFF
116 */
117static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
118{
119 return !__mutex_trylock_common(lock, handoff);
120}
121
122/*
123 * Actual trylock that will work on any unlocked state.
124 */
125static inline bool __mutex_trylock(struct mutex *lock)
126{
127 return !__mutex_trylock_common(lock, false);
128}
129
130#ifndef CONFIG_DEBUG_LOCK_ALLOC
131/*
132 * Lockdep annotations are contained to the slow paths for simplicity.
133 * There is nothing that would stop spreading the lockdep annotations outwards
134 * except more code.
135 */
136
137/*
138 * Optimistic trylock that only works in the uncontended case. Make sure to
139 * follow with a __mutex_trylock() before failing.
140 */
141static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
142{
143 unsigned long curr = (unsigned long)current;
144 unsigned long zero = 0UL;
145
146 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
147 return true;
148
149 return false;
150}
151
152static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
153{
154 unsigned long curr = (unsigned long)current;
155
156 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
157}
158#endif
159
160static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
161{
162 atomic_long_or(flag, &lock->owner);
163}
164
165static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
166{
167 atomic_long_andnot(flag, &lock->owner);
168}
169
170static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
171{
172 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
173}
174
175/*
176 * Add @waiter to a given location in the lock wait_list and set the
177 * FLAG_WAITERS flag if it's the first waiter.
178 */
179static void
180__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
181 struct list_head *list)
182{
183 debug_mutex_add_waiter(lock, waiter, current);
184
185 list_add_tail(&waiter->list, list);
186 if (__mutex_waiter_is_first(lock, waiter))
187 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
188}
189
190static void
191__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
192{
193 list_del(&waiter->list);
194 if (likely(list_empty(&lock->wait_list)))
195 __mutex_clear_flag(lock, MUTEX_FLAGS);
196
197 debug_mutex_remove_waiter(lock, waiter, current);
198}
199
200/*
201 * Give up ownership to a specific task; when @task = NULL, this is equivalent
202 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
203 * WAITERS. Provides RELEASE semantics like a regular unlock; the matching
204 * ACQUIRE semantics for the handoff are provided by __mutex_trylock().
205 */
206static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
207{
208 unsigned long owner = atomic_long_read(&lock->owner);
209
210 for (;;) {
211 unsigned long new;
212
213 MUTEX_WARN_ON(__owner_task(owner) != current);
214 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
215
216 new = (owner & MUTEX_FLAG_WAITERS);
217 new |= (unsigned long)task;
218 if (task)
219 new |= MUTEX_FLAG_PICKUP;
220
221 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
222 break;
223 }
224}
225
226#ifndef CONFIG_DEBUG_LOCK_ALLOC
227/*
228 * We split the mutex lock/unlock logic into separate fastpath and
229 * slowpath functions, to reduce the register pressure on the fastpath.
230 * We also put the fastpath first in the kernel image, to make sure the
231 * branch is predicted by the CPU as default-untaken.
232 */
233static void __sched __mutex_lock_slowpath(struct mutex *lock);
234
235/**
236 * mutex_lock - acquire the mutex
237 * @lock: the mutex to be acquired
238 *
239 * Lock the mutex exclusively for this task. If the mutex is not
240 * available right now, it will sleep until it can get it.
241 *
242 * The mutex must later on be released by the same task that
243 * acquired it. Recursive locking is not allowed. The task
244 * may not exit without first unlocking the mutex. Also, kernel
245 * memory where the mutex resides must not be freed with
246 * the mutex still locked. The mutex must first be initialized
247 * (or statically defined) before it can be locked. memset()-ing
248 * the mutex to 0 is not allowed.
249 *
250 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
251 * checks that will enforce the restrictions and will also do
252 * deadlock debugging)
253 *
254 * This function is similar to (but not equivalent to) down().
255 */
256void __sched mutex_lock(struct mutex *lock)
257{
258 might_sleep();
259
260 if (!__mutex_trylock_fast(lock))
261 __mutex_lock_slowpath(lock);
262}
263EXPORT_SYMBOL(mutex_lock);
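
/*
 * Illustrative usage sketch (not part of this file; all identifiers below are
 * hypothetical): a statically defined mutex serializing updates to shared
 * state, released by the same task that acquired it.
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static unsigned long example_count;
 *
 *	static void example_update(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_count++;		// protected by example_lock
 *		mutex_unlock(&example_lock);
 *	}
 */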
264#endif
265
266#include "ww_mutex.h"
267
268#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
269
270/*
271 * Trylock variant that returns the owning task on failure.
272 */
273static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
274{
275 return __mutex_trylock_common(lock, false);
276}
277
278static inline
279bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
280 struct mutex_waiter *waiter)
281{
282 struct ww_mutex *ww;
283
284 ww = container_of(lock, struct ww_mutex, base);
285
286 /*
287	 * If ww->ctx is set, its contents are undefined; only
288	 * by acquiring wait_lock is there a guarantee that
289	 * they are valid when read.
290 *
291 * As such, when deadlock detection needs to be
292 * performed the optimistic spinning cannot be done.
293 *
294 * Check this in every inner iteration because we may
295 * be racing against another thread's ww_mutex_lock.
296 */
297 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
298 return false;
299
300 /*
301 * If we aren't on the wait list yet, cancel the spin
302 * if there are waiters. We want to avoid stealing the
303 * lock from a waiter with an earlier stamp, since the
304 * other thread may already own a lock that we also
305 * need.
306 */
307 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
308 return false;
309
310 /*
311 * Similarly, stop spinning if we are no longer the
312 * first waiter.
313 */
314 if (waiter && !__mutex_waiter_is_first(lock, waiter))
315 return false;
316
317 return true;
318}
319
320/*
321 * Look out! "owner" is an entirely speculative pointer access and not
322 * reliable.
323 *
324 * "noinline" so that this function shows up on perf profiles.
325 */
326static noinline
327bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
328 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
329{
330 bool ret = true;
331
332 lockdep_assert_preemption_disabled();
333
334 while (__mutex_owner(lock) == owner) {
335 /*
336		 * Ensure we emit the owner->on_cpu dereference _after_
337		 * checking lock->owner still matches owner. We have already
338		 * disabled preemption, which is equivalent to an RCU read-side
339		 * critical section in the optimistic spinning code. Thus the
340		 * task_struct won't go away during the spinning
341		 * period.
342 */
343 barrier();
344
345 /*
346 * Use vcpu_is_preempted to detect lock holder preemption issue.
347 */
348 if (!owner_on_cpu(owner) || need_resched()) {
349 ret = false;
350 break;
351 }
352
353 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
354 ret = false;
355 break;
356 }
357
358 cpu_relax();
359 }
360
361 return ret;
362}
363
364/*
365 * Initial check for entering the mutex spinning loop
366 */
367static inline int mutex_can_spin_on_owner(struct mutex *lock)
368{
369 struct task_struct *owner;
370 int retval = 1;
371
372 lockdep_assert_preemption_disabled();
373
374 if (need_resched())
375 return 0;
376
377	 * We have already disabled preemption, which is equivalent to an RCU
378	 * read-side critical section in the optimistic spinning code. Thus the
379	 * task_struct won't go away during the spinning period.
380 * structure won't go away during the spinning period.
381 */
382 owner = __mutex_owner(lock);
383 if (owner)
384 retval = owner_on_cpu(owner);
385
386 /*
387 * If lock->owner is not set, the mutex has been released. Return true
388 * such that we'll trylock in the spin path, which is a faster option
389 * than the blocking slow path.
390 */
391 return retval;
392}
393
394/*
395 * Optimistic spinning.
396 *
397 * We try to spin for acquisition when we find that the lock owner
398 * is currently running on a (different) CPU and while we don't
399 * need to reschedule. The rationale is that if the lock owner is
400 * running, it is likely to release the lock soon.
401 *
402 * The mutex spinners are queued up using MCS lock so that only one
403 * spinner can compete for the mutex. However, if mutex spinning isn't
404 * going to happen, there is no point in going through the lock/unlock
405 * overhead.
406 *
407 * Returns true when the lock was taken, otherwise false, indicating
408 * that we need to jump to the slowpath and sleep.
409 *
410 * The waiter flag is set to true if the spinner is a waiter in the wait
411 * queue. The waiter-spinner will spin on the lock directly and concurrently
412 * with the spinner at the head of the OSQ, if present, until the owner is
413 * changed to itself.
414 */
415static __always_inline bool
416mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
417 struct mutex_waiter *waiter)
418{
419 if (!waiter) {
420 /*
421 * The purpose of the mutex_can_spin_on_owner() function is
422 * to eliminate the overhead of osq_lock() and osq_unlock()
423 * in case spinning isn't possible. As a waiter-spinner
424 * is not going to take OSQ lock anyway, there is no need
425 * to call mutex_can_spin_on_owner().
426 */
427 if (!mutex_can_spin_on_owner(lock))
428 goto fail;
429
430 /*
431 * In order to avoid a stampede of mutex spinners trying to
432 * acquire the mutex all at once, the spinners need to take a
433 * MCS (queued) lock first before spinning on the owner field.
434 */
435 if (!osq_lock(&lock->osq))
436 goto fail;
437 }
438
439 for (;;) {
440 struct task_struct *owner;
441
442 /* Try to acquire the mutex... */
443 owner = __mutex_trylock_or_owner(lock);
444 if (!owner)
445 break;
446
447 /*
448 * There's an owner, wait for it to either
449 * release the lock or go to sleep.
450 */
451 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
452 goto fail_unlock;
453
454 /*
455 * The cpu_relax() call is a compiler barrier which forces
456 * everything in this loop to be re-loaded. We don't need
457 * memory barriers as we'll eventually observe the right
458 * values at the cost of a few extra spins.
459 */
460 cpu_relax();
461 }
462
463 if (!waiter)
464 osq_unlock(&lock->osq);
465
466 return true;
467
468
469fail_unlock:
470 if (!waiter)
471 osq_unlock(&lock->osq);
472
473fail:
474 /*
475 * If we fell out of the spin path because of need_resched(),
476 * reschedule now, before we try-lock the mutex. This avoids getting
477 * scheduled out right after we obtained the mutex.
478 */
479 if (need_resched()) {
480 /*
481 * We _should_ have TASK_RUNNING here, but just in case
482 * we do not, make it so, otherwise we might get stuck.
483 */
484 __set_current_state(TASK_RUNNING);
485 schedule_preempt_disabled();
486 }
487
488 return false;
489}
490#else
491static __always_inline bool
492mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
493 struct mutex_waiter *waiter)
494{
495 return false;
496}
497#endif
498
499static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
500
501/**
502 * mutex_unlock - release the mutex
503 * @lock: the mutex to be released
504 *
505 * Unlock a mutex that has been locked by this task previously.
506 *
507 * This function must not be used in interrupt context. Unlocking
508 * of a mutex that is not locked is not allowed.
509 *
510 * The caller must ensure that the mutex stays alive until this function has
511 * returned - mutex_unlock() can NOT directly be used to release an object such
512 * that another concurrent task can free it.
513 * Mutexes are different from spinlocks & refcounts in this aspect.
514 *
515 * This function is similar to (but not equivalent to) up().
516 */
517void __sched mutex_unlock(struct mutex *lock)
518{
519#ifndef CONFIG_DEBUG_LOCK_ALLOC
520 if (__mutex_unlock_fast(lock))
521 return;
522#endif
523 __mutex_unlock_slowpath(lock, _RET_IP_);
524}
525EXPORT_SYMBOL(mutex_unlock);
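
/*
 * Illustrative sketch of the lifetime rule above (not part of this file;
 * identifiers are hypothetical). The storage containing the mutex must stay
 * valid until mutex_unlock() has fully returned, so freeing the object needs
 * its own existence guarantee (e.g. a refcount) rather than the mutex itself:
 *
 *	mutex_lock(&obj->lock);
 *	obj->users--;
 *	mutex_unlock(&obj->lock);
 *	// Another task that now observes obj->users == 0 must NOT kfree(obj)
 *	// yet, because this task may still be inside mutex_unlock(&obj->lock).
 */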
526
527/**
528 * ww_mutex_unlock - release the w/w mutex
529 * @lock: the mutex to be released
530 *
531 * Unlock a mutex that has been locked by this task previously with any of the
532 * ww_mutex_lock* functions (with or without an acquire context). It is
533 * forbidden to release the locks after releasing the acquire context.
534 *
535 * This function must not be used in interrupt context. Unlocking
536 * of an unlocked mutex is not allowed.
537 */
538void __sched ww_mutex_unlock(struct ww_mutex *lock)
539{
540 __ww_mutex_unlock(lock);
541 mutex_unlock(&lock->base);
542}
543EXPORT_SYMBOL(ww_mutex_unlock);
544
545/*
546 * Lock a mutex (possibly interruptible), slowpath:
547 */
548static __always_inline int __sched
549__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
550 struct lockdep_map *nest_lock, unsigned long ip,
551 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
552{
553 DEFINE_WAKE_Q(wake_q);
554 struct mutex_waiter waiter;
555 struct ww_mutex *ww;
556 unsigned long flags;
557 int ret;
558
559 if (!use_ww_ctx)
560 ww_ctx = NULL;
561
562 might_sleep();
563
564 MUTEX_WARN_ON(lock->magic != lock);
565
566 ww = container_of(lock, struct ww_mutex, base);
567 if (ww_ctx) {
568 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
569 return -EALREADY;
570
571 /*
572 * Reset the wounded flag after a kill. No other process can
573 * race and wound us here since they can't have a valid owner
574 * pointer if we don't have any locks held.
575 */
576 if (ww_ctx->acquired == 0)
577 ww_ctx->wounded = 0;
578
579#ifdef CONFIG_DEBUG_LOCK_ALLOC
580 nest_lock = &ww_ctx->dep_map;
581#endif
582 }
583
584 preempt_disable();
585 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
586
587 trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
588 if (__mutex_trylock(lock) ||
589 mutex_optimistic_spin(lock, ww_ctx, NULL)) {
590 /* got the lock, yay! */
591 lock_acquired(&lock->dep_map, ip);
592 if (ww_ctx)
593 ww_mutex_set_context_fastpath(ww, ww_ctx);
594 trace_contention_end(lock, 0);
595 preempt_enable();
596 return 0;
597 }
598
599 raw_spin_lock_irqsave(&lock->wait_lock, flags);
600 /*
601 * After waiting to acquire the wait_lock, try again.
602 */
603 if (__mutex_trylock(lock)) {
604 if (ww_ctx)
605 __ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
606
607 goto skip_wait;
608 }
609
610 debug_mutex_lock_common(lock, &waiter);
611 waiter.task = current;
612 if (use_ww_ctx)
613 waiter.ww_ctx = ww_ctx;
614
615 lock_contended(&lock->dep_map, ip);
616
617 if (!use_ww_ctx) {
618 /* add waiting tasks to the end of the waitqueue (FIFO): */
619 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
620 } else {
621 /*
622 * Add in stamp order, waking up waiters that must kill
623 * themselves.
624 */
625 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
626 if (ret)
627 goto err_early_kill;
628 }
629
630 set_current_state(state);
631 trace_contention_begin(lock, LCB_F_MUTEX);
632 for (;;) {
633 bool first;
634
635 /*
636 * Once we hold wait_lock, we're serialized against
637		 * mutex_unlock() handing the lock off to us; do a trylock
638 * before testing the error conditions to make sure we pick up
639 * the handoff.
640 */
641 if (__mutex_trylock(lock))
642 goto acquired;
643
644 /*
645 * Check for signals and kill conditions while holding
646 * wait_lock. This ensures the lock cancellation is ordered
647 * against mutex_unlock() and wake-ups do not go missing.
648 */
649 if (signal_pending_state(state, current)) {
650 ret = -EINTR;
651 goto err;
652 }
653
654 if (ww_ctx) {
655 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
656 if (ret)
657 goto err;
658 }
659
660 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
661 /* Make sure we do wakeups before calling schedule */
662 wake_up_q(&wake_q);
663 wake_q_init(&wake_q);
664
665 schedule_preempt_disabled();
666
667 first = __mutex_waiter_is_first(lock, &waiter);
668
669 set_current_state(state);
670 /*
671 * Here we order against unlock; we must either see it change
672 * state back to RUNNING and fall through the next schedule(),
673 * or we must see its unlock and acquire.
674 */
675 if (__mutex_trylock_or_handoff(lock, first))
676 break;
677
678 if (first) {
679 trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
680 if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
681 break;
682 trace_contention_begin(lock, LCB_F_MUTEX);
683 }
684
685 raw_spin_lock_irqsave(&lock->wait_lock, flags);
686 }
687 raw_spin_lock_irqsave(&lock->wait_lock, flags);
688acquired:
689 __set_current_state(TASK_RUNNING);
690
691 if (ww_ctx) {
692 /*
693 * Wound-Wait; we stole the lock (!first_waiter), check the
694 * waiters as anyone might want to wound us.
695 */
696 if (!ww_ctx->is_wait_die &&
697 !__mutex_waiter_is_first(lock, &waiter))
698 __ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
699 }
700
701 __mutex_remove_waiter(lock, &waiter);
702
703 debug_mutex_free_waiter(&waiter);
704
705skip_wait:
706 /* got the lock - cleanup and rejoice! */
707 lock_acquired(&lock->dep_map, ip);
708 trace_contention_end(lock, 0);
709
710 if (ww_ctx)
711 ww_mutex_lock_acquired(ww, ww_ctx);
712
713 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
714 wake_up_q(&wake_q);
715 preempt_enable();
716 return 0;
717
718err:
719 __set_current_state(TASK_RUNNING);
720 __mutex_remove_waiter(lock, &waiter);
721err_early_kill:
722 trace_contention_end(lock, ret);
723 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
724 debug_mutex_free_waiter(&waiter);
725 mutex_release(&lock->dep_map, ip);
726 wake_up_q(&wake_q);
727 preempt_enable();
728 return ret;
729}
730
731static int __sched
732__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
733 struct lockdep_map *nest_lock, unsigned long ip)
734{
735 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
736}
737
738static int __sched
739__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
740 unsigned long ip, struct ww_acquire_ctx *ww_ctx)
741{
742 return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
743}
744
745/**
746 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
747 * @ww: mutex to lock
748 * @ww_ctx: optional w/w acquire context
749 *
750 * Trylocks a mutex with the optional acquire context; no deadlock detection is
751 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
752 *
753 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is
754 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
755 *
756 * A mutex acquired with this function must be released with ww_mutex_unlock.
757 */
758int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
759{
760 if (!ww_ctx)
761 return mutex_trylock(&ww->base);
762
763 MUTEX_WARN_ON(ww->base.magic != &ww->base);
764
765 /*
766 * Reset the wounded flag after a kill. No other process can
767 * race and wound us here, since they can't have a valid owner
768 * pointer if we don't have any locks held.
769 */
770 if (ww_ctx->acquired == 0)
771 ww_ctx->wounded = 0;
772
773 if (__mutex_trylock(&ww->base)) {
774 ww_mutex_set_context_fastpath(ww, ww_ctx);
775 mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
776 return 1;
777 }
778
779 return 0;
780}
781EXPORT_SYMBOL(ww_mutex_trylock);
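
/*
 * Illustrative usage sketch (not part of this file; identifiers are
 * hypothetical): opportunistically taking one more w/w mutex inside an
 * already started acquire context, backing off instead of sleeping when it
 * is contended.
 *
 *	static bool example_try_extra(struct ww_mutex *extra,
 *				      struct ww_acquire_ctx *ctx)
 *	{
 *		if (!ww_mutex_trylock(extra, ctx))
 *			return false;		// contended, caller falls back
 *		// ... touch the data protected by @extra ...
 *		ww_mutex_unlock(extra);
 *		return true;
 *	}
 */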
782
783#ifdef CONFIG_DEBUG_LOCK_ALLOC
784void __sched
785mutex_lock_nested(struct mutex *lock, unsigned int subclass)
786{
787 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
788}
789
790EXPORT_SYMBOL_GPL(mutex_lock_nested);
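
/*
 * Illustrative sketch (not part of this file; identifiers are hypothetical):
 * the subclass tells lockdep that taking two mutexes of the same class in a
 * well-defined order is intentional, avoiding false positives.
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	// ... operate on both objects ...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */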
791
792void __sched
793_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
794{
795 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
796}
797EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
798
799int __sched
800mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
801{
802 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
803}
804EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
805
806int __sched
807mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
808{
809 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
810}
811EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
812
813void __sched
814mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
815{
816 int token;
817
818 might_sleep();
819
820 token = io_schedule_prepare();
821 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
822 subclass, NULL, _RET_IP_, NULL, 0);
823 io_schedule_finish(token);
824}
825EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
826
827static inline int
828ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
829{
830#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
831 unsigned tmp;
832
833 if (ctx->deadlock_inject_countdown-- == 0) {
834 tmp = ctx->deadlock_inject_interval;
835 if (tmp > UINT_MAX/4)
836 tmp = UINT_MAX;
837 else
838 tmp = tmp*2 + tmp + tmp/2;
839
840 ctx->deadlock_inject_interval = tmp;
841 ctx->deadlock_inject_countdown = tmp;
842 ctx->contending_lock = lock;
843
844 ww_mutex_unlock(lock);
845
846 return -EDEADLK;
847 }
848#endif
849
850 return 0;
851}
852
853int __sched
854ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
855{
856 int ret;
857
858 might_sleep();
859 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
860 0, _RET_IP_, ctx);
861 if (!ret && ctx && ctx->acquired > 1)
862 return ww_mutex_deadlock_injection(lock, ctx);
863
864 return ret;
865}
866EXPORT_SYMBOL_GPL(ww_mutex_lock);
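
/*
 * Illustrative sketch of the w/w acquire protocol (not part of this file;
 * identifiers are hypothetical, see Documentation/locking/ww-mutex-design.rst
 * for the canonical examples). -EDEADLK can only be returned once @ctx
 * already holds other locks; the caller then drops them all and restarts.
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *	ret = ww_mutex_lock(&obj->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		// unlock every w/w mutex already held under @ctx, then
 *		// reacquire this one with ww_mutex_lock_slow() and retry
 *	}
 *	ww_acquire_done(&ctx);
 *	// ... modify the data protected by obj->lock ...
 *	ww_mutex_unlock(&obj->lock);
 *	ww_acquire_fini(&ctx);
 */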
867
868int __sched
869ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
870{
871 int ret;
872
873 might_sleep();
874 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
875 0, _RET_IP_, ctx);
876
877 if (!ret && ctx && ctx->acquired > 1)
878 return ww_mutex_deadlock_injection(lock, ctx);
879
880 return ret;
881}
882EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
883
884#endif
885
886/*
887 * Release the lock, slowpath:
888 */
889static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
890{
891 struct task_struct *next = NULL;
892 DEFINE_WAKE_Q(wake_q);
893 unsigned long owner;
894 unsigned long flags;
895
896 mutex_release(&lock->dep_map, ip);
897
898 /*
899 * Release the lock before (potentially) taking the spinlock such that
900 * other contenders can get on with things ASAP.
901 *
902 * Except when HANDOFF, in that case we must not clear the owner field,
903 * but instead set it to the top waiter.
904 */
905 owner = atomic_long_read(&lock->owner);
906 for (;;) {
907 MUTEX_WARN_ON(__owner_task(owner) != current);
908 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
909
910 if (owner & MUTEX_FLAG_HANDOFF)
911 break;
912
913 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
914 if (owner & MUTEX_FLAG_WAITERS)
915 break;
916
917 return;
918 }
919 }
920
921 raw_spin_lock_irqsave(&lock->wait_lock, flags);
922 debug_mutex_unlock(lock);
923 if (!list_empty(&lock->wait_list)) {
924 /* get the first entry from the wait-list: */
925 struct mutex_waiter *waiter =
926 list_first_entry(&lock->wait_list,
927 struct mutex_waiter, list);
928
929 next = waiter->task;
930
931 debug_mutex_wake_waiter(lock, waiter);
932 wake_q_add(&wake_q, next);
933 }
934
935 if (owner & MUTEX_FLAG_HANDOFF)
936 __mutex_handoff(lock, next);
937
938 preempt_disable();
939 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
940 wake_up_q(&wake_q);
941 preempt_enable();
942}
943
944#ifndef CONFIG_DEBUG_LOCK_ALLOC
945/*
946 * Here come the less common (and hence less performance-critical) APIs:
947 * mutex_lock_interruptible() and mutex_trylock().
948 */
949static noinline int __sched
950__mutex_lock_killable_slowpath(struct mutex *lock);
951
952static noinline int __sched
953__mutex_lock_interruptible_slowpath(struct mutex *lock);
954
955/**
956 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
957 * @lock: The mutex to be acquired.
958 *
959 * Lock the mutex like mutex_lock(). If a signal is delivered while the
960 * process is sleeping, this function will return without acquiring the
961 * mutex.
962 *
963 * Context: Process context.
964 * Return: 0 if the lock was successfully acquired or %-EINTR if a
965 * signal arrived.
966 */
967int __sched mutex_lock_interruptible(struct mutex *lock)
968{
969 might_sleep();
970
971 if (__mutex_trylock_fast(lock))
972 return 0;
973
974 return __mutex_lock_interruptible_slowpath(lock);
975}
976
977EXPORT_SYMBOL(mutex_lock_interruptible);
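
/*
 * Illustrative usage sketch (not part of this file; identifiers are
 * hypothetical): a typical ioctl-style path that reports the signal to user
 * space instead of blocking uninterruptibly.
 *
 *	static long example_dev_op(struct example_dev *dev)
 *	{
 *		long ret;
 *
 *		ret = mutex_lock_interruptible(&dev->lock);
 *		if (ret)
 *			return ret;		// -EINTR: a signal arrived
 *
 *		ret = example_do_op(dev);	// dev->lock held
 *		mutex_unlock(&dev->lock);
 *		return ret;
 *	}
 */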
978
979/**
980 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
981 * @lock: The mutex to be acquired.
982 *
983 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
984 * the current process is delivered while the process is sleeping, this
985 * function will return without acquiring the mutex.
986 *
987 * Context: Process context.
988 * Return: 0 if the lock was successfully acquired or %-EINTR if a
989 * fatal signal arrived.
990 */
991int __sched mutex_lock_killable(struct mutex *lock)
992{
993 might_sleep();
994
995 if (__mutex_trylock_fast(lock))
996 return 0;
997
998 return __mutex_lock_killable_slowpath(lock);
999}
1000EXPORT_SYMBOL(mutex_lock_killable);
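
/*
 * Illustrative sketch (not part of this file; example_lock is hypothetical):
 * use the killable variant when only a fatal signal should abort the wait,
 * e.g. so a task being OOM-killed does not stay stuck on the mutex.
 *
 *	ret = mutex_lock_killable(&example_lock);
 *	if (ret)
 *		return ret;		// -EINTR: fatal signal while waiting
 */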
1001
1002/**
1003 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1004 * @lock: The mutex to be acquired.
1005 *
1006 * Lock the mutex like mutex_lock(). While the task is waiting for this
1007 * mutex, it will be accounted as being in the IO wait state by the
1008 * scheduler.
1009 *
1010 * Context: Process context.
1011 */
1012void __sched mutex_lock_io(struct mutex *lock)
1013{
1014 int token;
1015
1016 token = io_schedule_prepare();
1017 mutex_lock(lock);
1018 io_schedule_finish(token);
1019}
1020EXPORT_SYMBOL_GPL(mutex_lock_io);
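
/*
 * Illustrative sketch (not part of this file; identifiers are hypothetical):
 * use mutex_lock_io() for a mutex that is held across actual I/O, so time
 * spent waiting for it is accounted as iowait.
 *
 *	mutex_lock_io(&example_io_lock);
 *	example_submit_and_wait_for_io();	// lock held across the I/O
 *	mutex_unlock(&example_io_lock);
 */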
1021
1022static noinline void __sched
1023__mutex_lock_slowpath(struct mutex *lock)
1024{
1025 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1026}
1027
1028static noinline int __sched
1029__mutex_lock_killable_slowpath(struct mutex *lock)
1030{
1031 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1032}
1033
1034static noinline int __sched
1035__mutex_lock_interruptible_slowpath(struct mutex *lock)
1036{
1037 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1038}
1039
1040static noinline int __sched
1041__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1042{
1043 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
1044 _RET_IP_, ctx);
1045}
1046
1047static noinline int __sched
1048__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1049 struct ww_acquire_ctx *ctx)
1050{
1051 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
1052 _RET_IP_, ctx);
1053}
1054
1055#endif
1056
1057/**
1058 * mutex_trylock - try to acquire the mutex, without waiting
1059 * @lock: the mutex to be acquired
1060 *
1061 * Try to acquire the mutex atomically. Returns 1 if the mutex
1062 * has been acquired successfully, and 0 on contention.
1063 *
1064 * NOTE: this function follows the spin_trylock() convention, so
1065 * it is negated from the down_trylock() return values! Be careful
1066 * about this when converting semaphore users to mutexes.
1067 *
1068 * This function must not be used in interrupt context. The
1069 * mutex must be released by the same task that acquired it.
1070 */
1071int __sched mutex_trylock(struct mutex *lock)
1072{
1073 bool locked;
1074
1075 MUTEX_WARN_ON(lock->magic != lock);
1076
1077 locked = __mutex_trylock(lock);
1078 if (locked)
1079 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1080
1081 return locked;
1082}
1083EXPORT_SYMBOL(mutex_trylock);
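
/*
 * Illustrative sketch of the return convention noted above (not part of this
 * file; identifiers are hypothetical): unlike down_trylock(), a non-zero
 * return means the lock WAS acquired.
 *
 *	if (mutex_trylock(&example_lock)) {
 *		example_do_work();		// lock held
 *		mutex_unlock(&example_lock);
 *	} else {
 *		example_defer_work();		// contended, cannot sleep here
 *	}
 */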
1084
1085#ifndef CONFIG_DEBUG_LOCK_ALLOC
1086int __sched
1087ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1088{
1089 might_sleep();
1090
1091 if (__mutex_trylock_fast(&lock->base)) {
1092 if (ctx)
1093 ww_mutex_set_context_fastpath(lock, ctx);
1094 return 0;
1095 }
1096
1097 return __ww_mutex_lock_slowpath(lock, ctx);
1098}
1099EXPORT_SYMBOL(ww_mutex_lock);
1100
1101int __sched
1102ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1103{
1104 might_sleep();
1105
1106 if (__mutex_trylock_fast(&lock->base)) {
1107 if (ctx)
1108 ww_mutex_set_context_fastpath(lock, ctx);
1109 return 0;
1110 }
1111
1112 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1113}
1114EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1115
1116#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
1117#endif /* !CONFIG_PREEMPT_RT */
1118
1119EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
1120EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);
1121
1122/**
1123 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1124 * @cnt: the atomic counter which we are to decrement
1125 * @lock: the mutex to return holding if we dec to 0
1126 *
1127 * Return: 1, with the mutex held, if the decrement reached 0; 0 otherwise.
1128 */
1129int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1130{
1131 /* dec if we can't possibly hit 0 */
1132 if (atomic_add_unless(cnt, -1, 1))
1133 return 0;
1134 /* we might hit 0, so take the lock */
1135 mutex_lock(lock);
1136 if (!atomic_dec_and_test(cnt)) {
1137 /* when we actually did the dec, we didn't hit 0 */
1138 mutex_unlock(lock);
1139 return 0;
1140 }
1141 /* we hit 0, and we hold the lock */
1142 return 1;
1143}
1144EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
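
/*
 * Illustrative usage sketch (not part of this file; identifiers are
 * hypothetical): dropping a reference where the final put must remove the
 * object from a list protected by a mutex.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &example_list_lock)) {
 *		// the count hit 0 and example_list_lock is now held
 *		list_del(&obj->node);
 *		mutex_unlock(&example_list_lock);
 *		kfree(obj);
 *	}
 */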
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * kernel/locking/mutex.c
4 *
5 * Mutexes: blocking mutual exclusion locks
6 *
7 * Started by Ingo Molnar:
8 *
9 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 *
11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12 * David Howells for suggestions and improvements.
13 *
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
16 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17 * and Sven Dietrich.
18 *
19 * Also see Documentation/locking/mutex-design.rst.
20 */
21#include <linux/mutex.h>
22#include <linux/ww_mutex.h>
23#include <linux/sched/signal.h>
24#include <linux/sched/rt.h>
25#include <linux/sched/wake_q.h>
26#include <linux/sched/debug.h>
27#include <linux/export.h>
28#include <linux/spinlock.h>
29#include <linux/interrupt.h>
30#include <linux/debug_locks.h>
31#include <linux/osq_lock.h>
32
33#ifdef CONFIG_DEBUG_MUTEXES
34# include "mutex-debug.h"
35#else
36# include "mutex.h"
37#endif
38
39void
40__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
41{
42 atomic_long_set(&lock->owner, 0);
43 spin_lock_init(&lock->wait_lock);
44 INIT_LIST_HEAD(&lock->wait_list);
45#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
46 osq_lock_init(&lock->osq);
47#endif
48
49 debug_mutex_init(lock, name, key);
50}
51EXPORT_SYMBOL(__mutex_init);
52
53/*
54 * @owner: contains: 'struct task_struct *' to the current lock owner,
55 * NULL means not owned. Since task_struct pointers are aligned at
56 * at least L1_CACHE_BYTES, we have low bits to store extra state.
57 *
58 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
59 * Bit1 indicates unlock needs to hand the lock to the top-waiter
60 * Bit2 indicates handoff has been done and we're waiting for pickup.
61 */
62#define MUTEX_FLAG_WAITERS 0x01
63#define MUTEX_FLAG_HANDOFF 0x02
64#define MUTEX_FLAG_PICKUP 0x04
65
66#define MUTEX_FLAGS 0x07
67
68/*
69 * Internal helper function; C doesn't allow us to hide it :/
70 *
71 * DO NOT USE (outside of mutex code).
72 */
73static inline struct task_struct *__mutex_owner(struct mutex *lock)
74{
75 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
76}
77
78static inline struct task_struct *__owner_task(unsigned long owner)
79{
80 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
81}
82
83bool mutex_is_locked(struct mutex *lock)
84{
85 return __mutex_owner(lock) != NULL;
86}
87EXPORT_SYMBOL(mutex_is_locked);
88
89static inline unsigned long __owner_flags(unsigned long owner)
90{
91 return owner & MUTEX_FLAGS;
92}
93
94/*
95 * Trylock variant that returns the owning task on failure.
96 */
97static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
98{
99 unsigned long owner, curr = (unsigned long)current;
100
101 owner = atomic_long_read(&lock->owner);
102 for (;;) { /* must loop, can race against a flag */
103 unsigned long old, flags = __owner_flags(owner);
104 unsigned long task = owner & ~MUTEX_FLAGS;
105
106 if (task) {
107 if (likely(task != curr))
108 break;
109
110 if (likely(!(flags & MUTEX_FLAG_PICKUP)))
111 break;
112
113 flags &= ~MUTEX_FLAG_PICKUP;
114 } else {
115#ifdef CONFIG_DEBUG_MUTEXES
116 DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
117#endif
118 }
119
120 /*
121 * We set the HANDOFF bit, we must make sure it doesn't live
122 * past the point where we acquire it. This would be possible
123 * if we (accidentally) set the bit on an unlocked mutex.
124 */
125 flags &= ~MUTEX_FLAG_HANDOFF;
126
127 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
128 if (old == owner)
129 return NULL;
130
131 owner = old;
132 }
133
134 return __owner_task(owner);
135}
136
137/*
138 * Actual trylock that will work on any unlocked state.
139 */
140static inline bool __mutex_trylock(struct mutex *lock)
141{
142 return !__mutex_trylock_or_owner(lock);
143}
144
145#ifndef CONFIG_DEBUG_LOCK_ALLOC
146/*
147 * Lockdep annotations are contained to the slow paths for simplicity.
148 * There is nothing that would stop spreading the lockdep annotations outwards
149 * except more code.
150 */
151
152/*
153 * Optimistic trylock that only works in the uncontended case. Make sure to
154 * follow with a __mutex_trylock() before failing.
155 */
156static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
157{
158 unsigned long curr = (unsigned long)current;
159 unsigned long zero = 0UL;
160
161 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
162 return true;
163
164 return false;
165}
166
167static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
168{
169 unsigned long curr = (unsigned long)current;
170
171 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
172 return true;
173
174 return false;
175}
176#endif
177
178static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
179{
180 atomic_long_or(flag, &lock->owner);
181}
182
183static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
184{
185 atomic_long_andnot(flag, &lock->owner);
186}
187
188static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
189{
190 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
191}
192
193/*
194 * Add @waiter to a given location in the lock wait_list and set the
195 * FLAG_WAITERS flag if it's the first waiter.
196 */
197static void
198__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
199 struct list_head *list)
200{
201 debug_mutex_add_waiter(lock, waiter, current);
202
203 list_add_tail(&waiter->list, list);
204 if (__mutex_waiter_is_first(lock, waiter))
205 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
206}
207
208static void
209__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
210{
211 list_del(&waiter->list);
212 if (likely(list_empty(&lock->wait_list)))
213 __mutex_clear_flag(lock, MUTEX_FLAGS);
214
215 debug_mutex_remove_waiter(lock, waiter, current);
216}
217
218/*
219 * Give up ownership to a specific task, when @task = NULL, this is equivalent
220 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
221 * WAITERS. Provides RELEASE semantics like a regular unlock, the
222 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
223 */
224static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
225{
226 unsigned long owner = atomic_long_read(&lock->owner);
227
228 for (;;) {
229 unsigned long old, new;
230
231#ifdef CONFIG_DEBUG_MUTEXES
232 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
233 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
234#endif
235
236 new = (owner & MUTEX_FLAG_WAITERS);
237 new |= (unsigned long)task;
238 if (task)
239 new |= MUTEX_FLAG_PICKUP;
240
241 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
242 if (old == owner)
243 break;
244
245 owner = old;
246 }
247}
248
249#ifndef CONFIG_DEBUG_LOCK_ALLOC
250/*
251 * We split the mutex lock/unlock logic into separate fastpath and
252 * slowpath functions, to reduce the register pressure on the fastpath.
253 * We also put the fastpath first in the kernel image, to make sure the
254 * branch is predicted by the CPU as default-untaken.
255 */
256static void __sched __mutex_lock_slowpath(struct mutex *lock);
257
258/**
259 * mutex_lock - acquire the mutex
260 * @lock: the mutex to be acquired
261 *
262 * Lock the mutex exclusively for this task. If the mutex is not
263 * available right now, it will sleep until it can get it.
264 *
265 * The mutex must later on be released by the same task that
266 * acquired it. Recursive locking is not allowed. The task
267 * may not exit without first unlocking the mutex. Also, kernel
268 * memory where the mutex resides must not be freed with
269 * the mutex still locked. The mutex must first be initialized
270 * (or statically defined) before it can be locked. memset()-ing
271 * the mutex to 0 is not allowed.
272 *
273 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
274 * checks that will enforce the restrictions and will also do
275 * deadlock debugging)
276 *
277 * This function is similar to (but not equivalent to) down().
278 */
279void __sched mutex_lock(struct mutex *lock)
280{
281 might_sleep();
282
283 if (!__mutex_trylock_fast(lock))
284 __mutex_lock_slowpath(lock);
285}
286EXPORT_SYMBOL(mutex_lock);
287#endif
288
289/*
290 * Wait-Die:
291 * The newer transactions are killed when:
292 * It (the new transaction) makes a request for a lock being held
293 * by an older transaction.
294 *
295 * Wound-Wait:
296 * The newer transactions are wounded when:
297 * An older transaction makes a request for a lock being held by
298 * the newer transaction.
299 */
300
301/*
302 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
303 * it.
304 */
305static __always_inline void
306ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
307{
308#ifdef CONFIG_DEBUG_MUTEXES
309 /*
310 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
311 * but released with a normal mutex_unlock in this call.
312 *
313 * This should never happen, always use ww_mutex_unlock.
314 */
315 DEBUG_LOCKS_WARN_ON(ww->ctx);
316
317 /*
318 * Not quite done after calling ww_acquire_done() ?
319 */
320 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
321
322 if (ww_ctx->contending_lock) {
323 /*
324 * After -EDEADLK you tried to
325 * acquire a different ww_mutex? Bad!
326 */
327 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
328
329 /*
330 * You called ww_mutex_lock after receiving -EDEADLK,
331 * but 'forgot' to unlock everything else first?
332 */
333 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
334 ww_ctx->contending_lock = NULL;
335 }
336
337 /*
338 * Naughty, using a different class will lead to undefined behavior!
339 */
340 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
341#endif
342 ww_ctx->acquired++;
343 ww->ctx = ww_ctx;
344}
345
346/*
347 * Determine if context @a is 'after' context @b. IOW, @a is a younger
348 * transaction than @b and depending on algorithm either needs to wait for
349 * @b or die.
350 */
351static inline bool __sched
352__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
353{
354
355 return (signed long)(a->stamp - b->stamp) > 0;
356}
357
358/*
359 * Wait-Die; wake a younger waiter context (when locks held) such that it can
360 * die.
361 *
362 * Among waiters with context, only the first one can have other locks acquired
363 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
364 * __ww_mutex_check_kill() wake any but the earliest context.
365 */
366static bool __sched
367__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
368 struct ww_acquire_ctx *ww_ctx)
369{
370 if (!ww_ctx->is_wait_die)
371 return false;
372
373 if (waiter->ww_ctx->acquired > 0 &&
374 __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
375 debug_mutex_wake_waiter(lock, waiter);
376 wake_up_process(waiter->task);
377 }
378
379 return true;
380}
381
382/*
383 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
384 *
385 * Wound the lock holder if there are waiters with older transactions than
386 * the lock holders. Even if multiple waiters may wound the lock holder,
387 * it's sufficient that only one does.
388 */
389static bool __ww_mutex_wound(struct mutex *lock,
390 struct ww_acquire_ctx *ww_ctx,
391 struct ww_acquire_ctx *hold_ctx)
392{
393 struct task_struct *owner = __mutex_owner(lock);
394
395 lockdep_assert_held(&lock->wait_lock);
396
397 /*
398 * Possible through __ww_mutex_add_waiter() when we race with
399 * ww_mutex_set_context_fastpath(). In that case we'll get here again
400 * through __ww_mutex_check_waiters().
401 */
402 if (!hold_ctx)
403 return false;
404
405 /*
406 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
407 * it cannot go away because we'll have FLAG_WAITERS set and hold
408 * wait_lock.
409 */
410 if (!owner)
411 return false;
412
413 if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
414 hold_ctx->wounded = 1;
415
416 /*
417 * wake_up_process() paired with set_current_state()
418 * inserts sufficient barriers to make sure @owner either sees
419 * it's wounded in __ww_mutex_check_kill() or has a
420 * wakeup pending to re-read the wounded state.
421 */
422 if (owner != current)
423 wake_up_process(owner);
424
425 return true;
426 }
427
428 return false;
429}
430
431/*
432 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
433 * behind us on the wait-list, check if they need to die, or wound us.
434 *
435 * See __ww_mutex_add_waiter() for the list-order construction; basically the
436 * list is ordered by stamp, smallest (oldest) first.
437 *
438 * This relies on never mixing wait-die/wound-wait on the same wait-list;
439 * which is currently ensured by that being a ww_class property.
440 *
441 * The current task must not be on the wait list.
442 */
443static void __sched
444__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
445{
446 struct mutex_waiter *cur;
447
448 lockdep_assert_held(&lock->wait_lock);
449
450 list_for_each_entry(cur, &lock->wait_list, list) {
451 if (!cur->ww_ctx)
452 continue;
453
454 if (__ww_mutex_die(lock, cur, ww_ctx) ||
455 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
456 break;
457 }
458}
459
460/*
461 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
462 * and wake up any waiters so they can recheck.
463 */
464static __always_inline void
465ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
466{
467 ww_mutex_lock_acquired(lock, ctx);
468
469 /*
470 * The lock->ctx update should be visible on all cores before
471 * the WAITERS check is done, otherwise contended waiters might be
472 * missed. The contended waiters will either see ww_ctx == NULL
473 * and keep spinning, or it will acquire wait_lock, add itself
474 * to waiter list and sleep.
475 */
476 smp_mb(); /* See comments above and below. */
477
478 /*
479 * [W] ww->ctx = ctx [W] MUTEX_FLAG_WAITERS
480 * MB MB
481 * [R] MUTEX_FLAG_WAITERS [R] ww->ctx
482 *
483 * The memory barrier above pairs with the memory barrier in
484 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
485 * and/or !empty list.
486 */
487 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
488 return;
489
490 /*
491 * Uh oh, we raced in fastpath, check if any of the waiters need to
492 * die or wound us.
493 */
494 spin_lock(&lock->base.wait_lock);
495 __ww_mutex_check_waiters(&lock->base, ctx);
496 spin_unlock(&lock->base.wait_lock);
497}
498
499#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
500
501static inline
502bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
503 struct mutex_waiter *waiter)
504{
505 struct ww_mutex *ww;
506
507 ww = container_of(lock, struct ww_mutex, base);
508
509 /*
510 * If ww->ctx is set the contents are undefined, only
511 * by acquiring wait_lock there is a guarantee that
512 * they are not invalid when reading.
513 *
514 * As such, when deadlock detection needs to be
515 * performed the optimistic spinning cannot be done.
516 *
517 * Check this in every inner iteration because we may
518 * be racing against another thread's ww_mutex_lock.
519 */
520 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
521 return false;
522
523 /*
524 * If we aren't on the wait list yet, cancel the spin
525 * if there are waiters. We want to avoid stealing the
526 * lock from a waiter with an earlier stamp, since the
527 * other thread may already own a lock that we also
528 * need.
529 */
530 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
531 return false;
532
533 /*
534 * Similarly, stop spinning if we are no longer the
535 * first waiter.
536 */
537 if (waiter && !__mutex_waiter_is_first(lock, waiter))
538 return false;
539
540 return true;
541}
542
543/*
544 * Look out! "owner" is an entirely speculative pointer access and not
545 * reliable.
546 *
547 * "noinline" so that this function shows up on perf profiles.
548 */
549static noinline
550bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
551 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
552{
553 bool ret = true;
554
555 rcu_read_lock();
556 while (__mutex_owner(lock) == owner) {
557 /*
558 * Ensure we emit the owner->on_cpu, dereference _after_
559 * checking lock->owner still matches owner. If that fails,
560 * owner might point to freed memory. If it still matches,
561 * the rcu_read_lock() ensures the memory stays valid.
562 */
563 barrier();
564
565 /*
566 * Use vcpu_is_preempted to detect lock holder preemption issue.
567 */
568 if (!owner->on_cpu || need_resched() ||
569 vcpu_is_preempted(task_cpu(owner))) {
570 ret = false;
571 break;
572 }
573
574 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
575 ret = false;
576 break;
577 }
578
579 cpu_relax();
580 }
581 rcu_read_unlock();
582
583 return ret;
584}
585
586/*
587 * Initial check for entering the mutex spinning loop
588 */
589static inline int mutex_can_spin_on_owner(struct mutex *lock)
590{
591 struct task_struct *owner;
592 int retval = 1;
593
594 if (need_resched())
595 return 0;
596
597 rcu_read_lock();
598 owner = __mutex_owner(lock);
599
600 /*
601 * As lock holder preemption issue, we both skip spinning if task is not
602 * on cpu or its cpu is preempted
603 */
604 if (owner)
605 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
606 rcu_read_unlock();
607
608 /*
609 * If lock->owner is not set, the mutex has been released. Return true
610 * such that we'll trylock in the spin path, which is a faster option
611 * than the blocking slow path.
612 */
613 return retval;
614}
615
616/*
617 * Optimistic spinning.
618 *
619 * We try to spin for acquisition when we find that the lock owner
620 * is currently running on a (different) CPU and while we don't
621 * need to reschedule. The rationale is that if the lock owner is
622 * running, it is likely to release the lock soon.
623 *
624 * The mutex spinners are queued up using MCS lock so that only one
625 * spinner can compete for the mutex. However, if mutex spinning isn't
626 * going to happen, there is no point in going through the lock/unlock
627 * overhead.
628 *
629 * Returns true when the lock was taken, otherwise false, indicating
630 * that we need to jump to the slowpath and sleep.
631 *
632 * The waiter flag is set to true if the spinner is a waiter in the wait
633 * queue. The waiter-spinner will spin on the lock directly and concurrently
634 * with the spinner at the head of the OSQ, if present, until the owner is
635 * changed to itself.
636 */
637static __always_inline bool
638mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
639 struct mutex_waiter *waiter)
640{
641 if (!waiter) {
642 /*
643 * The purpose of the mutex_can_spin_on_owner() function is
644 * to eliminate the overhead of osq_lock() and osq_unlock()
645 * in case spinning isn't possible. As a waiter-spinner
646 * is not going to take OSQ lock anyway, there is no need
647 * to call mutex_can_spin_on_owner().
648 */
649 if (!mutex_can_spin_on_owner(lock))
650 goto fail;
651
652 /*
653 * In order to avoid a stampede of mutex spinners trying to
654 * acquire the mutex all at once, the spinners need to take a
655 * MCS (queued) lock first before spinning on the owner field.
656 */
657 if (!osq_lock(&lock->osq))
658 goto fail;
659 }
660
661 for (;;) {
662 struct task_struct *owner;
663
664 /* Try to acquire the mutex... */
665 owner = __mutex_trylock_or_owner(lock);
666 if (!owner)
667 break;
668
669 /*
670 * There's an owner, wait for it to either
671 * release the lock or go to sleep.
672 */
673 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
674 goto fail_unlock;
675
676 /*
677 * The cpu_relax() call is a compiler barrier which forces
678 * everything in this loop to be re-loaded. We don't need
679 * memory barriers as we'll eventually observe the right
680 * values at the cost of a few extra spins.
681 */
682 cpu_relax();
683 }
684
685 if (!waiter)
686 osq_unlock(&lock->osq);
687
688 return true;
689
690
691fail_unlock:
692 if (!waiter)
693 osq_unlock(&lock->osq);
694
695fail:
696 /*
697 * If we fell out of the spin path because of need_resched(),
698 * reschedule now, before we try-lock the mutex. This avoids getting
699 * scheduled out right after we obtained the mutex.
700 */
701 if (need_resched()) {
702 /*
703 * We _should_ have TASK_RUNNING here, but just in case
704 * we do not, make it so, otherwise we might get stuck.
705 */
706 __set_current_state(TASK_RUNNING);
707 schedule_preempt_disabled();
708 }
709
710 return false;
711}
712#else
713static __always_inline bool
714mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
715 struct mutex_waiter *waiter)
716{
717 return false;
718}
719#endif
720
721static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
722
723/**
724 * mutex_unlock - release the mutex
725 * @lock: the mutex to be released
726 *
727 * Unlock a mutex that has been locked by this task previously.
728 *
729 * This function must not be used in interrupt context. Unlocking
730 * of a not locked mutex is not allowed.
731 *
732 * This function is similar to (but not equivalent to) up().
733 */
734void __sched mutex_unlock(struct mutex *lock)
735{
736#ifndef CONFIG_DEBUG_LOCK_ALLOC
737 if (__mutex_unlock_fast(lock))
738 return;
739#endif
740 __mutex_unlock_slowpath(lock, _RET_IP_);
741}
742EXPORT_SYMBOL(mutex_unlock);
743
744/**
745 * ww_mutex_unlock - release the w/w mutex
746 * @lock: the mutex to be released
747 *
748 * Unlock a mutex that has been locked by this task previously with any of the
749 * ww_mutex_lock* functions (with or without an acquire context). It is
750 * forbidden to release the locks after releasing the acquire context.
751 *
752 * This function must not be used in interrupt context. Unlocking
753 * of a unlocked mutex is not allowed.
754 */
755void __sched ww_mutex_unlock(struct ww_mutex *lock)
756{
757 /*
758 * The unlocking fastpath is the 0->1 transition from 'locked'
759 * into 'unlocked' state:
760 */
761 if (lock->ctx) {
762#ifdef CONFIG_DEBUG_MUTEXES
763 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
764#endif
765 if (lock->ctx->acquired > 0)
766 lock->ctx->acquired--;
767 lock->ctx = NULL;
768 }
769
770 mutex_unlock(&lock->base);
771}
772EXPORT_SYMBOL(ww_mutex_unlock);
773
774
775static __always_inline int __sched
776__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
777{
778 if (ww_ctx->acquired > 0) {
779#ifdef CONFIG_DEBUG_MUTEXES
780 struct ww_mutex *ww;
781
782 ww = container_of(lock, struct ww_mutex, base);
783 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
784 ww_ctx->contending_lock = ww;
785#endif
786 return -EDEADLK;
787 }
788
789 return 0;
790}
791
792
793/*
794 * Check the wound condition for the current lock acquire.
795 *
796 * Wound-Wait: If we're wounded, kill ourself.
797 *
798 * Wait-Die: If we're trying to acquire a lock already held by an older
799 * context, kill ourselves.
800 *
801 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
802 * look at waiters before us in the wait-list.
803 */
804static inline int __sched
805__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
806 struct ww_acquire_ctx *ctx)
807{
808 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
809 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
810 struct mutex_waiter *cur;
811
812 if (ctx->acquired == 0)
813 return 0;
814
815 if (!ctx->is_wait_die) {
816 if (ctx->wounded)
817 return __ww_mutex_kill(lock, ctx);
818
819 return 0;
820 }
821
822 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
823 return __ww_mutex_kill(lock, ctx);
824
825 /*
826 * If there is a waiter in front of us that has a context, then its
827 * stamp is earlier than ours and we must kill ourselves.
828 */
829 cur = waiter;
830 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
831 if (!cur->ww_ctx)
832 continue;
833
834 return __ww_mutex_kill(lock, ctx);
835 }
836
837 return 0;
838}
839
840/*
841 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
842 * smallest first, so that older contexts are preferred over younger ones
843 * when acquiring the lock.
844 *
845 * Waiters without context are interspersed in FIFO order.
846 *
847 * Furthermore, for Wait-Die we kill ourselves immediately when older
848 * contexts are already waiting, to avoid unnecessary waiting; for
849 * Wound-Wait we wound the owning context when it is younger than us.
850 */
851static inline int __sched
852__ww_mutex_add_waiter(struct mutex_waiter *waiter,
853 struct mutex *lock,
854 struct ww_acquire_ctx *ww_ctx)
855{
856 struct mutex_waiter *cur;
857 struct list_head *pos;
858 bool is_wait_die;
859
860 if (!ww_ctx) {
861 __mutex_add_waiter(lock, waiter, &lock->wait_list);
862 return 0;
863 }
864
865 is_wait_die = ww_ctx->is_wait_die;
866
867 /*
868 * Add the waiter before the first waiter with a higher stamp.
869 * Waiters without a context are skipped to avoid starving
870 * them. Wait-Die waiters may die here. Wound-Wait waiters
871 * never die here, but they are sorted in stamp order and
872 * may wound the lock holder.
873 */
874 pos = &lock->wait_list;
875 list_for_each_entry_reverse(cur, &lock->wait_list, list) {
876 if (!cur->ww_ctx)
877 continue;
878
879 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
880 /*
881 * Wait-Die: if we find an older context waiting, there
882 * is no point in queueing behind it, as we'd have to
883 * die the moment it would acquire the lock.
884 */
885 if (is_wait_die) {
886 int ret = __ww_mutex_kill(lock, ww_ctx);
887
888 if (ret)
889 return ret;
890 }
891
892 break;
893 }
894
895 pos = &cur->list;
896
897 /* Wait-Die: ensure younger waiters die. */
898 __ww_mutex_die(lock, cur, ww_ctx);
899 }
900
901 __mutex_add_waiter(lock, waiter, pos);
902
903 /*
904 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
905 * wound it so that we can proceed.
906 */
907 if (!is_wait_die) {
908 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
909
910 /*
911 * See ww_mutex_set_context_fastpath(). Orders setting
912 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
913 * such that either we or the fastpath will wound @ww->ctx.
914 */
915 smp_mb();
916 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
917 }
918
919 return 0;
920}
921
922/*
923 * Lock a mutex (possibly interruptible), slowpath:
924 */
925static __always_inline int __sched
926__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
927 struct lockdep_map *nest_lock, unsigned long ip,
928 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
929{
930 struct mutex_waiter waiter;
931 struct ww_mutex *ww;
932 int ret;
933
934 if (!use_ww_ctx)
935 ww_ctx = NULL;
936
937 might_sleep();
938
939#ifdef CONFIG_DEBUG_MUTEXES
940 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
941#endif
942
943 ww = container_of(lock, struct ww_mutex, base);
944 if (ww_ctx) {
945 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
946 return -EALREADY;
947
948 /*
949 * Reset the wounded flag after a kill. No other process can
950 * race and wound us here since they can't have a valid owner
951 * pointer if we don't have any locks held.
952 */
953 if (ww_ctx->acquired == 0)
954 ww_ctx->wounded = 0;
955 }
956
957 preempt_disable();
958 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
959
960 if (__mutex_trylock(lock) ||
961 mutex_optimistic_spin(lock, ww_ctx, NULL)) {
962 /* got the lock, yay! */
963 lock_acquired(&lock->dep_map, ip);
964 if (ww_ctx)
965 ww_mutex_set_context_fastpath(ww, ww_ctx);
966 preempt_enable();
967 return 0;
968 }
969
970 spin_lock(&lock->wait_lock);
971 /*
972 * After waiting to acquire the wait_lock, try again.
973 */
974 if (__mutex_trylock(lock)) {
975 if (ww_ctx)
976 __ww_mutex_check_waiters(lock, ww_ctx);
977
978 goto skip_wait;
979 }
980
981 debug_mutex_lock_common(lock, &waiter);
982
983 lock_contended(&lock->dep_map, ip);
984
985 if (!use_ww_ctx) {
986 /* add waiting tasks to the end of the waitqueue (FIFO): */
987 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
988
989
990#ifdef CONFIG_DEBUG_MUTEXES
991 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
992#endif
993 } else {
994 /*
995 * Add in stamp order, waking up waiters that must kill
996 * themselves.
997 */
998 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
999 if (ret)
1000 goto err_early_kill;
1001
1002 waiter.ww_ctx = ww_ctx;
1003 }
1004
1005 waiter.task = current;
1006
1007 set_current_state(state);
1008 for (;;) {
1009 bool first;
1010
1011 /*
1012 * Once we hold wait_lock, we're serialized against
1013 * mutex_unlock() handing the lock off to us; do a trylock
1014 * before testing the error conditions to make sure we pick up
1015 * the handoff.
1016 */
1017 if (__mutex_trylock(lock))
1018 goto acquired;
1019
1020 /*
1021 * Check for signals and kill conditions while holding
1022 * wait_lock. This ensures the lock cancellation is ordered
1023 * against mutex_unlock() and wake-ups do not go missing.
1024 */
1025 if (signal_pending_state(state, current)) {
1026 ret = -EINTR;
1027 goto err;
1028 }
1029
1030 if (ww_ctx) {
1031 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1032 if (ret)
1033 goto err;
1034 }
1035
1036 spin_unlock(&lock->wait_lock);
1037 schedule_preempt_disabled();
1038
1039 first = __mutex_waiter_is_first(lock, &waiter);
1040 if (first)
1041 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
1042
1043 set_current_state(state);
1044 /*
1045 * Here we order against unlock; we must either see it change
1046 * state back to RUNNING and fall through the next schedule(),
1047 * or we must see its unlock and acquire.
1048 */
1049 if (__mutex_trylock(lock) ||
1050 (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
1051 break;
1052
1053 spin_lock(&lock->wait_lock);
1054 }
1055 spin_lock(&lock->wait_lock);
1056acquired:
1057 __set_current_state(TASK_RUNNING);
1058
1059 if (ww_ctx) {
1060 /*
1061 * Wound-Wait: we stole the lock without being the first waiter;
1062 * re-check the waiters, as anyone might want to wound us.
1063 */
1064 if (!ww_ctx->is_wait_die &&
1065 !__mutex_waiter_is_first(lock, &waiter))
1066 __ww_mutex_check_waiters(lock, ww_ctx);
1067 }
1068
1069 __mutex_remove_waiter(lock, &waiter);
1070
1071 debug_mutex_free_waiter(&waiter);
1072
1073skip_wait:
1074 /* got the lock - cleanup and rejoice! */
1075 lock_acquired(&lock->dep_map, ip);
1076
1077 if (ww_ctx)
1078 ww_mutex_lock_acquired(ww, ww_ctx);
1079
1080 spin_unlock(&lock->wait_lock);
1081 preempt_enable();
1082 return 0;
1083
1084err:
1085 __set_current_state(TASK_RUNNING);
1086 __mutex_remove_waiter(lock, &waiter);
1087err_early_kill:
1088 spin_unlock(&lock->wait_lock);
1089 debug_mutex_free_waiter(&waiter);
1090 mutex_release(&lock->dep_map, ip);
1091 preempt_enable();
1092 return ret;
1093}
1094
1095static int __sched
1096__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
1097 struct lockdep_map *nest_lock, unsigned long ip)
1098{
1099 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1100}
1101
1102static int __sched
1103__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
1104 struct lockdep_map *nest_lock, unsigned long ip,
1105 struct ww_acquire_ctx *ww_ctx)
1106{
1107 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1108}
1109
1110#ifdef CONFIG_DEBUG_LOCK_ALLOC
1111void __sched
1112mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1113{
1114 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1115}
1116
1117EXPORT_SYMBOL_GPL(mutex_lock_nested);
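
/*
 * Usage example (an illustrative sketch; "struct node" and detach_child()
 * are made-up names): taking two locks of the same lock class, e.g. a parent
 * and a child object, without triggering a false-positive lockdep report.
 * The inner lock is annotated with the SINGLE_DEPTH_NESTING subclass.
 *
 *	static void detach_child(struct node *parent, struct node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *		list_del(&child->sibling);
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */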
1118
1119void __sched
1120_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1121{
1122 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1123}
1124EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1125
1126int __sched
1127mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1128{
1129 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1130}
1131EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1132
1133int __sched
1134mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1135{
1136 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1137}
1138EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1139
1140void __sched
1141mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1142{
1143 int token;
1144
1145 might_sleep();
1146
1147 token = io_schedule_prepare();
1148 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1149 subclass, NULL, _RET_IP_, NULL, 0);
1150 io_schedule_finish(token);
1151}
1152EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1153
1154static inline int
1155ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1156{
1157#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1158 unsigned tmp;
1159
1160 if (ctx->deadlock_inject_countdown-- == 0) {
1161 tmp = ctx->deadlock_inject_interval;
1162 if (tmp > UINT_MAX/4)
1163 tmp = UINT_MAX;
1164 else
1165 tmp = tmp*2 + tmp + tmp/2;
1166
1167 ctx->deadlock_inject_interval = tmp;
1168 ctx->deadlock_inject_countdown = tmp;
1169 ctx->contending_lock = lock;
1170
1171 ww_mutex_unlock(lock);
1172
1173 return -EDEADLK;
1174 }
1175#endif
1176
1177 return 0;
1178}
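
/*
 * Worked example of the back-off above (integer arithmetic, roughly a 3.5x
 * growth per injection): an interval of 1 grows as
 * 1 -> 3 -> 10 -> 35 -> 122 -> ..., until the value would exceed UINT_MAX/4,
 * at which point it is clamped to UINT_MAX.
 */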
1179
1180int __sched
1181ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1182{
1183 int ret;
1184
1185 might_sleep();
1186 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1187 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1188 ctx);
1189 if (!ret && ctx && ctx->acquired > 1)
1190 return ww_mutex_deadlock_injection(lock, ctx);
1191
1192 return ret;
1193}
1194EXPORT_SYMBOL_GPL(ww_mutex_lock);
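
/*
 * Usage example (an illustrative sketch of the usual caller pattern, see
 * Documentation/locking/ww-mutex-design.rst; "struct obj" and lock_pair()
 * are made-up names): acquiring two objects in an arbitrary order and backing
 * off with ww_mutex_lock_slow() whenever -EDEADLK is returned.
 *
 *	static int lock_pair(struct obj *a, struct obj *b,
 *			     struct ww_acquire_ctx *ctx)
 *	{
 *		struct obj *first = a, *second = b;
 *		int ret;
 *
 *		ret = ww_mutex_lock(&first->lock, ctx);
 *		if (ret)
 *			return ret;
 *
 *		while ((ret = ww_mutex_lock(&second->lock, ctx)) == -EDEADLK) {
 *			ww_mutex_unlock(&first->lock);
 *			swap(first, second);
 *			ww_mutex_lock_slow(&first->lock, ctx);
 *		}
 *		return ret;
 *	}
 */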
1195
1196int __sched
1197ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1198{
1199 int ret;
1200
1201 might_sleep();
1202 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1203 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1204 ctx);
1205
1206 if (!ret && ctx && ctx->acquired > 1)
1207 return ww_mutex_deadlock_injection(lock, ctx);
1208
1209 return ret;
1210}
1211EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1212
1213#endif
1214
1215/*
1216 * Release the lock, slowpath:
1217 */
1218static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1219{
1220 struct task_struct *next = NULL;
1221 DEFINE_WAKE_Q(wake_q);
1222 unsigned long owner;
1223
1224 mutex_release(&lock->dep_map, ip);
1225
1226 /*
1227 * Release the lock before (potentially) taking the spinlock such that
1228 * other contenders can get on with things ASAP.
1229 *
1230 * Except when HANDOFF is set; in that case we must not clear the
1231 * owner field, but instead set it to the top waiter.
1232 */
1233 owner = atomic_long_read(&lock->owner);
1234 for (;;) {
1235 unsigned long old;
1236
1237#ifdef CONFIG_DEBUG_MUTEXES
1238 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1239 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1240#endif
1241
1242 if (owner & MUTEX_FLAG_HANDOFF)
1243 break;
1244
1245 old = atomic_long_cmpxchg_release(&lock->owner, owner,
1246 __owner_flags(owner));
1247 if (old == owner) {
1248 if (owner & MUTEX_FLAG_WAITERS)
1249 break;
1250
1251 return;
1252 }
1253
1254 owner = old;
1255 }
1256
1257 spin_lock(&lock->wait_lock);
1258 debug_mutex_unlock(lock);
1259 if (!list_empty(&lock->wait_list)) {
1260 /* get the first entry from the wait-list: */
1261 struct mutex_waiter *waiter =
1262 list_first_entry(&lock->wait_list,
1263 struct mutex_waiter, list);
1264
1265 next = waiter->task;
1266
1267 debug_mutex_wake_waiter(lock, waiter);
1268 wake_q_add(&wake_q, next);
1269 }
1270
1271 if (owner & MUTEX_FLAG_HANDOFF)
1272 __mutex_handoff(lock, next);
1273
1274 spin_unlock(&lock->wait_lock);
1275
1276 wake_up_q(&wake_q);
1277}
1278
1279#ifndef CONFIG_DEBUG_LOCK_ALLOC
1280/*
1281 * Here come the less common (and hence less performance-critical) APIs:
1282 * mutex_lock_interruptible() and mutex_trylock().
1283 */
1284static noinline int __sched
1285__mutex_lock_killable_slowpath(struct mutex *lock);
1286
1287static noinline int __sched
1288__mutex_lock_interruptible_slowpath(struct mutex *lock);
1289
1290/**
1291 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1292 * @lock: The mutex to be acquired.
1293 *
1294 * Lock the mutex like mutex_lock(). If a signal is delivered while the
1295 * process is sleeping, this function will return without acquiring the
1296 * mutex.
1297 *
1298 * Context: Process context.
1299 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1300 * signal arrived.
1301 */
1302int __sched mutex_lock_interruptible(struct mutex *lock)
1303{
1304 might_sleep();
1305
1306 if (__mutex_trylock_fast(lock))
1307 return 0;
1308
1309 return __mutex_lock_interruptible_slowpath(lock);
1310}
1311
1312EXPORT_SYMBOL(mutex_lock_interruptible);
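
/*
 * Usage example (an illustrative sketch; "dev" is a made-up object): the
 * common pattern in a syscall or ioctl path, where returning -ERESTARTSYS
 * lets the interrupted call be restarted after the signal has been handled.
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */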
1313
1314/**
1315 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1316 * @lock: The mutex to be acquired.
1317 *
1318 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
1319 * the current process is delivered while the process is sleeping, this
1320 * function will return without acquiring the mutex.
1321 *
1322 * Context: Process context.
1323 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1324 * fatal signal arrived.
1325 */
1326int __sched mutex_lock_killable(struct mutex *lock)
1327{
1328 might_sleep();
1329
1330 if (__mutex_trylock_fast(lock))
1331 return 0;
1332
1333 return __mutex_lock_killable_slowpath(lock);
1334}
1335EXPORT_SYMBOL(mutex_lock_killable);
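
/*
 * Usage example (an illustrative sketch; "dev" is a made-up object): as
 * above, but only a fatal signal interrupts the wait, which is the usual
 * choice when a partially completed operation cannot be restarted cleanly.
 *
 *	if (mutex_lock_killable(&dev->lock))
 *		return -EINTR;
 */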
1336
1337/**
1338 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1339 * @lock: The mutex to be acquired.
1340 *
1341 * Lock the mutex like mutex_lock(). While the task is waiting for this
1342 * mutex, it will be accounted as being in the IO wait state by the
1343 * scheduler.
1344 *
1345 * Context: Process context.
1346 */
1347void __sched mutex_lock_io(struct mutex *lock)
1348{
1349 int token;
1350
1351 token = io_schedule_prepare();
1352 mutex_lock(lock);
1353 io_schedule_finish(token);
1354}
1355EXPORT_SYMBOL_GPL(mutex_lock_io);
1356
1357static noinline void __sched
1358__mutex_lock_slowpath(struct mutex *lock)
1359{
1360 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1361}
1362
1363static noinline int __sched
1364__mutex_lock_killable_slowpath(struct mutex *lock)
1365{
1366 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1367}
1368
1369static noinline int __sched
1370__mutex_lock_interruptible_slowpath(struct mutex *lock)
1371{
1372 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1373}
1374
1375static noinline int __sched
1376__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1377{
1378 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1379 _RET_IP_, ctx);
1380}
1381
1382static noinline int __sched
1383__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1384 struct ww_acquire_ctx *ctx)
1385{
1386 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1387 _RET_IP_, ctx);
1388}
1389
1390#endif
1391
1392/**
1393 * mutex_trylock - try to acquire the mutex, without waiting
1394 * @lock: the mutex to be acquired
1395 *
1396 * Try to acquire the mutex atomically. Returns 1 if the mutex
1397 * has been acquired successfully, and 0 on contention.
1398 *
1399 * NOTE: this function follows the spin_trylock() convention, so
1400 * it is negated from the down_trylock() return values! Be careful
1401 * about this when converting semaphore users to mutexes.
1402 *
1403 * This function must not be used in interrupt context. The
1404 * mutex must be released by the same task that acquired it.
1405 */
1406int __sched mutex_trylock(struct mutex *lock)
1407{
1408 bool locked;
1409
1410#ifdef CONFIG_DEBUG_MUTEXES
1411 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1412#endif
1413
1414 locked = __mutex_trylock(lock);
1415 if (locked)
1416 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1417
1418 return locked;
1419}
1420EXPORT_SYMBOL(mutex_trylock);
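
/*
 * Usage example (an illustrative sketch; "dev" and try_flush() are made-up
 * names): doing optional work only when the lock happens to be free. Note
 * the return convention: 1 means the lock was taken, 0 means contention and
 * nothing is done.
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		try_flush(dev);
 *		mutex_unlock(&dev->lock);
 *	}
 */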
1421
1422#ifndef CONFIG_DEBUG_LOCK_ALLOC
1423int __sched
1424ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1425{
1426 might_sleep();
1427
1428 if (__mutex_trylock_fast(&lock->base)) {
1429 if (ctx)
1430 ww_mutex_set_context_fastpath(lock, ctx);
1431 return 0;
1432 }
1433
1434 return __ww_mutex_lock_slowpath(lock, ctx);
1435}
1436EXPORT_SYMBOL(ww_mutex_lock);
1437
1438int __sched
1439ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1440{
1441 might_sleep();
1442
1443 if (__mutex_trylock_fast(&lock->base)) {
1444 if (ctx)
1445 ww_mutex_set_context_fastpath(lock, ctx);
1446 return 0;
1447 }
1448
1449 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1450}
1451EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1452
1453#endif
1454
1455/**
1456 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1457 * @cnt: the atomic counter to decrement
1458 * @lock: the mutex to return holding if we dec to 0
1459 *
1460 * Return 1 and hold @lock if the count reaches 0; return 0 otherwise.
1461 */
1462int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1463{
1464 /* dec if we can't possibly hit 0 */
1465 if (atomic_add_unless(cnt, -1, 1))
1466 return 0;
1467 /* we might hit 0, so take the lock */
1468 mutex_lock(lock);
1469 if (!atomic_dec_and_test(cnt)) {
1470 /* when we actually did the dec, we didn't hit 0 */
1471 mutex_unlock(lock);
1472 return 0;
1473 }
1474 /* we hit 0, and we hold the lock */
1475 return 1;
1476}
1477EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
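
/*
 * Usage example (an illustrative sketch; "registry_lock", "struct obj" and
 * obj_put() are made-up names): dropping the last reference to an object
 * that lives on a mutex-protected list.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_mutex_lock(&o->refcnt, &registry_lock)) {
 *			list_del(&o->node);
 *			mutex_unlock(&registry_lock);
 *			kfree(o);
 *		}
 *	}
 */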