Loading...
1/*
2 * kernel/locking/mutex.c
3 *
4 * Mutexes: blocking mutual exclusion locks
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 *
10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
11 * David Howells for suggestions and improvements.
12 *
13 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
14 * from the -rt tree, where it was originally implemented for rtmutexes
15 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
16 * and Sven Dietrich.
17 *
18 * Also see Documentation/locking/mutex-design.txt.
19 */
20#include <linux/mutex.h>
21#include <linux/ww_mutex.h>
22#include <linux/sched/signal.h>
23#include <linux/sched/rt.h>
24#include <linux/sched/wake_q.h>
25#include <linux/sched/debug.h>
26#include <linux/export.h>
27#include <linux/spinlock.h>
28#include <linux/interrupt.h>
29#include <linux/debug_locks.h>
30#include <linux/osq_lock.h>
31
32#ifdef CONFIG_DEBUG_MUTEXES
33# include "mutex-debug.h"
34#else
35# include "mutex.h"
36#endif
37
38void
39__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
40{
41 atomic_long_set(&lock->owner, 0);
42 spin_lock_init(&lock->wait_lock);
43 INIT_LIST_HEAD(&lock->wait_list);
44#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
45 osq_lock_init(&lock->osq);
46#endif
47
48 debug_mutex_init(lock, name, key);
49}
50EXPORT_SYMBOL(__mutex_init);
51
52/*
53 * @owner: contains: 'struct task_struct *' to the current lock owner,
54 * NULL means not owned. Since task_struct pointers are aligned at
55 * at least L1_CACHE_BYTES, we have low bits to store extra state.
56 *
57 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
58 * Bit1 indicates unlock needs to hand the lock to the top-waiter
59 * Bit2 indicates handoff has been done and we're waiting for pickup.
60 */
61#define MUTEX_FLAG_WAITERS 0x01
62#define MUTEX_FLAG_HANDOFF 0x02
63#define MUTEX_FLAG_PICKUP 0x04
64
65#define MUTEX_FLAGS 0x07
66
67static inline struct task_struct *__owner_task(unsigned long owner)
68{
69 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
70}
71
72static inline unsigned long __owner_flags(unsigned long owner)
73{
74 return owner & MUTEX_FLAGS;
75}
76
77/*
78 * Trylock variant that retuns the owning task on failure.
79 */
80static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
81{
82 unsigned long owner, curr = (unsigned long)current;
83
84 owner = atomic_long_read(&lock->owner);
85 for (;;) { /* must loop, can race against a flag */
86 unsigned long old, flags = __owner_flags(owner);
87 unsigned long task = owner & ~MUTEX_FLAGS;
88
89 if (task) {
90 if (likely(task != curr))
91 break;
92
93 if (likely(!(flags & MUTEX_FLAG_PICKUP)))
94 break;
95
96 flags &= ~MUTEX_FLAG_PICKUP;
97 } else {
98#ifdef CONFIG_DEBUG_MUTEXES
99 DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
100#endif
101 }
102
103 /*
104 * We set the HANDOFF bit, we must make sure it doesn't live
105 * past the point where we acquire it. This would be possible
106 * if we (accidentally) set the bit on an unlocked mutex.
107 */
108 flags &= ~MUTEX_FLAG_HANDOFF;
109
110 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
111 if (old == owner)
112 return NULL;
113
114 owner = old;
115 }
116
117 return __owner_task(owner);
118}
119
120/*
121 * Actual trylock that will work on any unlocked state.
122 */
123static inline bool __mutex_trylock(struct mutex *lock)
124{
125 return !__mutex_trylock_or_owner(lock);
126}
127
128#ifndef CONFIG_DEBUG_LOCK_ALLOC
129/*
130 * Lockdep annotations are contained to the slow paths for simplicity.
131 * There is nothing that would stop spreading the lockdep annotations outwards
132 * except more code.
133 */
134
135/*
136 * Optimistic trylock that only works in the uncontended case. Make sure to
137 * follow with a __mutex_trylock() before failing.
138 */
139static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
140{
141 unsigned long curr = (unsigned long)current;
142
143 if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
144 return true;
145
146 return false;
147}
148
149static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
150{
151 unsigned long curr = (unsigned long)current;
152
153 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
154 return true;
155
156 return false;
157}
158#endif
159
160static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
161{
162 atomic_long_or(flag, &lock->owner);
163}
164
165static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
166{
167 atomic_long_andnot(flag, &lock->owner);
168}
169
170static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
171{
172 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
173}
174
175/*
176 * Give up ownership to a specific task, when @task = NULL, this is equivalent
177 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
178 * WAITERS. Provides RELEASE semantics like a regular unlock, the
179 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
180 */
181static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
182{
183 unsigned long owner = atomic_long_read(&lock->owner);
184
185 for (;;) {
186 unsigned long old, new;
187
188#ifdef CONFIG_DEBUG_MUTEXES
189 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
190 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
191#endif
192
193 new = (owner & MUTEX_FLAG_WAITERS);
194 new |= (unsigned long)task;
195 if (task)
196 new |= MUTEX_FLAG_PICKUP;
197
198 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
199 if (old == owner)
200 break;
201
202 owner = old;
203 }
204}
205
206#ifndef CONFIG_DEBUG_LOCK_ALLOC
207/*
208 * We split the mutex lock/unlock logic into separate fastpath and
209 * slowpath functions, to reduce the register pressure on the fastpath.
210 * We also put the fastpath first in the kernel image, to make sure the
211 * branch is predicted by the CPU as default-untaken.
212 */
213static void __sched __mutex_lock_slowpath(struct mutex *lock);
214
215/**
216 * mutex_lock - acquire the mutex
217 * @lock: the mutex to be acquired
218 *
219 * Lock the mutex exclusively for this task. If the mutex is not
220 * available right now, it will sleep until it can get it.
221 *
222 * The mutex must later on be released by the same task that
223 * acquired it. Recursive locking is not allowed. The task
224 * may not exit without first unlocking the mutex. Also, kernel
225 * memory where the mutex resides must not be freed with
226 * the mutex still locked. The mutex must first be initialized
227 * (or statically defined) before it can be locked. memset()-ing
228 * the mutex to 0 is not allowed.
229 *
230 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
231 * checks that will enforce the restrictions and will also do
232 * deadlock debugging)
233 *
234 * This function is similar to (but not equivalent to) down().
235 */
236void __sched mutex_lock(struct mutex *lock)
237{
238 might_sleep();
239
240 if (!__mutex_trylock_fast(lock))
241 __mutex_lock_slowpath(lock);
242}
243EXPORT_SYMBOL(mutex_lock);
244#endif
245
246static __always_inline void
247ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
248{
249#ifdef CONFIG_DEBUG_MUTEXES
250 /*
251 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
252 * but released with a normal mutex_unlock in this call.
253 *
254 * This should never happen, always use ww_mutex_unlock.
255 */
256 DEBUG_LOCKS_WARN_ON(ww->ctx);
257
258 /*
259 * Not quite done after calling ww_acquire_done() ?
260 */
261 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
262
263 if (ww_ctx->contending_lock) {
264 /*
265 * After -EDEADLK you tried to
266 * acquire a different ww_mutex? Bad!
267 */
268 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
269
270 /*
271 * You called ww_mutex_lock after receiving -EDEADLK,
272 * but 'forgot' to unlock everything else first?
273 */
274 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
275 ww_ctx->contending_lock = NULL;
276 }
277
278 /*
279 * Naughty, using a different class will lead to undefined behavior!
280 */
281 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
282#endif
283 ww_ctx->acquired++;
284}
285
286static inline bool __sched
287__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
288{
289 return a->stamp - b->stamp <= LONG_MAX &&
290 (a->stamp != b->stamp || a > b);
291}
292
293/*
294 * Wake up any waiters that may have to back off when the lock is held by the
295 * given context.
296 *
297 * Due to the invariants on the wait list, this can only affect the first
298 * waiter with a context.
299 *
300 * The current task must not be on the wait list.
301 */
302static void __sched
303__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
304{
305 struct mutex_waiter *cur;
306
307 lockdep_assert_held(&lock->wait_lock);
308
309 list_for_each_entry(cur, &lock->wait_list, list) {
310 if (!cur->ww_ctx)
311 continue;
312
313 if (cur->ww_ctx->acquired > 0 &&
314 __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
315 debug_mutex_wake_waiter(lock, cur);
316 wake_up_process(cur->task);
317 }
318
319 break;
320 }
321}
322
323/*
324 * After acquiring lock with fastpath or when we lost out in contested
325 * slowpath, set ctx and wake up any waiters so they can recheck.
326 */
327static __always_inline void
328ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
329{
330 ww_mutex_lock_acquired(lock, ctx);
331
332 lock->ctx = ctx;
333
334 /*
335 * The lock->ctx update should be visible on all cores before
336 * the atomic read is done, otherwise contended waiters might be
337 * missed. The contended waiters will either see ww_ctx == NULL
338 * and keep spinning, or it will acquire wait_lock, add itself
339 * to waiter list and sleep.
340 */
341 smp_mb(); /* ^^^ */
342
343 /*
344 * Check if lock is contended, if not there is nobody to wake up
345 */
346 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
347 return;
348
349 /*
350 * Uh oh, we raced in fastpath, wake up everyone in this case,
351 * so they can see the new lock->ctx.
352 */
353 spin_lock(&lock->base.wait_lock);
354 __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
355 spin_unlock(&lock->base.wait_lock);
356}
357
358/*
359 * After acquiring lock in the slowpath set ctx.
360 *
361 * Unlike for the fast path, the caller ensures that waiters are woken up where
362 * necessary.
363 *
364 * Callers must hold the mutex wait_lock.
365 */
366static __always_inline void
367ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
368{
369 ww_mutex_lock_acquired(lock, ctx);
370 lock->ctx = ctx;
371}
372
373#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
374
375static inline
376bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
377 struct mutex_waiter *waiter)
378{
379 struct ww_mutex *ww;
380
381 ww = container_of(lock, struct ww_mutex, base);
382
383 /*
384 * If ww->ctx is set the contents are undefined, only
385 * by acquiring wait_lock there is a guarantee that
386 * they are not invalid when reading.
387 *
388 * As such, when deadlock detection needs to be
389 * performed the optimistic spinning cannot be done.
390 *
391 * Check this in every inner iteration because we may
392 * be racing against another thread's ww_mutex_lock.
393 */
394 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
395 return false;
396
397 /*
398 * If we aren't on the wait list yet, cancel the spin
399 * if there are waiters. We want to avoid stealing the
400 * lock from a waiter with an earlier stamp, since the
401 * other thread may already own a lock that we also
402 * need.
403 */
404 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
405 return false;
406
407 /*
408 * Similarly, stop spinning if we are no longer the
409 * first waiter.
410 */
411 if (waiter && !__mutex_waiter_is_first(lock, waiter))
412 return false;
413
414 return true;
415}
416
417/*
418 * Look out! "owner" is an entirely speculative pointer access and not
419 * reliable.
420 *
421 * "noinline" so that this function shows up on perf profiles.
422 */
423static noinline
424bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
425 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
426{
427 bool ret = true;
428
429 rcu_read_lock();
430 while (__mutex_owner(lock) == owner) {
431 /*
432 * Ensure we emit the owner->on_cpu, dereference _after_
433 * checking lock->owner still matches owner. If that fails,
434 * owner might point to freed memory. If it still matches,
435 * the rcu_read_lock() ensures the memory stays valid.
436 */
437 barrier();
438
439 /*
440 * Use vcpu_is_preempted to detect lock holder preemption issue.
441 */
442 if (!owner->on_cpu || need_resched() ||
443 vcpu_is_preempted(task_cpu(owner))) {
444 ret = false;
445 break;
446 }
447
448 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
449 ret = false;
450 break;
451 }
452
453 cpu_relax();
454 }
455 rcu_read_unlock();
456
457 return ret;
458}
459
460/*
461 * Initial check for entering the mutex spinning loop
462 */
463static inline int mutex_can_spin_on_owner(struct mutex *lock)
464{
465 struct task_struct *owner;
466 int retval = 1;
467
468 if (need_resched())
469 return 0;
470
471 rcu_read_lock();
472 owner = __mutex_owner(lock);
473
474 /*
475 * As lock holder preemption issue, we both skip spinning if task is not
476 * on cpu or its cpu is preempted
477 */
478 if (owner)
479 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
480 rcu_read_unlock();
481
482 /*
483 * If lock->owner is not set, the mutex has been released. Return true
484 * such that we'll trylock in the spin path, which is a faster option
485 * than the blocking slow path.
486 */
487 return retval;
488}
489
490/*
491 * Optimistic spinning.
492 *
493 * We try to spin for acquisition when we find that the lock owner
494 * is currently running on a (different) CPU and while we don't
495 * need to reschedule. The rationale is that if the lock owner is
496 * running, it is likely to release the lock soon.
497 *
498 * The mutex spinners are queued up using MCS lock so that only one
499 * spinner can compete for the mutex. However, if mutex spinning isn't
500 * going to happen, there is no point in going through the lock/unlock
501 * overhead.
502 *
503 * Returns true when the lock was taken, otherwise false, indicating
504 * that we need to jump to the slowpath and sleep.
505 *
506 * The waiter flag is set to true if the spinner is a waiter in the wait
507 * queue. The waiter-spinner will spin on the lock directly and concurrently
508 * with the spinner at the head of the OSQ, if present, until the owner is
509 * changed to itself.
510 */
511static __always_inline bool
512mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
513 const bool use_ww_ctx, struct mutex_waiter *waiter)
514{
515 if (!waiter) {
516 /*
517 * The purpose of the mutex_can_spin_on_owner() function is
518 * to eliminate the overhead of osq_lock() and osq_unlock()
519 * in case spinning isn't possible. As a waiter-spinner
520 * is not going to take OSQ lock anyway, there is no need
521 * to call mutex_can_spin_on_owner().
522 */
523 if (!mutex_can_spin_on_owner(lock))
524 goto fail;
525
526 /*
527 * In order to avoid a stampede of mutex spinners trying to
528 * acquire the mutex all at once, the spinners need to take a
529 * MCS (queued) lock first before spinning on the owner field.
530 */
531 if (!osq_lock(&lock->osq))
532 goto fail;
533 }
534
535 for (;;) {
536 struct task_struct *owner;
537
538 /* Try to acquire the mutex... */
539 owner = __mutex_trylock_or_owner(lock);
540 if (!owner)
541 break;
542
543 /*
544 * There's an owner, wait for it to either
545 * release the lock or go to sleep.
546 */
547 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
548 goto fail_unlock;
549
550 /*
551 * The cpu_relax() call is a compiler barrier which forces
552 * everything in this loop to be re-loaded. We don't need
553 * memory barriers as we'll eventually observe the right
554 * values at the cost of a few extra spins.
555 */
556 cpu_relax();
557 }
558
559 if (!waiter)
560 osq_unlock(&lock->osq);
561
562 return true;
563
564
565fail_unlock:
566 if (!waiter)
567 osq_unlock(&lock->osq);
568
569fail:
570 /*
571 * If we fell out of the spin path because of need_resched(),
572 * reschedule now, before we try-lock the mutex. This avoids getting
573 * scheduled out right after we obtained the mutex.
574 */
575 if (need_resched()) {
576 /*
577 * We _should_ have TASK_RUNNING here, but just in case
578 * we do not, make it so, otherwise we might get stuck.
579 */
580 __set_current_state(TASK_RUNNING);
581 schedule_preempt_disabled();
582 }
583
584 return false;
585}
586#else
587static __always_inline bool
588mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
589 const bool use_ww_ctx, struct mutex_waiter *waiter)
590{
591 return false;
592}
593#endif
594
595static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
596
597/**
598 * mutex_unlock - release the mutex
599 * @lock: the mutex to be released
600 *
601 * Unlock a mutex that has been locked by this task previously.
602 *
603 * This function must not be used in interrupt context. Unlocking
604 * of a not locked mutex is not allowed.
605 *
606 * This function is similar to (but not equivalent to) up().
607 */
608void __sched mutex_unlock(struct mutex *lock)
609{
610#ifndef CONFIG_DEBUG_LOCK_ALLOC
611 if (__mutex_unlock_fast(lock))
612 return;
613#endif
614 __mutex_unlock_slowpath(lock, _RET_IP_);
615}
616EXPORT_SYMBOL(mutex_unlock);
617
618/**
619 * ww_mutex_unlock - release the w/w mutex
620 * @lock: the mutex to be released
621 *
622 * Unlock a mutex that has been locked by this task previously with any of the
623 * ww_mutex_lock* functions (with or without an acquire context). It is
624 * forbidden to release the locks after releasing the acquire context.
625 *
626 * This function must not be used in interrupt context. Unlocking
627 * of a unlocked mutex is not allowed.
628 */
629void __sched ww_mutex_unlock(struct ww_mutex *lock)
630{
631 /*
632 * The unlocking fastpath is the 0->1 transition from 'locked'
633 * into 'unlocked' state:
634 */
635 if (lock->ctx) {
636#ifdef CONFIG_DEBUG_MUTEXES
637 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
638#endif
639 if (lock->ctx->acquired > 0)
640 lock->ctx->acquired--;
641 lock->ctx = NULL;
642 }
643
644 mutex_unlock(&lock->base);
645}
646EXPORT_SYMBOL(ww_mutex_unlock);
647
648static inline int __sched
649__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
650 struct ww_acquire_ctx *ctx)
651{
652 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
653 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
654 struct mutex_waiter *cur;
655
656 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
657 goto deadlock;
658
659 /*
660 * If there is a waiter in front of us that has a context, then its
661 * stamp is earlier than ours and we must back off.
662 */
663 cur = waiter;
664 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
665 if (cur->ww_ctx)
666 goto deadlock;
667 }
668
669 return 0;
670
671deadlock:
672#ifdef CONFIG_DEBUG_MUTEXES
673 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
674 ctx->contending_lock = ww;
675#endif
676 return -EDEADLK;
677}
678
679static inline int __sched
680__ww_mutex_add_waiter(struct mutex_waiter *waiter,
681 struct mutex *lock,
682 struct ww_acquire_ctx *ww_ctx)
683{
684 struct mutex_waiter *cur;
685 struct list_head *pos;
686
687 if (!ww_ctx) {
688 list_add_tail(&waiter->list, &lock->wait_list);
689 return 0;
690 }
691
692 /*
693 * Add the waiter before the first waiter with a higher stamp.
694 * Waiters without a context are skipped to avoid starving
695 * them.
696 */
697 pos = &lock->wait_list;
698 list_for_each_entry_reverse(cur, &lock->wait_list, list) {
699 if (!cur->ww_ctx)
700 continue;
701
702 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
703 /* Back off immediately if necessary. */
704 if (ww_ctx->acquired > 0) {
705#ifdef CONFIG_DEBUG_MUTEXES
706 struct ww_mutex *ww;
707
708 ww = container_of(lock, struct ww_mutex, base);
709 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
710 ww_ctx->contending_lock = ww;
711#endif
712 return -EDEADLK;
713 }
714
715 break;
716 }
717
718 pos = &cur->list;
719
720 /*
721 * Wake up the waiter so that it gets a chance to back
722 * off.
723 */
724 if (cur->ww_ctx->acquired > 0) {
725 debug_mutex_wake_waiter(lock, cur);
726 wake_up_process(cur->task);
727 }
728 }
729
730 list_add_tail(&waiter->list, pos);
731 return 0;
732}
733
734/*
735 * Lock a mutex (possibly interruptible), slowpath:
736 */
737static __always_inline int __sched
738__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
739 struct lockdep_map *nest_lock, unsigned long ip,
740 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
741{
742 struct mutex_waiter waiter;
743 bool first = false;
744 struct ww_mutex *ww;
745 int ret;
746
747 might_sleep();
748
749 ww = container_of(lock, struct ww_mutex, base);
750 if (use_ww_ctx && ww_ctx) {
751 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
752 return -EALREADY;
753 }
754
755 preempt_disable();
756 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
757
758 if (__mutex_trylock(lock) ||
759 mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
760 /* got the lock, yay! */
761 lock_acquired(&lock->dep_map, ip);
762 if (use_ww_ctx && ww_ctx)
763 ww_mutex_set_context_fastpath(ww, ww_ctx);
764 preempt_enable();
765 return 0;
766 }
767
768 spin_lock(&lock->wait_lock);
769 /*
770 * After waiting to acquire the wait_lock, try again.
771 */
772 if (__mutex_trylock(lock)) {
773 if (use_ww_ctx && ww_ctx)
774 __ww_mutex_wakeup_for_backoff(lock, ww_ctx);
775
776 goto skip_wait;
777 }
778
779 debug_mutex_lock_common(lock, &waiter);
780 debug_mutex_add_waiter(lock, &waiter, current);
781
782 lock_contended(&lock->dep_map, ip);
783
784 if (!use_ww_ctx) {
785 /* add waiting tasks to the end of the waitqueue (FIFO): */
786 list_add_tail(&waiter.list, &lock->wait_list);
787
788#ifdef CONFIG_DEBUG_MUTEXES
789 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
790#endif
791 } else {
792 /* Add in stamp order, waking up waiters that must back off. */
793 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
794 if (ret)
795 goto err_early_backoff;
796
797 waiter.ww_ctx = ww_ctx;
798 }
799
800 waiter.task = current;
801
802 if (__mutex_waiter_is_first(lock, &waiter))
803 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
804
805 set_current_state(state);
806 for (;;) {
807 /*
808 * Once we hold wait_lock, we're serialized against
809 * mutex_unlock() handing the lock off to us, do a trylock
810 * before testing the error conditions to make sure we pick up
811 * the handoff.
812 */
813 if (__mutex_trylock(lock))
814 goto acquired;
815
816 /*
817 * Check for signals and wound conditions while holding
818 * wait_lock. This ensures the lock cancellation is ordered
819 * against mutex_unlock() and wake-ups do not go missing.
820 */
821 if (unlikely(signal_pending_state(state, current))) {
822 ret = -EINTR;
823 goto err;
824 }
825
826 if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
827 ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
828 if (ret)
829 goto err;
830 }
831
832 spin_unlock(&lock->wait_lock);
833 schedule_preempt_disabled();
834
835 /*
836 * ww_mutex needs to always recheck its position since its waiter
837 * list is not FIFO ordered.
838 */
839 if ((use_ww_ctx && ww_ctx) || !first) {
840 first = __mutex_waiter_is_first(lock, &waiter);
841 if (first)
842 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
843 }
844
845 set_current_state(state);
846 /*
847 * Here we order against unlock; we must either see it change
848 * state back to RUNNING and fall through the next schedule(),
849 * or we must see its unlock and acquire.
850 */
851 if (__mutex_trylock(lock) ||
852 (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
853 break;
854
855 spin_lock(&lock->wait_lock);
856 }
857 spin_lock(&lock->wait_lock);
858acquired:
859 __set_current_state(TASK_RUNNING);
860
861 mutex_remove_waiter(lock, &waiter, current);
862 if (likely(list_empty(&lock->wait_list)))
863 __mutex_clear_flag(lock, MUTEX_FLAGS);
864
865 debug_mutex_free_waiter(&waiter);
866
867skip_wait:
868 /* got the lock - cleanup and rejoice! */
869 lock_acquired(&lock->dep_map, ip);
870
871 if (use_ww_ctx && ww_ctx)
872 ww_mutex_set_context_slowpath(ww, ww_ctx);
873
874 spin_unlock(&lock->wait_lock);
875 preempt_enable();
876 return 0;
877
878err:
879 __set_current_state(TASK_RUNNING);
880 mutex_remove_waiter(lock, &waiter, current);
881err_early_backoff:
882 spin_unlock(&lock->wait_lock);
883 debug_mutex_free_waiter(&waiter);
884 mutex_release(&lock->dep_map, 1, ip);
885 preempt_enable();
886 return ret;
887}
888
889static int __sched
890__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
891 struct lockdep_map *nest_lock, unsigned long ip)
892{
893 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
894}
895
896static int __sched
897__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
898 struct lockdep_map *nest_lock, unsigned long ip,
899 struct ww_acquire_ctx *ww_ctx)
900{
901 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
902}
903
904#ifdef CONFIG_DEBUG_LOCK_ALLOC
905void __sched
906mutex_lock_nested(struct mutex *lock, unsigned int subclass)
907{
908 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
909}
910
911EXPORT_SYMBOL_GPL(mutex_lock_nested);
912
913void __sched
914_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
915{
916 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
917}
918EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
919
920int __sched
921mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
922{
923 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
924}
925EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
926
927int __sched
928mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
929{
930 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
931}
932EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
933
934void __sched
935mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
936{
937 int token;
938
939 might_sleep();
940
941 token = io_schedule_prepare();
942 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
943 subclass, NULL, _RET_IP_, NULL, 0);
944 io_schedule_finish(token);
945}
946EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
947
948static inline int
949ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
950{
951#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
952 unsigned tmp;
953
954 if (ctx->deadlock_inject_countdown-- == 0) {
955 tmp = ctx->deadlock_inject_interval;
956 if (tmp > UINT_MAX/4)
957 tmp = UINT_MAX;
958 else
959 tmp = tmp*2 + tmp + tmp/2;
960
961 ctx->deadlock_inject_interval = tmp;
962 ctx->deadlock_inject_countdown = tmp;
963 ctx->contending_lock = lock;
964
965 ww_mutex_unlock(lock);
966
967 return -EDEADLK;
968 }
969#endif
970
971 return 0;
972}
973
974int __sched
975ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
976{
977 int ret;
978
979 might_sleep();
980 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
981 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
982 ctx);
983 if (!ret && ctx && ctx->acquired > 1)
984 return ww_mutex_deadlock_injection(lock, ctx);
985
986 return ret;
987}
988EXPORT_SYMBOL_GPL(ww_mutex_lock);
989
990int __sched
991ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
992{
993 int ret;
994
995 might_sleep();
996 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
997 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
998 ctx);
999
1000 if (!ret && ctx && ctx->acquired > 1)
1001 return ww_mutex_deadlock_injection(lock, ctx);
1002
1003 return ret;
1004}
1005EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1006
1007#endif
1008
1009/*
1010 * Release the lock, slowpath:
1011 */
1012static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1013{
1014 struct task_struct *next = NULL;
1015 DEFINE_WAKE_Q(wake_q);
1016 unsigned long owner;
1017
1018 mutex_release(&lock->dep_map, 1, ip);
1019
1020 /*
1021 * Release the lock before (potentially) taking the spinlock such that
1022 * other contenders can get on with things ASAP.
1023 *
1024 * Except when HANDOFF, in that case we must not clear the owner field,
1025 * but instead set it to the top waiter.
1026 */
1027 owner = atomic_long_read(&lock->owner);
1028 for (;;) {
1029 unsigned long old;
1030
1031#ifdef CONFIG_DEBUG_MUTEXES
1032 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1033 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1034#endif
1035
1036 if (owner & MUTEX_FLAG_HANDOFF)
1037 break;
1038
1039 old = atomic_long_cmpxchg_release(&lock->owner, owner,
1040 __owner_flags(owner));
1041 if (old == owner) {
1042 if (owner & MUTEX_FLAG_WAITERS)
1043 break;
1044
1045 return;
1046 }
1047
1048 owner = old;
1049 }
1050
1051 spin_lock(&lock->wait_lock);
1052 debug_mutex_unlock(lock);
1053 if (!list_empty(&lock->wait_list)) {
1054 /* get the first entry from the wait-list: */
1055 struct mutex_waiter *waiter =
1056 list_first_entry(&lock->wait_list,
1057 struct mutex_waiter, list);
1058
1059 next = waiter->task;
1060
1061 debug_mutex_wake_waiter(lock, waiter);
1062 wake_q_add(&wake_q, next);
1063 }
1064
1065 if (owner & MUTEX_FLAG_HANDOFF)
1066 __mutex_handoff(lock, next);
1067
1068 spin_unlock(&lock->wait_lock);
1069
1070 wake_up_q(&wake_q);
1071}
1072
1073#ifndef CONFIG_DEBUG_LOCK_ALLOC
1074/*
1075 * Here come the less common (and hence less performance-critical) APIs:
1076 * mutex_lock_interruptible() and mutex_trylock().
1077 */
1078static noinline int __sched
1079__mutex_lock_killable_slowpath(struct mutex *lock);
1080
1081static noinline int __sched
1082__mutex_lock_interruptible_slowpath(struct mutex *lock);
1083
1084/**
1085 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1086 * @lock: The mutex to be acquired.
1087 *
1088 * Lock the mutex like mutex_lock(). If a signal is delivered while the
1089 * process is sleeping, this function will return without acquiring the
1090 * mutex.
1091 *
1092 * Context: Process context.
1093 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1094 * signal arrived.
1095 */
1096int __sched mutex_lock_interruptible(struct mutex *lock)
1097{
1098 might_sleep();
1099
1100 if (__mutex_trylock_fast(lock))
1101 return 0;
1102
1103 return __mutex_lock_interruptible_slowpath(lock);
1104}
1105
1106EXPORT_SYMBOL(mutex_lock_interruptible);
1107
1108/**
1109 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1110 * @lock: The mutex to be acquired.
1111 *
1112 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
1113 * the current process is delivered while the process is sleeping, this
1114 * function will return without acquiring the mutex.
1115 *
1116 * Context: Process context.
1117 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1118 * fatal signal arrived.
1119 */
1120int __sched mutex_lock_killable(struct mutex *lock)
1121{
1122 might_sleep();
1123
1124 if (__mutex_trylock_fast(lock))
1125 return 0;
1126
1127 return __mutex_lock_killable_slowpath(lock);
1128}
1129EXPORT_SYMBOL(mutex_lock_killable);
1130
1131/**
1132 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1133 * @lock: The mutex to be acquired.
1134 *
1135 * Lock the mutex like mutex_lock(). While the task is waiting for this
1136 * mutex, it will be accounted as being in the IO wait state by the
1137 * scheduler.
1138 *
1139 * Context: Process context.
1140 */
1141void __sched mutex_lock_io(struct mutex *lock)
1142{
1143 int token;
1144
1145 token = io_schedule_prepare();
1146 mutex_lock(lock);
1147 io_schedule_finish(token);
1148}
1149EXPORT_SYMBOL_GPL(mutex_lock_io);
1150
1151static noinline void __sched
1152__mutex_lock_slowpath(struct mutex *lock)
1153{
1154 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1155}
1156
1157static noinline int __sched
1158__mutex_lock_killable_slowpath(struct mutex *lock)
1159{
1160 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1161}
1162
1163static noinline int __sched
1164__mutex_lock_interruptible_slowpath(struct mutex *lock)
1165{
1166 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1167}
1168
1169static noinline int __sched
1170__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1171{
1172 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1173 _RET_IP_, ctx);
1174}
1175
1176static noinline int __sched
1177__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1178 struct ww_acquire_ctx *ctx)
1179{
1180 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1181 _RET_IP_, ctx);
1182}
1183
1184#endif
1185
1186/**
1187 * mutex_trylock - try to acquire the mutex, without waiting
1188 * @lock: the mutex to be acquired
1189 *
1190 * Try to acquire the mutex atomically. Returns 1 if the mutex
1191 * has been acquired successfully, and 0 on contention.
1192 *
1193 * NOTE: this function follows the spin_trylock() convention, so
1194 * it is negated from the down_trylock() return values! Be careful
1195 * about this when converting semaphore users to mutexes.
1196 *
1197 * This function must not be used in interrupt context. The
1198 * mutex must be released by the same task that acquired it.
1199 */
1200int __sched mutex_trylock(struct mutex *lock)
1201{
1202 bool locked = __mutex_trylock(lock);
1203
1204 if (locked)
1205 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1206
1207 return locked;
1208}
1209EXPORT_SYMBOL(mutex_trylock);
1210
1211#ifndef CONFIG_DEBUG_LOCK_ALLOC
1212int __sched
1213ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1214{
1215 might_sleep();
1216
1217 if (__mutex_trylock_fast(&lock->base)) {
1218 if (ctx)
1219 ww_mutex_set_context_fastpath(lock, ctx);
1220 return 0;
1221 }
1222
1223 return __ww_mutex_lock_slowpath(lock, ctx);
1224}
1225EXPORT_SYMBOL(ww_mutex_lock);
1226
1227int __sched
1228ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1229{
1230 might_sleep();
1231
1232 if (__mutex_trylock_fast(&lock->base)) {
1233 if (ctx)
1234 ww_mutex_set_context_fastpath(lock, ctx);
1235 return 0;
1236 }
1237
1238 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1239}
1240EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1241
1242#endif
1243
1244/**
1245 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1246 * @cnt: the atomic which we are to dec
1247 * @lock: the mutex to return holding if we dec to 0
1248 *
1249 * return true and hold lock if we dec to 0, return false otherwise
1250 */
1251int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1252{
1253 /* dec if we can't possibly hit 0 */
1254 if (atomic_add_unless(cnt, -1, 1))
1255 return 0;
1256 /* we might hit 0, so take the lock */
1257 mutex_lock(lock);
1258 if (!atomic_dec_and_test(cnt)) {
1259 /* when we actually did the dec, we didn't hit 0 */
1260 mutex_unlock(lock);
1261 return 0;
1262 }
1263 /* we hit 0, and we hold the lock */
1264 return 1;
1265}
1266EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
1/*
2 * kernel/locking/mutex.c
3 *
4 * Mutexes: blocking mutual exclusion locks
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 *
10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
11 * David Howells for suggestions and improvements.
12 *
13 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
14 * from the -rt tree, where it was originally implemented for rtmutexes
15 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
16 * and Sven Dietrich.
17 *
18 * Also see Documentation/locking/mutex-design.txt.
19 */
20#include <linux/mutex.h>
21#include <linux/ww_mutex.h>
22#include <linux/sched.h>
23#include <linux/sched/rt.h>
24#include <linux/export.h>
25#include <linux/spinlock.h>
26#include <linux/interrupt.h>
27#include <linux/debug_locks.h>
28#include <linux/osq_lock.h>
29
30/*
31 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
32 * which forces all calls into the slowpath:
33 */
34#ifdef CONFIG_DEBUG_MUTEXES
35# include "mutex-debug.h"
36# include <asm-generic/mutex-null.h>
37/*
38 * Must be 0 for the debug case so we do not do the unlock outside of the
39 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
40 * case.
41 */
42# undef __mutex_slowpath_needs_to_unlock
43# define __mutex_slowpath_needs_to_unlock() 0
44#else
45# include "mutex.h"
46# include <asm/mutex.h>
47#endif
48
49void
50__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
51{
52 atomic_set(&lock->count, 1);
53 spin_lock_init(&lock->wait_lock);
54 INIT_LIST_HEAD(&lock->wait_list);
55 mutex_clear_owner(lock);
56#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
57 osq_lock_init(&lock->osq);
58#endif
59
60 debug_mutex_init(lock, name, key);
61}
62
63EXPORT_SYMBOL(__mutex_init);
64
65#ifndef CONFIG_DEBUG_LOCK_ALLOC
66/*
67 * We split the mutex lock/unlock logic into separate fastpath and
68 * slowpath functions, to reduce the register pressure on the fastpath.
69 * We also put the fastpath first in the kernel image, to make sure the
70 * branch is predicted by the CPU as default-untaken.
71 */
72__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
73
74/**
75 * mutex_lock - acquire the mutex
76 * @lock: the mutex to be acquired
77 *
78 * Lock the mutex exclusively for this task. If the mutex is not
79 * available right now, it will sleep until it can get it.
80 *
81 * The mutex must later on be released by the same task that
82 * acquired it. Recursive locking is not allowed. The task
83 * may not exit without first unlocking the mutex. Also, kernel
84 * memory where the mutex resides must not be freed with
85 * the mutex still locked. The mutex must first be initialized
86 * (or statically defined) before it can be locked. memset()-ing
87 * the mutex to 0 is not allowed.
88 *
89 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
90 * checks that will enforce the restrictions and will also do
91 * deadlock debugging. )
92 *
93 * This function is similar to (but not equivalent to) down().
94 */
95void __sched mutex_lock(struct mutex *lock)
96{
97 might_sleep();
98 /*
99 * The locking fastpath is the 1->0 transition from
100 * 'unlocked' into 'locked' state.
101 */
102 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
103 mutex_set_owner(lock);
104}
105
106EXPORT_SYMBOL(mutex_lock);
107#endif
108
109static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
110 struct ww_acquire_ctx *ww_ctx)
111{
112#ifdef CONFIG_DEBUG_MUTEXES
113 /*
114 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
115 * but released with a normal mutex_unlock in this call.
116 *
117 * This should never happen, always use ww_mutex_unlock.
118 */
119 DEBUG_LOCKS_WARN_ON(ww->ctx);
120
121 /*
122 * Not quite done after calling ww_acquire_done() ?
123 */
124 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
125
126 if (ww_ctx->contending_lock) {
127 /*
128 * After -EDEADLK you tried to
129 * acquire a different ww_mutex? Bad!
130 */
131 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
132
133 /*
134 * You called ww_mutex_lock after receiving -EDEADLK,
135 * but 'forgot' to unlock everything else first?
136 */
137 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
138 ww_ctx->contending_lock = NULL;
139 }
140
141 /*
142 * Naughty, using a different class will lead to undefined behavior!
143 */
144 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
145#endif
146 ww_ctx->acquired++;
147}
148
149/*
150 * After acquiring lock with fastpath or when we lost out in contested
151 * slowpath, set ctx and wake up any waiters so they can recheck.
152 *
153 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
154 * as the fastpath and opportunistic spinning are disabled in that case.
155 */
156static __always_inline void
157ww_mutex_set_context_fastpath(struct ww_mutex *lock,
158 struct ww_acquire_ctx *ctx)
159{
160 unsigned long flags;
161 struct mutex_waiter *cur;
162
163 ww_mutex_lock_acquired(lock, ctx);
164
165 lock->ctx = ctx;
166
167 /*
168 * The lock->ctx update should be visible on all cores before
169 * the atomic read is done, otherwise contended waiters might be
170 * missed. The contended waiters will either see ww_ctx == NULL
171 * and keep spinning, or it will acquire wait_lock, add itself
172 * to waiter list and sleep.
173 */
174 smp_mb(); /* ^^^ */
175
176 /*
177 * Check if lock is contended, if not there is nobody to wake up
178 */
179 if (likely(atomic_read(&lock->base.count) == 0))
180 return;
181
182 /*
183 * Uh oh, we raced in fastpath, wake up everyone in this case,
184 * so they can see the new lock->ctx.
185 */
186 spin_lock_mutex(&lock->base.wait_lock, flags);
187 list_for_each_entry(cur, &lock->base.wait_list, list) {
188 debug_mutex_wake_waiter(&lock->base, cur);
189 wake_up_process(cur->task);
190 }
191 spin_unlock_mutex(&lock->base.wait_lock, flags);
192}
193
194/*
195 * After acquiring lock in the slowpath set ctx and wake up any
196 * waiters so they can recheck.
197 *
198 * Callers must hold the mutex wait_lock.
199 */
200static __always_inline void
201ww_mutex_set_context_slowpath(struct ww_mutex *lock,
202 struct ww_acquire_ctx *ctx)
203{
204 struct mutex_waiter *cur;
205
206 ww_mutex_lock_acquired(lock, ctx);
207 lock->ctx = ctx;
208
209 /*
210 * Give any possible sleeping processes the chance to wake up,
211 * so they can recheck if they have to back off.
212 */
213 list_for_each_entry(cur, &lock->base.wait_list, list) {
214 debug_mutex_wake_waiter(&lock->base, cur);
215 wake_up_process(cur->task);
216 }
217}
218
219#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
220/*
221 * Look out! "owner" is an entirely speculative pointer
222 * access and not reliable.
223 */
224static noinline
225bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
226{
227 bool ret = true;
228
229 rcu_read_lock();
230 while (lock->owner == owner) {
231 /*
232 * Ensure we emit the owner->on_cpu, dereference _after_
233 * checking lock->owner still matches owner. If that fails,
234 * owner might point to freed memory. If it still matches,
235 * the rcu_read_lock() ensures the memory stays valid.
236 */
237 barrier();
238
239 if (!owner->on_cpu || need_resched()) {
240 ret = false;
241 break;
242 }
243
244 cpu_relax_lowlatency();
245 }
246 rcu_read_unlock();
247
248 return ret;
249}
250
251/*
252 * Initial check for entering the mutex spinning loop
253 */
254static inline int mutex_can_spin_on_owner(struct mutex *lock)
255{
256 struct task_struct *owner;
257 int retval = 1;
258
259 if (need_resched())
260 return 0;
261
262 rcu_read_lock();
263 owner = READ_ONCE(lock->owner);
264 if (owner)
265 retval = owner->on_cpu;
266 rcu_read_unlock();
267 /*
268 * if lock->owner is not set, the mutex owner may have just acquired
269 * it and not set the owner yet or the mutex has been released.
270 */
271 return retval;
272}
273
274/*
275 * Atomically try to take the lock when it is available
276 */
277static inline bool mutex_try_to_acquire(struct mutex *lock)
278{
279 return !mutex_is_locked(lock) &&
280 (atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
281}
282
283/*
284 * Optimistic spinning.
285 *
286 * We try to spin for acquisition when we find that the lock owner
287 * is currently running on a (different) CPU and while we don't
288 * need to reschedule. The rationale is that if the lock owner is
289 * running, it is likely to release the lock soon.
290 *
291 * Since this needs the lock owner, and this mutex implementation
292 * doesn't track the owner atomically in the lock field, we need to
293 * track it non-atomically.
294 *
295 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
296 * to serialize everything.
297 *
298 * The mutex spinners are queued up using MCS lock so that only one
299 * spinner can compete for the mutex. However, if mutex spinning isn't
300 * going to happen, there is no point in going through the lock/unlock
301 * overhead.
302 *
303 * Returns true when the lock was taken, otherwise false, indicating
304 * that we need to jump to the slowpath and sleep.
305 */
306static bool mutex_optimistic_spin(struct mutex *lock,
307 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
308{
309 struct task_struct *task = current;
310
311 if (!mutex_can_spin_on_owner(lock))
312 goto done;
313
314 /*
315 * In order to avoid a stampede of mutex spinners trying to
316 * acquire the mutex all at once, the spinners need to take a
317 * MCS (queued) lock first before spinning on the owner field.
318 */
319 if (!osq_lock(&lock->osq))
320 goto done;
321
322 while (true) {
323 struct task_struct *owner;
324
325 if (use_ww_ctx && ww_ctx->acquired > 0) {
326 struct ww_mutex *ww;
327
328 ww = container_of(lock, struct ww_mutex, base);
329 /*
330 * If ww->ctx is set the contents are undefined, only
331 * by acquiring wait_lock there is a guarantee that
332 * they are not invalid when reading.
333 *
334 * As such, when deadlock detection needs to be
335 * performed the optimistic spinning cannot be done.
336 */
337 if (READ_ONCE(ww->ctx))
338 break;
339 }
340
341 /*
342 * If there's an owner, wait for it to either
343 * release the lock or go to sleep.
344 */
345 owner = READ_ONCE(lock->owner);
346 if (owner && !mutex_spin_on_owner(lock, owner))
347 break;
348
349 /* Try to acquire the mutex if it is unlocked. */
350 if (mutex_try_to_acquire(lock)) {
351 lock_acquired(&lock->dep_map, ip);
352
353 if (use_ww_ctx) {
354 struct ww_mutex *ww;
355 ww = container_of(lock, struct ww_mutex, base);
356
357 ww_mutex_set_context_fastpath(ww, ww_ctx);
358 }
359
360 mutex_set_owner(lock);
361 osq_unlock(&lock->osq);
362 return true;
363 }
364
365 /*
366 * When there's no owner, we might have preempted between the
367 * owner acquiring the lock and setting the owner field. If
368 * we're an RT task that will live-lock because we won't let
369 * the owner complete.
370 */
371 if (!owner && (need_resched() || rt_task(task)))
372 break;
373
374 /*
375 * The cpu_relax() call is a compiler barrier which forces
376 * everything in this loop to be re-loaded. We don't need
377 * memory barriers as we'll eventually observe the right
378 * values at the cost of a few extra spins.
379 */
380 cpu_relax_lowlatency();
381 }
382
383 osq_unlock(&lock->osq);
384done:
385 /*
386 * If we fell out of the spin path because of need_resched(),
387 * reschedule now, before we try-lock the mutex. This avoids getting
388 * scheduled out right after we obtained the mutex.
389 */
390 if (need_resched()) {
391 /*
392 * We _should_ have TASK_RUNNING here, but just in case
393 * we do not, make it so, otherwise we might get stuck.
394 */
395 __set_current_state(TASK_RUNNING);
396 schedule_preempt_disabled();
397 }
398
399 return false;
400}
401#else
402static bool mutex_optimistic_spin(struct mutex *lock,
403 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
404{
405 return false;
406}
407#endif
408
409__visible __used noinline
410void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
411
412/**
413 * mutex_unlock - release the mutex
414 * @lock: the mutex to be released
415 *
416 * Unlock a mutex that has been locked by this task previously.
417 *
418 * This function must not be used in interrupt context. Unlocking
419 * of a not locked mutex is not allowed.
420 *
421 * This function is similar to (but not equivalent to) up().
422 */
423void __sched mutex_unlock(struct mutex *lock)
424{
425 /*
426 * The unlocking fastpath is the 0->1 transition from 'locked'
427 * into 'unlocked' state:
428 */
429#ifndef CONFIG_DEBUG_MUTEXES
430 /*
431 * When debugging is enabled we must not clear the owner before time,
432 * the slow path will always be taken, and that clears the owner field
433 * after verifying that it was indeed current.
434 */
435 mutex_clear_owner(lock);
436#endif
437 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
438}
439
440EXPORT_SYMBOL(mutex_unlock);
441
442/**
443 * ww_mutex_unlock - release the w/w mutex
444 * @lock: the mutex to be released
445 *
446 * Unlock a mutex that has been locked by this task previously with any of the
447 * ww_mutex_lock* functions (with or without an acquire context). It is
448 * forbidden to release the locks after releasing the acquire context.
449 *
450 * This function must not be used in interrupt context. Unlocking
451 * of a unlocked mutex is not allowed.
452 */
453void __sched ww_mutex_unlock(struct ww_mutex *lock)
454{
455 /*
456 * The unlocking fastpath is the 0->1 transition from 'locked'
457 * into 'unlocked' state:
458 */
459 if (lock->ctx) {
460#ifdef CONFIG_DEBUG_MUTEXES
461 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
462#endif
463 if (lock->ctx->acquired > 0)
464 lock->ctx->acquired--;
465 lock->ctx = NULL;
466 }
467
468#ifndef CONFIG_DEBUG_MUTEXES
469 /*
470 * When debugging is enabled we must not clear the owner before time,
471 * the slow path will always be taken, and that clears the owner field
472 * after verifying that it was indeed current.
473 */
474 mutex_clear_owner(&lock->base);
475#endif
476 __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
477}
478EXPORT_SYMBOL(ww_mutex_unlock);
479
480static inline int __sched
481__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
482{
483 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
484 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
485
486 if (!hold_ctx)
487 return 0;
488
489 if (unlikely(ctx == hold_ctx))
490 return -EALREADY;
491
492 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
493 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
494#ifdef CONFIG_DEBUG_MUTEXES
495 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
496 ctx->contending_lock = ww;
497#endif
498 return -EDEADLK;
499 }
500
501 return 0;
502}
503
504/*
505 * Lock a mutex (possibly interruptible), slowpath:
506 */
507static __always_inline int __sched
508__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
509 struct lockdep_map *nest_lock, unsigned long ip,
510 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
511{
512 struct task_struct *task = current;
513 struct mutex_waiter waiter;
514 unsigned long flags;
515 int ret;
516
517 preempt_disable();
518 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
519
520 if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
521 /* got the lock, yay! */
522 preempt_enable();
523 return 0;
524 }
525
526 spin_lock_mutex(&lock->wait_lock, flags);
527
528 /*
529 * Once more, try to acquire the lock. Only try-lock the mutex if
530 * it is unlocked to reduce unnecessary xchg() operations.
531 */
532 if (!mutex_is_locked(lock) &&
533 (atomic_xchg_acquire(&lock->count, 0) == 1))
534 goto skip_wait;
535
536 debug_mutex_lock_common(lock, &waiter);
537 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
538
539 /* add waiting tasks to the end of the waitqueue (FIFO): */
540 list_add_tail(&waiter.list, &lock->wait_list);
541 waiter.task = task;
542
543 lock_contended(&lock->dep_map, ip);
544
545 for (;;) {
546 /*
547 * Lets try to take the lock again - this is needed even if
548 * we get here for the first time (shortly after failing to
549 * acquire the lock), to make sure that we get a wakeup once
550 * it's unlocked. Later on, if we sleep, this is the
551 * operation that gives us the lock. We xchg it to -1, so
552 * that when we release the lock, we properly wake up the
553 * other waiters. We only attempt the xchg if the count is
554 * non-negative in order to avoid unnecessary xchg operations:
555 */
556 if (atomic_read(&lock->count) >= 0 &&
557 (atomic_xchg_acquire(&lock->count, -1) == 1))
558 break;
559
560 /*
561 * got a signal? (This code gets eliminated in the
562 * TASK_UNINTERRUPTIBLE case.)
563 */
564 if (unlikely(signal_pending_state(state, task))) {
565 ret = -EINTR;
566 goto err;
567 }
568
569 if (use_ww_ctx && ww_ctx->acquired > 0) {
570 ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
571 if (ret)
572 goto err;
573 }
574
575 __set_task_state(task, state);
576
577 /* didn't get the lock, go to sleep: */
578 spin_unlock_mutex(&lock->wait_lock, flags);
579 schedule_preempt_disabled();
580 spin_lock_mutex(&lock->wait_lock, flags);
581 }
582 __set_task_state(task, TASK_RUNNING);
583
584 mutex_remove_waiter(lock, &waiter, current_thread_info());
585 /* set it to 0 if there are no waiters left: */
586 if (likely(list_empty(&lock->wait_list)))
587 atomic_set(&lock->count, 0);
588 debug_mutex_free_waiter(&waiter);
589
590skip_wait:
591 /* got the lock - cleanup and rejoice! */
592 lock_acquired(&lock->dep_map, ip);
593 mutex_set_owner(lock);
594
595 if (use_ww_ctx) {
596 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
597 ww_mutex_set_context_slowpath(ww, ww_ctx);
598 }
599
600 spin_unlock_mutex(&lock->wait_lock, flags);
601 preempt_enable();
602 return 0;
603
604err:
605 mutex_remove_waiter(lock, &waiter, task_thread_info(task));
606 spin_unlock_mutex(&lock->wait_lock, flags);
607 debug_mutex_free_waiter(&waiter);
608 mutex_release(&lock->dep_map, 1, ip);
609 preempt_enable();
610 return ret;
611}
612
613#ifdef CONFIG_DEBUG_LOCK_ALLOC
614void __sched
615mutex_lock_nested(struct mutex *lock, unsigned int subclass)
616{
617 might_sleep();
618 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
619 subclass, NULL, _RET_IP_, NULL, 0);
620}
621
622EXPORT_SYMBOL_GPL(mutex_lock_nested);
623
624void __sched
625_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
626{
627 might_sleep();
628 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
629 0, nest, _RET_IP_, NULL, 0);
630}
631
632EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
633
634int __sched
635mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
636{
637 might_sleep();
638 return __mutex_lock_common(lock, TASK_KILLABLE,
639 subclass, NULL, _RET_IP_, NULL, 0);
640}
641EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
642
643int __sched
644mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
645{
646 might_sleep();
647 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
648 subclass, NULL, _RET_IP_, NULL, 0);
649}
650
651EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
652
653static inline int
654ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
655{
656#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
657 unsigned tmp;
658
659 if (ctx->deadlock_inject_countdown-- == 0) {
660 tmp = ctx->deadlock_inject_interval;
661 if (tmp > UINT_MAX/4)
662 tmp = UINT_MAX;
663 else
664 tmp = tmp*2 + tmp + tmp/2;
665
666 ctx->deadlock_inject_interval = tmp;
667 ctx->deadlock_inject_countdown = tmp;
668 ctx->contending_lock = lock;
669
670 ww_mutex_unlock(lock);
671
672 return -EDEADLK;
673 }
674#endif
675
676 return 0;
677}
678
679int __sched
680__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
681{
682 int ret;
683
684 might_sleep();
685 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
686 0, &ctx->dep_map, _RET_IP_, ctx, 1);
687 if (!ret && ctx->acquired > 1)
688 return ww_mutex_deadlock_injection(lock, ctx);
689
690 return ret;
691}
692EXPORT_SYMBOL_GPL(__ww_mutex_lock);
693
694int __sched
695__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
696{
697 int ret;
698
699 might_sleep();
700 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
701 0, &ctx->dep_map, _RET_IP_, ctx, 1);
702
703 if (!ret && ctx->acquired > 1)
704 return ww_mutex_deadlock_injection(lock, ctx);
705
706 return ret;
707}
708EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
709
710#endif
711
712/*
713 * Release the lock, slowpath:
714 */
715static inline void
716__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
717{
718 unsigned long flags;
719 WAKE_Q(wake_q);
720
721 /*
722 * As a performance measurement, release the lock before doing other
723 * wakeup related duties to follow. This allows other tasks to acquire
724 * the lock sooner, while still handling cleanups in past unlock calls.
725 * This can be done as we do not enforce strict equivalence between the
726 * mutex counter and wait_list.
727 *
728 *
729 * Some architectures leave the lock unlocked in the fastpath failure
730 * case, others need to leave it locked. In the later case we have to
731 * unlock it here - as the lock counter is currently 0 or negative.
732 */
733 if (__mutex_slowpath_needs_to_unlock())
734 atomic_set(&lock->count, 1);
735
736 spin_lock_mutex(&lock->wait_lock, flags);
737 mutex_release(&lock->dep_map, nested, _RET_IP_);
738 debug_mutex_unlock(lock);
739
740 if (!list_empty(&lock->wait_list)) {
741 /* get the first entry from the wait-list: */
742 struct mutex_waiter *waiter =
743 list_entry(lock->wait_list.next,
744 struct mutex_waiter, list);
745
746 debug_mutex_wake_waiter(lock, waiter);
747 wake_q_add(&wake_q, waiter->task);
748 }
749
750 spin_unlock_mutex(&lock->wait_lock, flags);
751 wake_up_q(&wake_q);
752}
753
754/*
755 * Release the lock, slowpath:
756 */
757__visible void
758__mutex_unlock_slowpath(atomic_t *lock_count)
759{
760 struct mutex *lock = container_of(lock_count, struct mutex, count);
761
762 __mutex_unlock_common_slowpath(lock, 1);
763}
764
765#ifndef CONFIG_DEBUG_LOCK_ALLOC
766/*
767 * Here come the less common (and hence less performance-critical) APIs:
768 * mutex_lock_interruptible() and mutex_trylock().
769 */
770static noinline int __sched
771__mutex_lock_killable_slowpath(struct mutex *lock);
772
773static noinline int __sched
774__mutex_lock_interruptible_slowpath(struct mutex *lock);
775
776/**
777 * mutex_lock_interruptible - acquire the mutex, interruptible
778 * @lock: the mutex to be acquired
779 *
780 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
781 * been acquired or sleep until the mutex becomes available. If a
782 * signal arrives while waiting for the lock then this function
783 * returns -EINTR.
784 *
785 * This function is similar to (but not equivalent to) down_interruptible().
786 */
787int __sched mutex_lock_interruptible(struct mutex *lock)
788{
789 int ret;
790
791 might_sleep();
792 ret = __mutex_fastpath_lock_retval(&lock->count);
793 if (likely(!ret)) {
794 mutex_set_owner(lock);
795 return 0;
796 } else
797 return __mutex_lock_interruptible_slowpath(lock);
798}
799
800EXPORT_SYMBOL(mutex_lock_interruptible);
801
802int __sched mutex_lock_killable(struct mutex *lock)
803{
804 int ret;
805
806 might_sleep();
807 ret = __mutex_fastpath_lock_retval(&lock->count);
808 if (likely(!ret)) {
809 mutex_set_owner(lock);
810 return 0;
811 } else
812 return __mutex_lock_killable_slowpath(lock);
813}
814EXPORT_SYMBOL(mutex_lock_killable);
815
816__visible void __sched
817__mutex_lock_slowpath(atomic_t *lock_count)
818{
819 struct mutex *lock = container_of(lock_count, struct mutex, count);
820
821 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
822 NULL, _RET_IP_, NULL, 0);
823}
824
825static noinline int __sched
826__mutex_lock_killable_slowpath(struct mutex *lock)
827{
828 return __mutex_lock_common(lock, TASK_KILLABLE, 0,
829 NULL, _RET_IP_, NULL, 0);
830}
831
832static noinline int __sched
833__mutex_lock_interruptible_slowpath(struct mutex *lock)
834{
835 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
836 NULL, _RET_IP_, NULL, 0);
837}
838
839static noinline int __sched
840__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
841{
842 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
843 NULL, _RET_IP_, ctx, 1);
844}
845
846static noinline int __sched
847__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
848 struct ww_acquire_ctx *ctx)
849{
850 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
851 NULL, _RET_IP_, ctx, 1);
852}
853
854#endif
855
856/*
857 * Spinlock based trylock, we take the spinlock and check whether we
858 * can get the lock:
859 */
860static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
861{
862 struct mutex *lock = container_of(lock_count, struct mutex, count);
863 unsigned long flags;
864 int prev;
865
866 /* No need to trylock if the mutex is locked. */
867 if (mutex_is_locked(lock))
868 return 0;
869
870 spin_lock_mutex(&lock->wait_lock, flags);
871
872 prev = atomic_xchg_acquire(&lock->count, -1);
873 if (likely(prev == 1)) {
874 mutex_set_owner(lock);
875 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
876 }
877
878 /* Set it back to 0 if there are no waiters: */
879 if (likely(list_empty(&lock->wait_list)))
880 atomic_set(&lock->count, 0);
881
882 spin_unlock_mutex(&lock->wait_lock, flags);
883
884 return prev == 1;
885}
886
887/**
888 * mutex_trylock - try to acquire the mutex, without waiting
889 * @lock: the mutex to be acquired
890 *
891 * Try to acquire the mutex atomically. Returns 1 if the mutex
892 * has been acquired successfully, and 0 on contention.
893 *
894 * NOTE: this function follows the spin_trylock() convention, so
895 * it is negated from the down_trylock() return values! Be careful
896 * about this when converting semaphore users to mutexes.
897 *
898 * This function must not be used in interrupt context. The
899 * mutex must be released by the same task that acquired it.
900 */
901int __sched mutex_trylock(struct mutex *lock)
902{
903 int ret;
904
905 ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
906 if (ret)
907 mutex_set_owner(lock);
908
909 return ret;
910}
911EXPORT_SYMBOL(mutex_trylock);
912
913#ifndef CONFIG_DEBUG_LOCK_ALLOC
914int __sched
915__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
916{
917 int ret;
918
919 might_sleep();
920
921 ret = __mutex_fastpath_lock_retval(&lock->base.count);
922
923 if (likely(!ret)) {
924 ww_mutex_set_context_fastpath(lock, ctx);
925 mutex_set_owner(&lock->base);
926 } else
927 ret = __ww_mutex_lock_slowpath(lock, ctx);
928 return ret;
929}
930EXPORT_SYMBOL(__ww_mutex_lock);
931
932int __sched
933__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
934{
935 int ret;
936
937 might_sleep();
938
939 ret = __mutex_fastpath_lock_retval(&lock->base.count);
940
941 if (likely(!ret)) {
942 ww_mutex_set_context_fastpath(lock, ctx);
943 mutex_set_owner(&lock->base);
944 } else
945 ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
946 return ret;
947}
948EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
949
950#endif
951
952/**
953 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
954 * @cnt: the atomic which we are to dec
955 * @lock: the mutex to return holding if we dec to 0
956 *
957 * return true and hold lock if we dec to 0, return false otherwise
958 */
959int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
960{
961 /* dec if we can't possibly hit 0 */
962 if (atomic_add_unless(cnt, -1, 1))
963 return 0;
964 /* we might hit 0, so take the lock */
965 mutex_lock(lock);
966 if (!atomic_dec_and_test(cnt)) {
967 /* when we actually did the dec, we didn't hit 0 */
968 mutex_unlock(lock);
969 return 0;
970 }
971 /* we hit 0, and we hold the lock */
972 return 1;
973}
974EXPORT_SYMBOL(atomic_dec_and_mutex_lock);