Loading...
1/*
2 * kernel/locking/mutex.c
3 *
4 * Mutexes: blocking mutual exclusion locks
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 *
10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
11 * David Howells for suggestions and improvements.
12 *
13 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
14 * from the -rt tree, where it was originally implemented for rtmutexes
15 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
16 * and Sven Dietrich.
17 *
18 * Also see Documentation/locking/mutex-design.txt.
19 */
20#include <linux/mutex.h>
21#include <linux/ww_mutex.h>
22#include <linux/sched.h>
23#include <linux/sched/rt.h>
24#include <linux/export.h>
25#include <linux/spinlock.h>
26#include <linux/interrupt.h>
27#include <linux/debug_locks.h>
28#include <linux/osq_lock.h>
29
30#ifdef CONFIG_DEBUG_MUTEXES
31# include "mutex-debug.h"
32#else
33# include "mutex.h"
34#endif
35
36void
37__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
38{
39 atomic_long_set(&lock->owner, 0);
40 spin_lock_init(&lock->wait_lock);
41 INIT_LIST_HEAD(&lock->wait_list);
42#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
43 osq_lock_init(&lock->osq);
44#endif
45
46 debug_mutex_init(lock, name, key);
47}
48EXPORT_SYMBOL(__mutex_init);
49
50/*
51 * @owner: contains: 'struct task_struct *' to the current lock owner,
52 * NULL means not owned. Since task_struct pointers are aligned at
53 * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
54 * bits to store extra state.
55 *
56 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
57 * Bit1 indicates unlock needs to hand the lock to the top-waiter
58 */
59#define MUTEX_FLAG_WAITERS 0x01
60#define MUTEX_FLAG_HANDOFF 0x02
61
62#define MUTEX_FLAGS 0x03
63
64static inline struct task_struct *__owner_task(unsigned long owner)
65{
66 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
67}
68
69static inline unsigned long __owner_flags(unsigned long owner)
70{
71 return owner & MUTEX_FLAGS;
72}
73
74/*
75 * Actual trylock that will work on any unlocked state.
76 *
77 * When setting the owner field, we must preserve the low flag bits.
78 *
79 * Be careful with @handoff, only set that in a wait-loop (where you set
80 * HANDOFF) to avoid recursive lock attempts.
81 */
82static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
83{
84 unsigned long owner, curr = (unsigned long)current;
85
86 owner = atomic_long_read(&lock->owner);
87 for (;;) { /* must loop, can race against a flag */
88 unsigned long old, flags = __owner_flags(owner);
89
90 if (__owner_task(owner)) {
91 if (handoff && unlikely(__owner_task(owner) == current)) {
92 /*
93 * Provide ACQUIRE semantics for the lock-handoff.
94 *
95 * We cannot easily use load-acquire here, since
96 * the actual load is a failed cmpxchg, which
97 * doesn't imply any barriers.
98 *
99 * Also, this is a fairly unlikely scenario, and
100 * this contains the cost.
101 */
102 smp_mb(); /* ACQUIRE */
103 return true;
104 }
105
106 return false;
107 }
108
109 /*
110 * We set the HANDOFF bit, we must make sure it doesn't live
111 * past the point where we acquire it. This would be possible
112 * if we (accidentally) set the bit on an unlocked mutex.
113 */
114 if (handoff)
115 flags &= ~MUTEX_FLAG_HANDOFF;
116
117 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
118 if (old == owner)
119 return true;
120
121 owner = old;
122 }
123}
124
125#ifndef CONFIG_DEBUG_LOCK_ALLOC
126/*
127 * Lockdep annotations are contained to the slow paths for simplicity.
128 * There is nothing that would stop spreading the lockdep annotations outwards
129 * except more code.
130 */
131
132/*
133 * Optimistic trylock that only works in the uncontended case. Make sure to
134 * follow with a __mutex_trylock() before failing.
135 */
136static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
137{
138 unsigned long curr = (unsigned long)current;
139
140 if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
141 return true;
142
143 return false;
144}
145
146static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
147{
148 unsigned long curr = (unsigned long)current;
149
150 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
151 return true;
152
153 return false;
154}
155#endif
156
157static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
158{
159 atomic_long_or(flag, &lock->owner);
160}
161
162static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
163{
164 atomic_long_andnot(flag, &lock->owner);
165}
166
167static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
168{
169 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
170}
171
172/*
173 * Give up ownership to a specific task, when @task = NULL, this is equivalent
174 * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
175 * semantics like a regular unlock, the __mutex_trylock() provides matching
176 * ACQUIRE semantics for the handoff.
177 */
178static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
179{
180 unsigned long owner = atomic_long_read(&lock->owner);
181
182 for (;;) {
183 unsigned long old, new;
184
185#ifdef CONFIG_DEBUG_MUTEXES
186 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
187#endif
188
189 new = (owner & MUTEX_FLAG_WAITERS);
190 new |= (unsigned long)task;
191
192 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
193 if (old == owner)
194 break;
195
196 owner = old;
197 }
198}
199
200#ifndef CONFIG_DEBUG_LOCK_ALLOC
201/*
202 * We split the mutex lock/unlock logic into separate fastpath and
203 * slowpath functions, to reduce the register pressure on the fastpath.
204 * We also put the fastpath first in the kernel image, to make sure the
205 * branch is predicted by the CPU as default-untaken.
206 */
207static void __sched __mutex_lock_slowpath(struct mutex *lock);
208
209/**
210 * mutex_lock - acquire the mutex
211 * @lock: the mutex to be acquired
212 *
213 * Lock the mutex exclusively for this task. If the mutex is not
214 * available right now, it will sleep until it can get it.
215 *
216 * The mutex must later on be released by the same task that
217 * acquired it. Recursive locking is not allowed. The task
218 * may not exit without first unlocking the mutex. Also, kernel
219 * memory where the mutex resides must not be freed with
220 * the mutex still locked. The mutex must first be initialized
221 * (or statically defined) before it can be locked. memset()-ing
222 * the mutex to 0 is not allowed.
223 *
224 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
225 * checks that will enforce the restrictions and will also do
226 * deadlock debugging. )
227 *
228 * This function is similar to (but not equivalent to) down().
229 */
230void __sched mutex_lock(struct mutex *lock)
231{
232 might_sleep();
233
234 if (!__mutex_trylock_fast(lock))
235 __mutex_lock_slowpath(lock);
236}
237EXPORT_SYMBOL(mutex_lock);
238#endif
239
240static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
241 struct ww_acquire_ctx *ww_ctx)
242{
243#ifdef CONFIG_DEBUG_MUTEXES
244 /*
245 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
246 * but released with a normal mutex_unlock in this call.
247 *
248 * This should never happen, always use ww_mutex_unlock.
249 */
250 DEBUG_LOCKS_WARN_ON(ww->ctx);
251
252 /*
253 * Not quite done after calling ww_acquire_done() ?
254 */
255 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
256
257 if (ww_ctx->contending_lock) {
258 /*
259 * After -EDEADLK you tried to
260 * acquire a different ww_mutex? Bad!
261 */
262 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
263
264 /*
265 * You called ww_mutex_lock after receiving -EDEADLK,
266 * but 'forgot' to unlock everything else first?
267 */
268 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
269 ww_ctx->contending_lock = NULL;
270 }
271
272 /*
273 * Naughty, using a different class will lead to undefined behavior!
274 */
275 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
276#endif
277 ww_ctx->acquired++;
278}
279
280/*
281 * After acquiring lock with fastpath or when we lost out in contested
282 * slowpath, set ctx and wake up any waiters so they can recheck.
283 */
284static __always_inline void
285ww_mutex_set_context_fastpath(struct ww_mutex *lock,
286 struct ww_acquire_ctx *ctx)
287{
288 unsigned long flags;
289 struct mutex_waiter *cur;
290
291 ww_mutex_lock_acquired(lock, ctx);
292
293 lock->ctx = ctx;
294
295 /*
296 * The lock->ctx update should be visible on all cores before
297 * the atomic read is done, otherwise contended waiters might be
298 * missed. The contended waiters will either see ww_ctx == NULL
299 * and keep spinning, or it will acquire wait_lock, add itself
300 * to waiter list and sleep.
301 */
302 smp_mb(); /* ^^^ */
303
304 /*
305 * Check if lock is contended, if not there is nobody to wake up
306 */
307 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
308 return;
309
310 /*
311 * Uh oh, we raced in fastpath, wake up everyone in this case,
312 * so they can see the new lock->ctx.
313 */
314 spin_lock_mutex(&lock->base.wait_lock, flags);
315 list_for_each_entry(cur, &lock->base.wait_list, list) {
316 debug_mutex_wake_waiter(&lock->base, cur);
317 wake_up_process(cur->task);
318 }
319 spin_unlock_mutex(&lock->base.wait_lock, flags);
320}
321
322/*
323 * After acquiring lock in the slowpath set ctx and wake up any
324 * waiters so they can recheck.
325 *
326 * Callers must hold the mutex wait_lock.
327 */
328static __always_inline void
329ww_mutex_set_context_slowpath(struct ww_mutex *lock,
330 struct ww_acquire_ctx *ctx)
331{
332 struct mutex_waiter *cur;
333
334 ww_mutex_lock_acquired(lock, ctx);
335 lock->ctx = ctx;
336
337 /*
338 * Give any possible sleeping processes the chance to wake up,
339 * so they can recheck if they have to back off.
340 */
341 list_for_each_entry(cur, &lock->base.wait_list, list) {
342 debug_mutex_wake_waiter(&lock->base, cur);
343 wake_up_process(cur->task);
344 }
345}
346
347#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
348/*
349 * Look out! "owner" is an entirely speculative pointer
350 * access and not reliable.
351 */
352static noinline
353bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
354{
355 bool ret = true;
356
357 rcu_read_lock();
358 while (__mutex_owner(lock) == owner) {
359 /*
360 * Ensure we emit the owner->on_cpu, dereference _after_
361 * checking lock->owner still matches owner. If that fails,
362 * owner might point to freed memory. If it still matches,
363 * the rcu_read_lock() ensures the memory stays valid.
364 */
365 barrier();
366
367 /*
368 * Use vcpu_is_preempted to detect lock holder preemption issue.
369 */
370 if (!owner->on_cpu || need_resched() ||
371 vcpu_is_preempted(task_cpu(owner))) {
372 ret = false;
373 break;
374 }
375
376 cpu_relax();
377 }
378 rcu_read_unlock();
379
380 return ret;
381}
382
383/*
384 * Initial check for entering the mutex spinning loop
385 */
386static inline int mutex_can_spin_on_owner(struct mutex *lock)
387{
388 struct task_struct *owner;
389 int retval = 1;
390
391 if (need_resched())
392 return 0;
393
394 rcu_read_lock();
395 owner = __mutex_owner(lock);
396
397 /*
398 * As lock holder preemption issue, we both skip spinning if task is not
399 * on cpu or its cpu is preempted
400 */
401 if (owner)
402 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
403 rcu_read_unlock();
404
405 /*
406 * If lock->owner is not set, the mutex has been released. Return true
407 * such that we'll trylock in the spin path, which is a faster option
408 * than the blocking slow path.
409 */
410 return retval;
411}
412
413/*
414 * Optimistic spinning.
415 *
416 * We try to spin for acquisition when we find that the lock owner
417 * is currently running on a (different) CPU and while we don't
418 * need to reschedule. The rationale is that if the lock owner is
419 * running, it is likely to release the lock soon.
420 *
421 * The mutex spinners are queued up using MCS lock so that only one
422 * spinner can compete for the mutex. However, if mutex spinning isn't
423 * going to happen, there is no point in going through the lock/unlock
424 * overhead.
425 *
426 * Returns true when the lock was taken, otherwise false, indicating
427 * that we need to jump to the slowpath and sleep.
428 *
429 * The waiter flag is set to true if the spinner is a waiter in the wait
430 * queue. The waiter-spinner will spin on the lock directly and concurrently
431 * with the spinner at the head of the OSQ, if present, until the owner is
432 * changed to itself.
433 */
434static bool mutex_optimistic_spin(struct mutex *lock,
435 struct ww_acquire_ctx *ww_ctx,
436 const bool use_ww_ctx, const bool waiter)
437{
438 struct task_struct *task = current;
439
440 if (!waiter) {
441 /*
442 * The purpose of the mutex_can_spin_on_owner() function is
443 * to eliminate the overhead of osq_lock() and osq_unlock()
444 * in case spinning isn't possible. As a waiter-spinner
445 * is not going to take OSQ lock anyway, there is no need
446 * to call mutex_can_spin_on_owner().
447 */
448 if (!mutex_can_spin_on_owner(lock))
449 goto fail;
450
451 /*
452 * In order to avoid a stampede of mutex spinners trying to
453 * acquire the mutex all at once, the spinners need to take a
454 * MCS (queued) lock first before spinning on the owner field.
455 */
456 if (!osq_lock(&lock->osq))
457 goto fail;
458 }
459
460 for (;;) {
461 struct task_struct *owner;
462
463 if (use_ww_ctx && ww_ctx->acquired > 0) {
464 struct ww_mutex *ww;
465
466 ww = container_of(lock, struct ww_mutex, base);
467 /*
468 * If ww->ctx is set the contents are undefined, only
469 * by acquiring wait_lock there is a guarantee that
470 * they are not invalid when reading.
471 *
472 * As such, when deadlock detection needs to be
473 * performed the optimistic spinning cannot be done.
474 */
475 if (READ_ONCE(ww->ctx))
476 goto fail_unlock;
477 }
478
479 /*
480 * If there's an owner, wait for it to either
481 * release the lock or go to sleep.
482 */
483 owner = __mutex_owner(lock);
484 if (owner) {
485 if (waiter && owner == task) {
486 smp_mb(); /* ACQUIRE */
487 break;
488 }
489
490 if (!mutex_spin_on_owner(lock, owner))
491 goto fail_unlock;
492 }
493
494 /* Try to acquire the mutex if it is unlocked. */
495 if (__mutex_trylock(lock, waiter))
496 break;
497
498 /*
499 * The cpu_relax() call is a compiler barrier which forces
500 * everything in this loop to be re-loaded. We don't need
501 * memory barriers as we'll eventually observe the right
502 * values at the cost of a few extra spins.
503 */
504 cpu_relax();
505 }
506
507 if (!waiter)
508 osq_unlock(&lock->osq);
509
510 return true;
511
512
513fail_unlock:
514 if (!waiter)
515 osq_unlock(&lock->osq);
516
517fail:
518 /*
519 * If we fell out of the spin path because of need_resched(),
520 * reschedule now, before we try-lock the mutex. This avoids getting
521 * scheduled out right after we obtained the mutex.
522 */
523 if (need_resched()) {
524 /*
525 * We _should_ have TASK_RUNNING here, but just in case
526 * we do not, make it so, otherwise we might get stuck.
527 */
528 __set_current_state(TASK_RUNNING);
529 schedule_preempt_disabled();
530 }
531
532 return false;
533}
534#else
535static bool mutex_optimistic_spin(struct mutex *lock,
536 struct ww_acquire_ctx *ww_ctx,
537 const bool use_ww_ctx, const bool waiter)
538{
539 return false;
540}
541#endif
542
543static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
544
545/**
546 * mutex_unlock - release the mutex
547 * @lock: the mutex to be released
548 *
549 * Unlock a mutex that has been locked by this task previously.
550 *
551 * This function must not be used in interrupt context. Unlocking
552 * of a not locked mutex is not allowed.
553 *
554 * This function is similar to (but not equivalent to) up().
555 */
556void __sched mutex_unlock(struct mutex *lock)
557{
558#ifndef CONFIG_DEBUG_LOCK_ALLOC
559 if (__mutex_unlock_fast(lock))
560 return;
561#endif
562 __mutex_unlock_slowpath(lock, _RET_IP_);
563}
564EXPORT_SYMBOL(mutex_unlock);
565
566/**
567 * ww_mutex_unlock - release the w/w mutex
568 * @lock: the mutex to be released
569 *
570 * Unlock a mutex that has been locked by this task previously with any of the
571 * ww_mutex_lock* functions (with or without an acquire context). It is
572 * forbidden to release the locks after releasing the acquire context.
573 *
574 * This function must not be used in interrupt context. Unlocking
575 * of a unlocked mutex is not allowed.
576 */
577void __sched ww_mutex_unlock(struct ww_mutex *lock)
578{
579 /*
580 * The unlocking fastpath is the 0->1 transition from 'locked'
581 * into 'unlocked' state:
582 */
583 if (lock->ctx) {
584#ifdef CONFIG_DEBUG_MUTEXES
585 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
586#endif
587 if (lock->ctx->acquired > 0)
588 lock->ctx->acquired--;
589 lock->ctx = NULL;
590 }
591
592 mutex_unlock(&lock->base);
593}
594EXPORT_SYMBOL(ww_mutex_unlock);
595
596static inline int __sched
597__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
598{
599 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
600 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
601
602 if (!hold_ctx)
603 return 0;
604
605 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
606 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
607#ifdef CONFIG_DEBUG_MUTEXES
608 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
609 ctx->contending_lock = ww;
610#endif
611 return -EDEADLK;
612 }
613
614 return 0;
615}
616
617/*
618 * Lock a mutex (possibly interruptible), slowpath:
619 */
620static __always_inline int __sched
621__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
622 struct lockdep_map *nest_lock, unsigned long ip,
623 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
624{
625 struct task_struct *task = current;
626 struct mutex_waiter waiter;
627 unsigned long flags;
628 bool first = false;
629 struct ww_mutex *ww;
630 int ret;
631
632 if (use_ww_ctx) {
633 ww = container_of(lock, struct ww_mutex, base);
634 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
635 return -EALREADY;
636 }
637
638 preempt_disable();
639 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
640
641 if (__mutex_trylock(lock, false) ||
642 mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
643 /* got the lock, yay! */
644 lock_acquired(&lock->dep_map, ip);
645 if (use_ww_ctx)
646 ww_mutex_set_context_fastpath(ww, ww_ctx);
647 preempt_enable();
648 return 0;
649 }
650
651 spin_lock_mutex(&lock->wait_lock, flags);
652 /*
653 * After waiting to acquire the wait_lock, try again.
654 */
655 if (__mutex_trylock(lock, false))
656 goto skip_wait;
657
658 debug_mutex_lock_common(lock, &waiter);
659 debug_mutex_add_waiter(lock, &waiter, task);
660
661 /* add waiting tasks to the end of the waitqueue (FIFO): */
662 list_add_tail(&waiter.list, &lock->wait_list);
663 waiter.task = task;
664
665 if (__mutex_waiter_is_first(lock, &waiter))
666 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
667
668 lock_contended(&lock->dep_map, ip);
669
670 set_task_state(task, state);
671 for (;;) {
672 /*
673 * Once we hold wait_lock, we're serialized against
674 * mutex_unlock() handing the lock off to us, do a trylock
675 * before testing the error conditions to make sure we pick up
676 * the handoff.
677 */
678 if (__mutex_trylock(lock, first))
679 goto acquired;
680
681 /*
682 * Check for signals and wound conditions while holding
683 * wait_lock. This ensures the lock cancellation is ordered
684 * against mutex_unlock() and wake-ups do not go missing.
685 */
686 if (unlikely(signal_pending_state(state, task))) {
687 ret = -EINTR;
688 goto err;
689 }
690
691 if (use_ww_ctx && ww_ctx->acquired > 0) {
692 ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
693 if (ret)
694 goto err;
695 }
696
697 spin_unlock_mutex(&lock->wait_lock, flags);
698 schedule_preempt_disabled();
699
700 if (!first && __mutex_waiter_is_first(lock, &waiter)) {
701 first = true;
702 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
703 }
704
705 set_task_state(task, state);
706 /*
707 * Here we order against unlock; we must either see it change
708 * state back to RUNNING and fall through the next schedule(),
709 * or we must see its unlock and acquire.
710 */
711 if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
712 __mutex_trylock(lock, first))
713 break;
714
715 spin_lock_mutex(&lock->wait_lock, flags);
716 }
717 spin_lock_mutex(&lock->wait_lock, flags);
718acquired:
719 __set_task_state(task, TASK_RUNNING);
720
721 mutex_remove_waiter(lock, &waiter, task);
722 if (likely(list_empty(&lock->wait_list)))
723 __mutex_clear_flag(lock, MUTEX_FLAGS);
724
725 debug_mutex_free_waiter(&waiter);
726
727skip_wait:
728 /* got the lock - cleanup and rejoice! */
729 lock_acquired(&lock->dep_map, ip);
730
731 if (use_ww_ctx)
732 ww_mutex_set_context_slowpath(ww, ww_ctx);
733
734 spin_unlock_mutex(&lock->wait_lock, flags);
735 preempt_enable();
736 return 0;
737
738err:
739 __set_task_state(task, TASK_RUNNING);
740 mutex_remove_waiter(lock, &waiter, task);
741 spin_unlock_mutex(&lock->wait_lock, flags);
742 debug_mutex_free_waiter(&waiter);
743 mutex_release(&lock->dep_map, 1, ip);
744 preempt_enable();
745 return ret;
746}
747
748#ifdef CONFIG_DEBUG_LOCK_ALLOC
749void __sched
750mutex_lock_nested(struct mutex *lock, unsigned int subclass)
751{
752 might_sleep();
753 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
754 subclass, NULL, _RET_IP_, NULL, 0);
755}
756
757EXPORT_SYMBOL_GPL(mutex_lock_nested);
758
759void __sched
760_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
761{
762 might_sleep();
763 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
764 0, nest, _RET_IP_, NULL, 0);
765}
766EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
767
768int __sched
769mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
770{
771 might_sleep();
772 return __mutex_lock_common(lock, TASK_KILLABLE,
773 subclass, NULL, _RET_IP_, NULL, 0);
774}
775EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
776
777int __sched
778mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
779{
780 might_sleep();
781 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
782 subclass, NULL, _RET_IP_, NULL, 0);
783}
784EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
785
786static inline int
787ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
788{
789#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
790 unsigned tmp;
791
792 if (ctx->deadlock_inject_countdown-- == 0) {
793 tmp = ctx->deadlock_inject_interval;
794 if (tmp > UINT_MAX/4)
795 tmp = UINT_MAX;
796 else
797 tmp = tmp*2 + tmp + tmp/2;
798
799 ctx->deadlock_inject_interval = tmp;
800 ctx->deadlock_inject_countdown = tmp;
801 ctx->contending_lock = lock;
802
803 ww_mutex_unlock(lock);
804
805 return -EDEADLK;
806 }
807#endif
808
809 return 0;
810}
811
812int __sched
813__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
814{
815 int ret;
816
817 might_sleep();
818 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
819 0, &ctx->dep_map, _RET_IP_, ctx, 1);
820 if (!ret && ctx->acquired > 1)
821 return ww_mutex_deadlock_injection(lock, ctx);
822
823 return ret;
824}
825EXPORT_SYMBOL_GPL(__ww_mutex_lock);
826
827int __sched
828__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
829{
830 int ret;
831
832 might_sleep();
833 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
834 0, &ctx->dep_map, _RET_IP_, ctx, 1);
835
836 if (!ret && ctx->acquired > 1)
837 return ww_mutex_deadlock_injection(lock, ctx);
838
839 return ret;
840}
841EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
842
843#endif
844
845/*
846 * Release the lock, slowpath:
847 */
848static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
849{
850 struct task_struct *next = NULL;
851 unsigned long owner, flags;
852 DEFINE_WAKE_Q(wake_q);
853
854 mutex_release(&lock->dep_map, 1, ip);
855
856 /*
857 * Release the lock before (potentially) taking the spinlock such that
858 * other contenders can get on with things ASAP.
859 *
860 * Except when HANDOFF, in that case we must not clear the owner field,
861 * but instead set it to the top waiter.
862 */
863 owner = atomic_long_read(&lock->owner);
864 for (;;) {
865 unsigned long old;
866
867#ifdef CONFIG_DEBUG_MUTEXES
868 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
869#endif
870
871 if (owner & MUTEX_FLAG_HANDOFF)
872 break;
873
874 old = atomic_long_cmpxchg_release(&lock->owner, owner,
875 __owner_flags(owner));
876 if (old == owner) {
877 if (owner & MUTEX_FLAG_WAITERS)
878 break;
879
880 return;
881 }
882
883 owner = old;
884 }
885
886 spin_lock_mutex(&lock->wait_lock, flags);
887 debug_mutex_unlock(lock);
888 if (!list_empty(&lock->wait_list)) {
889 /* get the first entry from the wait-list: */
890 struct mutex_waiter *waiter =
891 list_first_entry(&lock->wait_list,
892 struct mutex_waiter, list);
893
894 next = waiter->task;
895
896 debug_mutex_wake_waiter(lock, waiter);
897 wake_q_add(&wake_q, next);
898 }
899
900 if (owner & MUTEX_FLAG_HANDOFF)
901 __mutex_handoff(lock, next);
902
903 spin_unlock_mutex(&lock->wait_lock, flags);
904
905 wake_up_q(&wake_q);
906}
907
908#ifndef CONFIG_DEBUG_LOCK_ALLOC
909/*
910 * Here come the less common (and hence less performance-critical) APIs:
911 * mutex_lock_interruptible() and mutex_trylock().
912 */
913static noinline int __sched
914__mutex_lock_killable_slowpath(struct mutex *lock);
915
916static noinline int __sched
917__mutex_lock_interruptible_slowpath(struct mutex *lock);
918
919/**
920 * mutex_lock_interruptible - acquire the mutex, interruptible
921 * @lock: the mutex to be acquired
922 *
923 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
924 * been acquired or sleep until the mutex becomes available. If a
925 * signal arrives while waiting for the lock then this function
926 * returns -EINTR.
927 *
928 * This function is similar to (but not equivalent to) down_interruptible().
929 */
930int __sched mutex_lock_interruptible(struct mutex *lock)
931{
932 might_sleep();
933
934 if (__mutex_trylock_fast(lock))
935 return 0;
936
937 return __mutex_lock_interruptible_slowpath(lock);
938}
939
940EXPORT_SYMBOL(mutex_lock_interruptible);
941
942int __sched mutex_lock_killable(struct mutex *lock)
943{
944 might_sleep();
945
946 if (__mutex_trylock_fast(lock))
947 return 0;
948
949 return __mutex_lock_killable_slowpath(lock);
950}
951EXPORT_SYMBOL(mutex_lock_killable);
952
953static noinline void __sched
954__mutex_lock_slowpath(struct mutex *lock)
955{
956 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
957 NULL, _RET_IP_, NULL, 0);
958}
959
960static noinline int __sched
961__mutex_lock_killable_slowpath(struct mutex *lock)
962{
963 return __mutex_lock_common(lock, TASK_KILLABLE, 0,
964 NULL, _RET_IP_, NULL, 0);
965}
966
967static noinline int __sched
968__mutex_lock_interruptible_slowpath(struct mutex *lock)
969{
970 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
971 NULL, _RET_IP_, NULL, 0);
972}
973
974static noinline int __sched
975__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
976{
977 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
978 NULL, _RET_IP_, ctx, 1);
979}
980
981static noinline int __sched
982__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
983 struct ww_acquire_ctx *ctx)
984{
985 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
986 NULL, _RET_IP_, ctx, 1);
987}
988
989#endif
990
991/**
992 * mutex_trylock - try to acquire the mutex, without waiting
993 * @lock: the mutex to be acquired
994 *
995 * Try to acquire the mutex atomically. Returns 1 if the mutex
996 * has been acquired successfully, and 0 on contention.
997 *
998 * NOTE: this function follows the spin_trylock() convention, so
999 * it is negated from the down_trylock() return values! Be careful
1000 * about this when converting semaphore users to mutexes.
1001 *
1002 * This function must not be used in interrupt context. The
1003 * mutex must be released by the same task that acquired it.
1004 */
1005int __sched mutex_trylock(struct mutex *lock)
1006{
1007 bool locked = __mutex_trylock(lock, false);
1008
1009 if (locked)
1010 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1011
1012 return locked;
1013}
1014EXPORT_SYMBOL(mutex_trylock);
1015
1016#ifndef CONFIG_DEBUG_LOCK_ALLOC
1017int __sched
1018__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1019{
1020 might_sleep();
1021
1022 if (__mutex_trylock_fast(&lock->base)) {
1023 ww_mutex_set_context_fastpath(lock, ctx);
1024 return 0;
1025 }
1026
1027 return __ww_mutex_lock_slowpath(lock, ctx);
1028}
1029EXPORT_SYMBOL(__ww_mutex_lock);
1030
1031int __sched
1032__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1033{
1034 might_sleep();
1035
1036 if (__mutex_trylock_fast(&lock->base)) {
1037 ww_mutex_set_context_fastpath(lock, ctx);
1038 return 0;
1039 }
1040
1041 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1042}
1043EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
1044
1045#endif
1046
1047/**
1048 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1049 * @cnt: the atomic which we are to dec
1050 * @lock: the mutex to return holding if we dec to 0
1051 *
1052 * return true and hold lock if we dec to 0, return false otherwise
1053 */
1054int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1055{
1056 /* dec if we can't possibly hit 0 */
1057 if (atomic_add_unless(cnt, -1, 1))
1058 return 0;
1059 /* we might hit 0, so take the lock */
1060 mutex_lock(lock);
1061 if (!atomic_dec_and_test(cnt)) {
1062 /* when we actually did the dec, we didn't hit 0 */
1063 mutex_unlock(lock);
1064 return 0;
1065 }
1066 /* we hit 0, and we hold the lock */
1067 return 1;
1068}
1069EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * kernel/locking/mutex.c
4 *
5 * Mutexes: blocking mutual exclusion locks
6 *
7 * Started by Ingo Molnar:
8 *
9 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 *
11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12 * David Howells for suggestions and improvements.
13 *
14 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15 * from the -rt tree, where it was originally implemented for rtmutexes
16 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17 * and Sven Dietrich.
18 *
19 * Also see Documentation/locking/mutex-design.rst.
20 */
21#include <linux/mutex.h>
22#include <linux/ww_mutex.h>
23#include <linux/sched/signal.h>
24#include <linux/sched/rt.h>
25#include <linux/sched/wake_q.h>
26#include <linux/sched/debug.h>
27#include <linux/export.h>
28#include <linux/spinlock.h>
29#include <linux/interrupt.h>
30#include <linux/debug_locks.h>
31#include <linux/osq_lock.h>
32
33#define CREATE_TRACE_POINTS
34#include <trace/events/lock.h>
35
36#ifndef CONFIG_PREEMPT_RT
37#include "mutex.h"
38
39#ifdef CONFIG_DEBUG_MUTEXES
40# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
41#else
42# define MUTEX_WARN_ON(cond)
43#endif
44
45void
46__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
47{
48 atomic_long_set(&lock->owner, 0);
49 raw_spin_lock_init(&lock->wait_lock);
50 INIT_LIST_HEAD(&lock->wait_list);
51#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
52 osq_lock_init(&lock->osq);
53#endif
54
55 debug_mutex_init(lock, name, key);
56}
57EXPORT_SYMBOL(__mutex_init);
58
59/*
60 * @owner: contains: 'struct task_struct *' to the current lock owner,
61 * NULL means not owned. Since task_struct pointers are aligned at
62 * at least L1_CACHE_BYTES, we have low bits to store extra state.
63 *
64 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
65 * Bit1 indicates unlock needs to hand the lock to the top-waiter
66 * Bit2 indicates handoff has been done and we're waiting for pickup.
67 */
68#define MUTEX_FLAG_WAITERS 0x01
69#define MUTEX_FLAG_HANDOFF 0x02
70#define MUTEX_FLAG_PICKUP 0x04
71
72#define MUTEX_FLAGS 0x07
73
74/*
75 * Internal helper function; C doesn't allow us to hide it :/
76 *
77 * DO NOT USE (outside of mutex code).
78 */
79static inline struct task_struct *__mutex_owner(struct mutex *lock)
80{
81 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
82}
83
84static inline struct task_struct *__owner_task(unsigned long owner)
85{
86 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
87}
88
89bool mutex_is_locked(struct mutex *lock)
90{
91 return __mutex_owner(lock) != NULL;
92}
93EXPORT_SYMBOL(mutex_is_locked);
94
95static inline unsigned long __owner_flags(unsigned long owner)
96{
97 return owner & MUTEX_FLAGS;
98}
99
100/*
101 * Returns: __mutex_owner(lock) on failure or NULL on success.
102 */
103static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
104{
105 unsigned long owner, curr = (unsigned long)current;
106
107 owner = atomic_long_read(&lock->owner);
108 for (;;) { /* must loop, can race against a flag */
109 unsigned long flags = __owner_flags(owner);
110 unsigned long task = owner & ~MUTEX_FLAGS;
111
112 if (task) {
113 if (flags & MUTEX_FLAG_PICKUP) {
114 if (task != curr)
115 break;
116 flags &= ~MUTEX_FLAG_PICKUP;
117 } else if (handoff) {
118 if (flags & MUTEX_FLAG_HANDOFF)
119 break;
120 flags |= MUTEX_FLAG_HANDOFF;
121 } else {
122 break;
123 }
124 } else {
125 MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
126 task = curr;
127 }
128
129 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
130 if (task == curr)
131 return NULL;
132 break;
133 }
134 }
135
136 return __owner_task(owner);
137}
138
139/*
140 * Trylock or set HANDOFF
141 */
142static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
143{
144 return !__mutex_trylock_common(lock, handoff);
145}
146
147/*
148 * Actual trylock that will work on any unlocked state.
149 */
150static inline bool __mutex_trylock(struct mutex *lock)
151{
152 return !__mutex_trylock_common(lock, false);
153}
154
155#ifndef CONFIG_DEBUG_LOCK_ALLOC
156/*
157 * Lockdep annotations are contained to the slow paths for simplicity.
158 * There is nothing that would stop spreading the lockdep annotations outwards
159 * except more code.
160 */
161
162/*
163 * Optimistic trylock that only works in the uncontended case. Make sure to
164 * follow with a __mutex_trylock() before failing.
165 */
166static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
167{
168 unsigned long curr = (unsigned long)current;
169 unsigned long zero = 0UL;
170
171 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
172 return true;
173
174 return false;
175}
176
177static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
178{
179 unsigned long curr = (unsigned long)current;
180
181 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
182}
183#endif
184
185static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
186{
187 atomic_long_or(flag, &lock->owner);
188}
189
190static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
191{
192 atomic_long_andnot(flag, &lock->owner);
193}
194
195static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
196{
197 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
198}
199
200/*
201 * Add @waiter to a given location in the lock wait_list and set the
202 * FLAG_WAITERS flag if it's the first waiter.
203 */
204static void
205__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
206 struct list_head *list)
207{
208 debug_mutex_add_waiter(lock, waiter, current);
209
210 list_add_tail(&waiter->list, list);
211 if (__mutex_waiter_is_first(lock, waiter))
212 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
213}
214
215static void
216__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
217{
218 list_del(&waiter->list);
219 if (likely(list_empty(&lock->wait_list)))
220 __mutex_clear_flag(lock, MUTEX_FLAGS);
221
222 debug_mutex_remove_waiter(lock, waiter, current);
223}
224
225/*
226 * Give up ownership to a specific task, when @task = NULL, this is equivalent
227 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
228 * WAITERS. Provides RELEASE semantics like a regular unlock, the
229 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
230 */
231static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
232{
233 unsigned long owner = atomic_long_read(&lock->owner);
234
235 for (;;) {
236 unsigned long new;
237
238 MUTEX_WARN_ON(__owner_task(owner) != current);
239 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
240
241 new = (owner & MUTEX_FLAG_WAITERS);
242 new |= (unsigned long)task;
243 if (task)
244 new |= MUTEX_FLAG_PICKUP;
245
246 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
247 break;
248 }
249}
250
251#ifndef CONFIG_DEBUG_LOCK_ALLOC
252/*
253 * We split the mutex lock/unlock logic into separate fastpath and
254 * slowpath functions, to reduce the register pressure on the fastpath.
255 * We also put the fastpath first in the kernel image, to make sure the
256 * branch is predicted by the CPU as default-untaken.
257 */
258static void __sched __mutex_lock_slowpath(struct mutex *lock);
259
260/**
261 * mutex_lock - acquire the mutex
262 * @lock: the mutex to be acquired
263 *
264 * Lock the mutex exclusively for this task. If the mutex is not
265 * available right now, it will sleep until it can get it.
266 *
267 * The mutex must later on be released by the same task that
268 * acquired it. Recursive locking is not allowed. The task
269 * may not exit without first unlocking the mutex. Also, kernel
270 * memory where the mutex resides must not be freed with
271 * the mutex still locked. The mutex must first be initialized
272 * (or statically defined) before it can be locked. memset()-ing
273 * the mutex to 0 is not allowed.
274 *
275 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
276 * checks that will enforce the restrictions and will also do
277 * deadlock debugging)
278 *
279 * This function is similar to (but not equivalent to) down().
280 */
281void __sched mutex_lock(struct mutex *lock)
282{
283 might_sleep();
284
285 if (!__mutex_trylock_fast(lock))
286 __mutex_lock_slowpath(lock);
287}
288EXPORT_SYMBOL(mutex_lock);
289#endif
290
291#include "ww_mutex.h"
292
293#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
294
295/*
296 * Trylock variant that returns the owning task on failure.
297 */
298static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
299{
300 return __mutex_trylock_common(lock, false);
301}
302
303static inline
304bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
305 struct mutex_waiter *waiter)
306{
307 struct ww_mutex *ww;
308
309 ww = container_of(lock, struct ww_mutex, base);
310
311 /*
312 * If ww->ctx is set the contents are undefined, only
313 * by acquiring wait_lock there is a guarantee that
314 * they are not invalid when reading.
315 *
316 * As such, when deadlock detection needs to be
317 * performed the optimistic spinning cannot be done.
318 *
319 * Check this in every inner iteration because we may
320 * be racing against another thread's ww_mutex_lock.
321 */
322 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
323 return false;
324
325 /*
326 * If we aren't on the wait list yet, cancel the spin
327 * if there are waiters. We want to avoid stealing the
328 * lock from a waiter with an earlier stamp, since the
329 * other thread may already own a lock that we also
330 * need.
331 */
332 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
333 return false;
334
335 /*
336 * Similarly, stop spinning if we are no longer the
337 * first waiter.
338 */
339 if (waiter && !__mutex_waiter_is_first(lock, waiter))
340 return false;
341
342 return true;
343}
344
345/*
346 * Look out! "owner" is an entirely speculative pointer access and not
347 * reliable.
348 *
349 * "noinline" so that this function shows up on perf profiles.
350 */
351static noinline
352bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
353 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
354{
355 bool ret = true;
356
357 lockdep_assert_preemption_disabled();
358
359 while (__mutex_owner(lock) == owner) {
360 /*
361 * Ensure we emit the owner->on_cpu, dereference _after_
362 * checking lock->owner still matches owner. And we already
363 * disabled preemption which is equal to the RCU read-side
364 * crital section in optimistic spinning code. Thus the
365 * task_strcut structure won't go away during the spinning
366 * period
367 */
368 barrier();
369
370 /*
371 * Use vcpu_is_preempted to detect lock holder preemption issue.
372 */
373 if (!owner_on_cpu(owner) || need_resched()) {
374 ret = false;
375 break;
376 }
377
378 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
379 ret = false;
380 break;
381 }
382
383 cpu_relax();
384 }
385
386 return ret;
387}
388
389/*
390 * Initial check for entering the mutex spinning loop
391 */
392static inline int mutex_can_spin_on_owner(struct mutex *lock)
393{
394 struct task_struct *owner;
395 int retval = 1;
396
397 lockdep_assert_preemption_disabled();
398
399 if (need_resched())
400 return 0;
401
402 /*
403 * We already disabled preemption which is equal to the RCU read-side
404 * crital section in optimistic spinning code. Thus the task_strcut
405 * structure won't go away during the spinning period.
406 */
407 owner = __mutex_owner(lock);
408 if (owner)
409 retval = owner_on_cpu(owner);
410
411 /*
412 * If lock->owner is not set, the mutex has been released. Return true
413 * such that we'll trylock in the spin path, which is a faster option
414 * than the blocking slow path.
415 */
416 return retval;
417}
418
419/*
420 * Optimistic spinning.
421 *
422 * We try to spin for acquisition when we find that the lock owner
423 * is currently running on a (different) CPU and while we don't
424 * need to reschedule. The rationale is that if the lock owner is
425 * running, it is likely to release the lock soon.
426 *
427 * The mutex spinners are queued up using MCS lock so that only one
428 * spinner can compete for the mutex. However, if mutex spinning isn't
429 * going to happen, there is no point in going through the lock/unlock
430 * overhead.
431 *
432 * Returns true when the lock was taken, otherwise false, indicating
433 * that we need to jump to the slowpath and sleep.
434 *
435 * The waiter flag is set to true if the spinner is a waiter in the wait
436 * queue. The waiter-spinner will spin on the lock directly and concurrently
437 * with the spinner at the head of the OSQ, if present, until the owner is
438 * changed to itself.
439 */
440static __always_inline bool
441mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
442 struct mutex_waiter *waiter)
443{
444 if (!waiter) {
445 /*
446 * The purpose of the mutex_can_spin_on_owner() function is
447 * to eliminate the overhead of osq_lock() and osq_unlock()
448 * in case spinning isn't possible. As a waiter-spinner
449 * is not going to take OSQ lock anyway, there is no need
450 * to call mutex_can_spin_on_owner().
451 */
452 if (!mutex_can_spin_on_owner(lock))
453 goto fail;
454
455 /*
456 * In order to avoid a stampede of mutex spinners trying to
457 * acquire the mutex all at once, the spinners need to take a
458 * MCS (queued) lock first before spinning on the owner field.
459 */
460 if (!osq_lock(&lock->osq))
461 goto fail;
462 }
463
464 for (;;) {
465 struct task_struct *owner;
466
467 /* Try to acquire the mutex... */
468 owner = __mutex_trylock_or_owner(lock);
469 if (!owner)
470 break;
471
472 /*
473 * There's an owner, wait for it to either
474 * release the lock or go to sleep.
475 */
476 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
477 goto fail_unlock;
478
479 /*
480 * The cpu_relax() call is a compiler barrier which forces
481 * everything in this loop to be re-loaded. We don't need
482 * memory barriers as we'll eventually observe the right
483 * values at the cost of a few extra spins.
484 */
485 cpu_relax();
486 }
487
488 if (!waiter)
489 osq_unlock(&lock->osq);
490
491 return true;
492
493
494fail_unlock:
495 if (!waiter)
496 osq_unlock(&lock->osq);
497
498fail:
499 /*
500 * If we fell out of the spin path because of need_resched(),
501 * reschedule now, before we try-lock the mutex. This avoids getting
502 * scheduled out right after we obtained the mutex.
503 */
504 if (need_resched()) {
505 /*
506 * We _should_ have TASK_RUNNING here, but just in case
507 * we do not, make it so, otherwise we might get stuck.
508 */
509 __set_current_state(TASK_RUNNING);
510 schedule_preempt_disabled();
511 }
512
513 return false;
514}
515#else
516static __always_inline bool
517mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
518 struct mutex_waiter *waiter)
519{
520 return false;
521}
522#endif
523
524static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
525
526/**
527 * mutex_unlock - release the mutex
528 * @lock: the mutex to be released
529 *
530 * Unlock a mutex that has been locked by this task previously.
531 *
532 * This function must not be used in interrupt context. Unlocking
533 * of a not locked mutex is not allowed.
534 *
535 * This function is similar to (but not equivalent to) up().
536 */
537void __sched mutex_unlock(struct mutex *lock)
538{
539#ifndef CONFIG_DEBUG_LOCK_ALLOC
540 if (__mutex_unlock_fast(lock))
541 return;
542#endif
543 __mutex_unlock_slowpath(lock, _RET_IP_);
544}
545EXPORT_SYMBOL(mutex_unlock);
546
547/**
548 * ww_mutex_unlock - release the w/w mutex
549 * @lock: the mutex to be released
550 *
551 * Unlock a mutex that has been locked by this task previously with any of the
552 * ww_mutex_lock* functions (with or without an acquire context). It is
553 * forbidden to release the locks after releasing the acquire context.
554 *
555 * This function must not be used in interrupt context. Unlocking
556 * of a unlocked mutex is not allowed.
557 */
558void __sched ww_mutex_unlock(struct ww_mutex *lock)
559{
560 __ww_mutex_unlock(lock);
561 mutex_unlock(&lock->base);
562}
563EXPORT_SYMBOL(ww_mutex_unlock);
564
565/*
566 * Lock a mutex (possibly interruptible), slowpath:
567 */
568static __always_inline int __sched
569__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
570 struct lockdep_map *nest_lock, unsigned long ip,
571 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
572{
573 struct mutex_waiter waiter;
574 struct ww_mutex *ww;
575 int ret;
576
577 if (!use_ww_ctx)
578 ww_ctx = NULL;
579
580 might_sleep();
581
582 MUTEX_WARN_ON(lock->magic != lock);
583
584 ww = container_of(lock, struct ww_mutex, base);
585 if (ww_ctx) {
586 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
587 return -EALREADY;
588
589 /*
590 * Reset the wounded flag after a kill. No other process can
591 * race and wound us here since they can't have a valid owner
592 * pointer if we don't have any locks held.
593 */
594 if (ww_ctx->acquired == 0)
595 ww_ctx->wounded = 0;
596
597#ifdef CONFIG_DEBUG_LOCK_ALLOC
598 nest_lock = &ww_ctx->dep_map;
599#endif
600 }
601
602 preempt_disable();
603 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
604
605 trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
606 if (__mutex_trylock(lock) ||
607 mutex_optimistic_spin(lock, ww_ctx, NULL)) {
608 /* got the lock, yay! */
609 lock_acquired(&lock->dep_map, ip);
610 if (ww_ctx)
611 ww_mutex_set_context_fastpath(ww, ww_ctx);
612 trace_contention_end(lock, 0);
613 preempt_enable();
614 return 0;
615 }
616
617 raw_spin_lock(&lock->wait_lock);
618 /*
619 * After waiting to acquire the wait_lock, try again.
620 */
621 if (__mutex_trylock(lock)) {
622 if (ww_ctx)
623 __ww_mutex_check_waiters(lock, ww_ctx);
624
625 goto skip_wait;
626 }
627
628 debug_mutex_lock_common(lock, &waiter);
629 waiter.task = current;
630 if (use_ww_ctx)
631 waiter.ww_ctx = ww_ctx;
632
633 lock_contended(&lock->dep_map, ip);
634
635 if (!use_ww_ctx) {
636 /* add waiting tasks to the end of the waitqueue (FIFO): */
637 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
638 } else {
639 /*
640 * Add in stamp order, waking up waiters that must kill
641 * themselves.
642 */
643 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
644 if (ret)
645 goto err_early_kill;
646 }
647
648 set_current_state(state);
649 trace_contention_begin(lock, LCB_F_MUTEX);
650 for (;;) {
651 bool first;
652
653 /*
654 * Once we hold wait_lock, we're serialized against
655 * mutex_unlock() handing the lock off to us, do a trylock
656 * before testing the error conditions to make sure we pick up
657 * the handoff.
658 */
659 if (__mutex_trylock(lock))
660 goto acquired;
661
662 /*
663 * Check for signals and kill conditions while holding
664 * wait_lock. This ensures the lock cancellation is ordered
665 * against mutex_unlock() and wake-ups do not go missing.
666 */
667 if (signal_pending_state(state, current)) {
668 ret = -EINTR;
669 goto err;
670 }
671
672 if (ww_ctx) {
673 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
674 if (ret)
675 goto err;
676 }
677
678 raw_spin_unlock(&lock->wait_lock);
679 schedule_preempt_disabled();
680
681 first = __mutex_waiter_is_first(lock, &waiter);
682
683 set_current_state(state);
684 /*
685 * Here we order against unlock; we must either see it change
686 * state back to RUNNING and fall through the next schedule(),
687 * or we must see its unlock and acquire.
688 */
689 if (__mutex_trylock_or_handoff(lock, first))
690 break;
691
692 if (first) {
693 trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
694 if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
695 break;
696 trace_contention_begin(lock, LCB_F_MUTEX);
697 }
698
699 raw_spin_lock(&lock->wait_lock);
700 }
701 raw_spin_lock(&lock->wait_lock);
702acquired:
703 __set_current_state(TASK_RUNNING);
704
705 if (ww_ctx) {
706 /*
707 * Wound-Wait; we stole the lock (!first_waiter), check the
708 * waiters as anyone might want to wound us.
709 */
710 if (!ww_ctx->is_wait_die &&
711 !__mutex_waiter_is_first(lock, &waiter))
712 __ww_mutex_check_waiters(lock, ww_ctx);
713 }
714
715 __mutex_remove_waiter(lock, &waiter);
716
717 debug_mutex_free_waiter(&waiter);
718
719skip_wait:
720 /* got the lock - cleanup and rejoice! */
721 lock_acquired(&lock->dep_map, ip);
722 trace_contention_end(lock, 0);
723
724 if (ww_ctx)
725 ww_mutex_lock_acquired(ww, ww_ctx);
726
727 raw_spin_unlock(&lock->wait_lock);
728 preempt_enable();
729 return 0;
730
731err:
732 __set_current_state(TASK_RUNNING);
733 __mutex_remove_waiter(lock, &waiter);
734err_early_kill:
735 trace_contention_end(lock, ret);
736 raw_spin_unlock(&lock->wait_lock);
737 debug_mutex_free_waiter(&waiter);
738 mutex_release(&lock->dep_map, ip);
739 preempt_enable();
740 return ret;
741}
742
743static int __sched
744__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
745 struct lockdep_map *nest_lock, unsigned long ip)
746{
747 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
748}
749
750static int __sched
751__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
752 unsigned long ip, struct ww_acquire_ctx *ww_ctx)
753{
754 return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
755}
756
757/**
758 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
759 * @ww: mutex to lock
760 * @ww_ctx: optional w/w acquire context
761 *
762 * Trylocks a mutex with the optional acquire context; no deadlock detection is
763 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
764 *
765 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if a @ctx is
766 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
767 *
768 * A mutex acquired with this function must be released with ww_mutex_unlock.
769 */
770int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
771{
772 if (!ww_ctx)
773 return mutex_trylock(&ww->base);
774
775 MUTEX_WARN_ON(ww->base.magic != &ww->base);
776
777 /*
778 * Reset the wounded flag after a kill. No other process can
779 * race and wound us here, since they can't have a valid owner
780 * pointer if we don't have any locks held.
781 */
782 if (ww_ctx->acquired == 0)
783 ww_ctx->wounded = 0;
784
785 if (__mutex_trylock(&ww->base)) {
786 ww_mutex_set_context_fastpath(ww, ww_ctx);
787 mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
788 return 1;
789 }
790
791 return 0;
792}
793EXPORT_SYMBOL(ww_mutex_trylock);
794
795#ifdef CONFIG_DEBUG_LOCK_ALLOC
796void __sched
797mutex_lock_nested(struct mutex *lock, unsigned int subclass)
798{
799 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
800}
801
802EXPORT_SYMBOL_GPL(mutex_lock_nested);
803
804void __sched
805_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
806{
807 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
808}
809EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
810
811int __sched
812mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
813{
814 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
815}
816EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
817
818int __sched
819mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
820{
821 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
822}
823EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
824
825void __sched
826mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
827{
828 int token;
829
830 might_sleep();
831
832 token = io_schedule_prepare();
833 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
834 subclass, NULL, _RET_IP_, NULL, 0);
835 io_schedule_finish(token);
836}
837EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
838
839static inline int
840ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
841{
842#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
843 unsigned tmp;
844
845 if (ctx->deadlock_inject_countdown-- == 0) {
846 tmp = ctx->deadlock_inject_interval;
847 if (tmp > UINT_MAX/4)
848 tmp = UINT_MAX;
849 else
850 tmp = tmp*2 + tmp + tmp/2;
851
852 ctx->deadlock_inject_interval = tmp;
853 ctx->deadlock_inject_countdown = tmp;
854 ctx->contending_lock = lock;
855
856 ww_mutex_unlock(lock);
857
858 return -EDEADLK;
859 }
860#endif
861
862 return 0;
863}
864
865int __sched
866ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
867{
868 int ret;
869
870 might_sleep();
871 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
872 0, _RET_IP_, ctx);
873 if (!ret && ctx && ctx->acquired > 1)
874 return ww_mutex_deadlock_injection(lock, ctx);
875
876 return ret;
877}
878EXPORT_SYMBOL_GPL(ww_mutex_lock);
879
880int __sched
881ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
882{
883 int ret;
884
885 might_sleep();
886 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
887 0, _RET_IP_, ctx);
888
889 if (!ret && ctx && ctx->acquired > 1)
890 return ww_mutex_deadlock_injection(lock, ctx);
891
892 return ret;
893}
894EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
895
896#endif
897
898/*
899 * Release the lock, slowpath:
900 */
901static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
902{
903 struct task_struct *next = NULL;
904 DEFINE_WAKE_Q(wake_q);
905 unsigned long owner;
906
907 mutex_release(&lock->dep_map, ip);
908
909 /*
910 * Release the lock before (potentially) taking the spinlock such that
911 * other contenders can get on with things ASAP.
912 *
913 * Except when HANDOFF, in that case we must not clear the owner field,
914 * but instead set it to the top waiter.
915 */
916 owner = atomic_long_read(&lock->owner);
917 for (;;) {
918 MUTEX_WARN_ON(__owner_task(owner) != current);
919 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
920
921 if (owner & MUTEX_FLAG_HANDOFF)
922 break;
923
924 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
925 if (owner & MUTEX_FLAG_WAITERS)
926 break;
927
928 return;
929 }
930 }
931
932 raw_spin_lock(&lock->wait_lock);
933 debug_mutex_unlock(lock);
934 if (!list_empty(&lock->wait_list)) {
935 /* get the first entry from the wait-list: */
936 struct mutex_waiter *waiter =
937 list_first_entry(&lock->wait_list,
938 struct mutex_waiter, list);
939
940 next = waiter->task;
941
942 debug_mutex_wake_waiter(lock, waiter);
943 wake_q_add(&wake_q, next);
944 }
945
946 if (owner & MUTEX_FLAG_HANDOFF)
947 __mutex_handoff(lock, next);
948
949 raw_spin_unlock(&lock->wait_lock);
950
951 wake_up_q(&wake_q);
952}
953
954#ifndef CONFIG_DEBUG_LOCK_ALLOC
955/*
956 * Here come the less common (and hence less performance-critical) APIs:
957 * mutex_lock_interruptible() and mutex_trylock().
958 */
959static noinline int __sched
960__mutex_lock_killable_slowpath(struct mutex *lock);
961
962static noinline int __sched
963__mutex_lock_interruptible_slowpath(struct mutex *lock);
964
965/**
966 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
967 * @lock: The mutex to be acquired.
968 *
969 * Lock the mutex like mutex_lock(). If a signal is delivered while the
970 * process is sleeping, this function will return without acquiring the
971 * mutex.
972 *
973 * Context: Process context.
974 * Return: 0 if the lock was successfully acquired or %-EINTR if a
975 * signal arrived.
976 */
977int __sched mutex_lock_interruptible(struct mutex *lock)
978{
979 might_sleep();
980
981 if (__mutex_trylock_fast(lock))
982 return 0;
983
984 return __mutex_lock_interruptible_slowpath(lock);
985}
986
987EXPORT_SYMBOL(mutex_lock_interruptible);
988
989/**
990 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
991 * @lock: The mutex to be acquired.
992 *
993 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
994 * the current process is delivered while the process is sleeping, this
995 * function will return without acquiring the mutex.
996 *
997 * Context: Process context.
998 * Return: 0 if the lock was successfully acquired or %-EINTR if a
999 * fatal signal arrived.
1000 */
1001int __sched mutex_lock_killable(struct mutex *lock)
1002{
1003 might_sleep();
1004
1005 if (__mutex_trylock_fast(lock))
1006 return 0;
1007
1008 return __mutex_lock_killable_slowpath(lock);
1009}
1010EXPORT_SYMBOL(mutex_lock_killable);
1011
1012/**
1013 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1014 * @lock: The mutex to be acquired.
1015 *
1016 * Lock the mutex like mutex_lock(). While the task is waiting for this
1017 * mutex, it will be accounted as being in the IO wait state by the
1018 * scheduler.
1019 *
1020 * Context: Process context.
1021 */
1022void __sched mutex_lock_io(struct mutex *lock)
1023{
1024 int token;
1025
1026 token = io_schedule_prepare();
1027 mutex_lock(lock);
1028 io_schedule_finish(token);
1029}
1030EXPORT_SYMBOL_GPL(mutex_lock_io);
1031
1032static noinline void __sched
1033__mutex_lock_slowpath(struct mutex *lock)
1034{
1035 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1036}
1037
1038static noinline int __sched
1039__mutex_lock_killable_slowpath(struct mutex *lock)
1040{
1041 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1042}
1043
1044static noinline int __sched
1045__mutex_lock_interruptible_slowpath(struct mutex *lock)
1046{
1047 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1048}
1049
1050static noinline int __sched
1051__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1052{
1053 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
1054 _RET_IP_, ctx);
1055}
1056
1057static noinline int __sched
1058__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1059 struct ww_acquire_ctx *ctx)
1060{
1061 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
1062 _RET_IP_, ctx);
1063}
1064
1065#endif
1066
1067/**
1068 * mutex_trylock - try to acquire the mutex, without waiting
1069 * @lock: the mutex to be acquired
1070 *
1071 * Try to acquire the mutex atomically. Returns 1 if the mutex
1072 * has been acquired successfully, and 0 on contention.
1073 *
1074 * NOTE: this function follows the spin_trylock() convention, so
1075 * it is negated from the down_trylock() return values! Be careful
1076 * about this when converting semaphore users to mutexes.
1077 *
1078 * This function must not be used in interrupt context. The
1079 * mutex must be released by the same task that acquired it.
1080 */
1081int __sched mutex_trylock(struct mutex *lock)
1082{
1083 bool locked;
1084
1085 MUTEX_WARN_ON(lock->magic != lock);
1086
1087 locked = __mutex_trylock(lock);
1088 if (locked)
1089 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1090
1091 return locked;
1092}
1093EXPORT_SYMBOL(mutex_trylock);
1094
1095#ifndef CONFIG_DEBUG_LOCK_ALLOC
1096int __sched
1097ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1098{
1099 might_sleep();
1100
1101 if (__mutex_trylock_fast(&lock->base)) {
1102 if (ctx)
1103 ww_mutex_set_context_fastpath(lock, ctx);
1104 return 0;
1105 }
1106
1107 return __ww_mutex_lock_slowpath(lock, ctx);
1108}
1109EXPORT_SYMBOL(ww_mutex_lock);
1110
1111int __sched
1112ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1113{
1114 might_sleep();
1115
1116 if (__mutex_trylock_fast(&lock->base)) {
1117 if (ctx)
1118 ww_mutex_set_context_fastpath(lock, ctx);
1119 return 0;
1120 }
1121
1122 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1123}
1124EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1125
1126#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
1127#endif /* !CONFIG_PREEMPT_RT */
1128
1129/**
1130 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1131 * @cnt: the atomic which we are to dec
1132 * @lock: the mutex to return holding if we dec to 0
1133 *
1134 * return true and hold lock if we dec to 0, return false otherwise
1135 */
1136int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1137{
1138 /* dec if we can't possibly hit 0 */
1139 if (atomic_add_unless(cnt, -1, 1))
1140 return 0;
1141 /* we might hit 0, so take the lock */
1142 mutex_lock(lock);
1143 if (!atomic_dec_and_test(cnt)) {
1144 /* when we actually did the dec, we didn't hit 0 */
1145 mutex_unlock(lock);
1146 return 0;
1147 }
1148 /* we hit 0, and we hold the lock */
1149 return 1;
1150}
1151EXPORT_SYMBOL(atomic_dec_and_mutex_lock);