/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include "mcs_spinlock.h"

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->osq = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
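
/*
 * Illustrative usage sketch (not part of this file; all names below are
 * hypothetical): a mutex serializing updates to shared state. The block is
 * compiled out and only documents the intended lock/unlock pairing.
 */
#if 0 /* example only */
static DEFINE_MUTEX(example_lock);	/* statically initialized mutex */
static int example_count;		/* shared state guarded by example_lock */

static void example_increment(void)
{
	mutex_lock(&example_lock);	/* sleeps until the mutex is acquired */
	example_count++;		/* critical section */
	mutex_unlock(&example_lock);	/* must be released by the same task */
}
#endif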

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * first before spinning on the owner field.
 */

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changes, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the owner may have just acquired the
	 * mutex and not yet set the owner field, or the mutex may have been
	 * released.
	 */
	return retval;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely:
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner prematurely:
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
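
/*
 * Worked example for the stamp check above (hypothetical values): stamps
 * are unsigned sequence numbers, so "ctx is younger than hold_ctx" is
 * tested with wraparound-safe arithmetic. With hold_ctx->stamp == 10 and
 * ctx->stamp == 12, ctx->stamp - hold_ctx->stamp == 2 <= LONG_MAX, so the
 * younger context backs off with -EDEADLK. With ctx->stamp == 9, the
 * difference wraps around to a value > LONG_MAX and the older context is
 * allowed to keep waiting for the lock.
 */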

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * after acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	if (!osq_lock(&lock->osq))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
	osq_unlock(&lock->osq);
slowpath:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched())
		schedule_preempt_disabled();
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	/* once more, can we acquire the lock? */
	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have
	 * to unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
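
/*
 * Illustrative sketch (hypothetical names, compiled out): because
 * mutex_trylock() follows the spin_trylock() convention, non-zero means
 * success - the opposite of down_trylock().
 */
#if 0 /* example only */
static DEFINE_MUTEX(example_lock);

static void example_poll_work(void)
{
	if (mutex_trylock(&example_lock)) {
		/* got the lock without sleeping; do the work */
		mutex_unlock(&example_lock);
	}
	/* on 0: somebody else holds the lock, try again later */
}
#endif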

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
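
/*
 * Illustrative decoding sketch (hypothetical values): with a task_struct at,
 * say, 0xffff888012345600, an owner value of 0xffff888012345601 encodes that
 * task as owner with MUTEX_FLAG_WAITERS set; masking with ~MUTEX_FLAGS
 * recovers the pointer and masking with MUTEX_FLAGS recovers the flags, as
 * the helpers below do.
 */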

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only
	 * by acquiring wait_lock is there a guarantee that
	 * they are valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. We already
		 * disabled preemption, which is equivalent to an RCU
		 * read-side critical section in the optimistic spinning
		 * code, so the task_struct won't go away during the
		 * spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to an RCU
	 * read-side critical section in the optimistic spinning code. Thus
	 * the task_struct won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * The caller must ensure that the mutex stays alive until this function has
 * returned - mutex_unlock() can NOT directly be used to release an object such
 * that another concurrent task can free it.
 * Mutexes are different from spinlocks & refcounts in this aspect.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx
 * is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif
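
/*
 * Illustrative sketch (hypothetical names, compiled out): the usual
 * -EDEADLK backoff loop for taking two ww_mutexes in arbitrary order.
 * ww_acquire_init(), ww_mutex_lock_slow() and friends come from
 * <linux/ww_mutex.h>.
 */
#if 0 /* example only */
static DEFINE_WW_CLASS(example_ww_class);

static void example_lock_both(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *first = a, *second = b;

	ww_acquire_init(&ctx, &example_ww_class);

	ww_mutex_lock(first, &ctx);	/* first lock in a context won't deadlock */
	while (ww_mutex_lock(second, &ctx) == -EDEADLK) {
		/*
		 * We are the younger context: back off, then sleep on the
		 * contended lock and retry in the swapped order.
		 */
		ww_mutex_unlock(first);
		ww_mutex_lock_slow(second, &ctx);
		swap(first, second);
	}
	ww_acquire_done(&ctx);

	/* ... both a and b are held ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
#endif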

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
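
/*
 * Illustrative sketch (hypothetical names, compiled out): the usual pattern
 * for an interruptible acquisition is to propagate the error so a pending
 * signal is handled instead of blocking forever.
 */
#if 0 /* example only */
static DEFINE_MUTEX(example_lock);

static int example_op(void)
{
	if (mutex_lock_interruptible(&example_lock))
		return -EINTR;	/* a signal arrived while sleeping */
	/* ... critical section ... */
	mutex_unlock(&example_lock);
	return 0;
}
#endif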

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
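
/*
 * Illustrative sketch (hypothetical names, compiled out): mutex_lock_io()
 * fits when the lock is known to be held across actual I/O, so blocked
 * waiters are accounted as iowait rather than ordinary sleep.
 */
#if 0 /* example only */
static DEFINE_MUTEX(example_io_lock);

static void example_issue_io(void)
{
	mutex_lock_io(&example_io_lock);	/* waiters count as iowait */
	/* ... submit and wait for I/O while holding the lock ... */
	mutex_unlock(&example_io_lock);
}
#endif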

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
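
/*
 * Illustrative sketch (hypothetical type and names, compiled out): the
 * canonical atomic_dec_and_mutex_lock() user is a release path that must
 * tear down an object under a mutex only when the last reference is gone.
 */
#if 0 /* example only */
static DEFINE_MUTEX(example_registry_lock);

struct example_obj {
	atomic_t refcnt;
	struct list_head node;
};

static void example_put(struct example_obj *obj)
{
	/* returns 1, with the mutex held, only for the final reference */
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &example_registry_lock))
		return;

	/* last reference: unlink and free under the lock */
	list_del(&obj->node);
	mutex_unlock(&example_registry_lock);
	kfree(obj);
}
#endif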