v3.15
  1/*
  2 * kernel/locking/mutex.c
  3 *
  4 * Mutexes: blocking mutual exclusion locks
  5 *
  6 * Started by Ingo Molnar:
  7 *
  8 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  9 *
 10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 11 * David Howells for suggestions and improvements.
 12 *
 13 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 14 *    from the -rt tree, where it was originally implemented for rtmutexes
 15 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 16 *    and Sven Dietrich.
 17 *
 18 * Also see Documentation/mutex-design.txt.
 19 */
 20#include <linux/mutex.h>
 21#include <linux/ww_mutex.h>
 22#include <linux/sched.h>
 23#include <linux/sched/rt.h>
 24#include <linux/export.h>
 25#include <linux/spinlock.h>
 26#include <linux/interrupt.h>
 27#include <linux/debug_locks.h>
 28#include "mcs_spinlock.h"
 29
 30/*
 31 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 32 * which forces all calls into the slowpath:
 33 */
 34#ifdef CONFIG_DEBUG_MUTEXES
 35# include "mutex-debug.h"
 36# include <asm-generic/mutex-null.h>
 37/*
 38 * Must be 0 for the debug case so we do not do the unlock outside of the
 39 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 40 * case.
 41 */
 42# undef __mutex_slowpath_needs_to_unlock
 43# define  __mutex_slowpath_needs_to_unlock()	0
 44#else
 45# include "mutex.h"
 46# include <asm/mutex.h>
 47#endif
 48
 49/*
 50 * A negative mutex count indicates that waiters are sleeping waiting for the
 51 * mutex.
 52 */
 53#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
 54
 55void
 56__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 57{
 58	atomic_set(&lock->count, 1);
 59	spin_lock_init(&lock->wait_lock);
 60	INIT_LIST_HEAD(&lock->wait_list);
 61	mutex_clear_owner(lock);
 62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 63	lock->osq = NULL;
 64#endif
 65
 66	debug_mutex_init(lock, name, key);
 67}
 68
 69EXPORT_SYMBOL(__mutex_init);
 70
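For orientation, a minimal usage sketch of the two initialization paths that end up in __mutex_init() above: DEFINE_MUTEX() for a statically allocated lock, and mutex_init() for a lock embedded in a dynamically allocated object. Both helpers come from <linux/mutex.h>; struct my_dev and my_dev_setup() are hypothetical names used only for this example.

#include <linux/mutex.h>

/* Statically allocated mutex, initialized at compile time. */
static DEFINE_MUTEX(my_global_lock);

/* Hypothetical object with an embedded mutex. */
struct my_dev {
	struct mutex io_lock;
	int users;
};

static void my_dev_setup(struct my_dev *dev)
{
	/*
	 * mutex_init() expands to __mutex_init() with a static
	 * lock_class_key, so each init site gets its own lockdep class.
	 */
	mutex_init(&dev->io_lock);
	dev->users = 0;
}
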
 71#ifndef CONFIG_DEBUG_LOCK_ALLOC
 72/*
 73 * We split the mutex lock/unlock logic into separate fastpath and
 74 * slowpath functions, to reduce the register pressure on the fastpath.
 75 * We also put the fastpath first in the kernel image, to make sure the
 76 * branch is predicted by the CPU as default-untaken.
 77 */
 78__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 79
 80/**
 81 * mutex_lock - acquire the mutex
 82 * @lock: the mutex to be acquired
 83 *
 84 * Lock the mutex exclusively for this task. If the mutex is not
 85 * available right now, it will sleep until it can get it.
 86 *
 87 * The mutex must later on be released by the same task that
 88 * acquired it. Recursive locking is not allowed. The task
 89 * may not exit without first unlocking the mutex. Also, kernel
 90 * memory where the mutex resides must not be freed with
 91 * the mutex still locked. The mutex must first be initialized
 92 * (or statically defined) before it can be locked. memset()-ing
 93 * the mutex to 0 is not allowed.
 94 *
 95 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 96 *   checks that will enforce the restrictions and will also do
 97 *   deadlock debugging. )
 98 *
 99 * This function is similar to (but not equivalent to) down().
100 */
101void __sched mutex_lock(struct mutex *lock)
102{
103	might_sleep();
104	/*
105	 * The locking fastpath is the 1->0 transition from
106	 * 'unlocked' into 'locked' state.
107	 */
108	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
109	mutex_set_owner(lock);
110}
111
112EXPORT_SYMBOL(mutex_lock);
113#endif
114
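To make the kernel-doc rules above concrete, here is a minimal hypothetical caller (using the same made-up struct my_dev as in the earlier sketch): the lock and unlock happen in the same task, the call may sleep, and the critical section is kept short.

static int my_dev_adjust_users(struct my_dev *dev, int delta)
{
	int users;

	mutex_lock(&dev->io_lock);	/* may sleep; never call from IRQ context */
	dev->users += delta;		/* critical section */
	users = dev->users;
	mutex_unlock(&dev->io_lock);	/* released by the same task that locked it */

	return users;
}
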
115#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
116/*
117 * In order to avoid a stampede of mutex spinners from acquiring the mutex
118 * more or less simultaneously, the spinners need to acquire a MCS lock
119 * first before spinning on the owner field.
120 *
121 */
122
123/*
124 * Mutex spinning code migrated from kernel/sched/core.c
125 */
126
127static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
128{
129	if (lock->owner != owner)
130		return false;
131
132	/*
133	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
134	 * lock->owner still matches owner, if that fails, owner might
135	 * point to free()d memory, if it still matches, the rcu_read_lock()
136	 * ensures the memory stays valid.
137	 */
138	barrier();
139
140	return owner->on_cpu;
141}
142
143/*
144 * Look out! "owner" is an entirely speculative pointer
145 * access and not reliable.
146 */
147static noinline
148int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
149{
150	rcu_read_lock();
151	while (owner_running(lock, owner)) {
152		if (need_resched())
153			break;
154
155		arch_mutex_cpu_relax();
156	}
157	rcu_read_unlock();
158
159	/*
160	 * We break out the loop above on need_resched() and when the
161	 * owner changed, which is a sign for heavy contention. Return
162	 * success only when lock->owner is NULL.
163	 */
164	return lock->owner == NULL;
165}
166
167/*
168 * Initial check for entering the mutex spinning loop
169 */
170static inline int mutex_can_spin_on_owner(struct mutex *lock)
171{
172	struct task_struct *owner;
173	int retval = 1;
174
175	if (need_resched())
176		return 0;
177
178	rcu_read_lock();
179	owner = ACCESS_ONCE(lock->owner);
180	if (owner)
181		retval = owner->on_cpu;
182	rcu_read_unlock();
183	/*
184	 * if lock->owner is not set, the mutex owner may have just acquired
185	 * it and not set the owner yet or the mutex has been released.
186	 */
187	return retval;
188}
189#endif
190
191__visible __used noinline
192void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
193
194/**
195 * mutex_unlock - release the mutex
196 * @lock: the mutex to be released
197 *
198 * Unlock a mutex that has been locked by this task previously.
199 *
200 * This function must not be used in interrupt context. Unlocking
201 * of a not locked mutex is not allowed.
202 *
203 * This function is similar to (but not equivalent to) up().
204 */
205void __sched mutex_unlock(struct mutex *lock)
206{
207	/*
208	 * The unlocking fastpath is the 0->1 transition from 'locked'
209	 * into 'unlocked' state:
210	 */
211#ifndef CONFIG_DEBUG_MUTEXES
212	/*
213	 * When debugging is enabled we must not clear the owner before time,
214	 * the slow path will always be taken, and that clears the owner field
215	 * after verifying that it was indeed current.
216	 */
217	mutex_clear_owner(lock);
218#endif
219	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
220}
221
222EXPORT_SYMBOL(mutex_unlock);
223
224/**
225 * ww_mutex_unlock - release the w/w mutex
226 * @lock: the mutex to be released
227 *
228 * Unlock a mutex that has been locked by this task previously with any of the
229 * ww_mutex_lock* functions (with or without an acquire context). It is
230 * forbidden to release the locks after releasing the acquire context.
231 *
232 * This function must not be used in interrupt context. Unlocking
233 * of an unlocked mutex is not allowed.
234 */
235void __sched ww_mutex_unlock(struct ww_mutex *lock)
236{
237	/*
238	 * The unlocking fastpath is the 0->1 transition from 'locked'
239	 * into 'unlocked' state:
240	 */
241	if (lock->ctx) {
242#ifdef CONFIG_DEBUG_MUTEXES
243		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
244#endif
245		if (lock->ctx->acquired > 0)
246			lock->ctx->acquired--;
247		lock->ctx = NULL;
248	}
249
250#ifndef CONFIG_DEBUG_MUTEXES
251	/*
252	 * When debugging is enabled we must not clear the owner before time,
253	 * the slow path will always be taken, and that clears the owner field
254	 * after verifying that it was indeed current.
255	 */
256	mutex_clear_owner(&lock->base);
257#endif
258	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
259}
260EXPORT_SYMBOL(ww_mutex_unlock);
261
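The ww_mutex functions above are normally used through the wrappers in <linux/ww_mutex.h> (ww_acquire_init(), ww_mutex_lock(), ww_mutex_lock_slow(), ww_acquire_done(), ww_acquire_fini()). As a hedged sketch of the intended back-off pattern when two locks may be taken in either order, with hypothetical names struct buf, buf_ww_class and lock_two_bufs():

#include <linux/kernel.h>	/* swap() */
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(buf_ww_class);

struct buf {
	struct ww_mutex lock;	/* assume ww_mutex_init(&buf->lock, &buf_ww_class) at creation */
	/* ... payload ... */
};

static void lock_two_bufs(struct buf *a, struct buf *b)
{
	struct buf *first = a, *second = b;
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &buf_ww_class);

	err = ww_mutex_lock(&first->lock, &ctx);
	WARN_ON(err);	/* the first lock of a context is never asked to back off */

	while (ww_mutex_lock(&second->lock, &ctx) == -EDEADLK) {
		/*
		 * We are the younger context: drop what we hold, wait for
		 * the contended lock without the deadlock check, then retry
		 * the remaining lock in the new order.
		 */
		ww_mutex_unlock(&first->lock);
		ww_mutex_lock_slow(&second->lock, &ctx);
		swap(first, second);
	}

	ww_acquire_done(&ctx);

	/* ... both buffers are locked here ... */

	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
	ww_acquire_fini(&ctx);
}
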
262static inline int __sched
263__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
264{
265	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
266	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
267
268	if (!hold_ctx)
269		return 0;
270
271	if (unlikely(ctx == hold_ctx))
272		return -EALREADY;
273
274	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
275	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
276#ifdef CONFIG_DEBUG_MUTEXES
277		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
278		ctx->contending_lock = ww;
279#endif
280		return -EDEADLK;
281	}
282
283	return 0;
284}
285
286static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
287						   struct ww_acquire_ctx *ww_ctx)
288{
289#ifdef CONFIG_DEBUG_MUTEXES
290	/*
291	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
292	 * but released with a normal mutex_unlock in this call.
293	 *
294	 * This should never happen, always use ww_mutex_unlock.
295	 */
296	DEBUG_LOCKS_WARN_ON(ww->ctx);
297
298	/*
299	 * Not quite done after calling ww_acquire_done() ?
300	 */
301	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
302
303	if (ww_ctx->contending_lock) {
304		/*
305		 * After -EDEADLK you tried to
306		 * acquire a different ww_mutex? Bad!
307		 */
308		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
309
310		/*
311		 * You called ww_mutex_lock after receiving -EDEADLK,
312		 * but 'forgot' to unlock everything else first?
313		 */
314		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
315		ww_ctx->contending_lock = NULL;
316	}
317
318	/*
319	 * Naughty, using a different class will lead to undefined behavior!
320	 */
321	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
322#endif
323	ww_ctx->acquired++;
324}
325
326/*
327 * after acquiring lock with fastpath or when we lost out in contested
328 * slowpath, set ctx and wake up any waiters so they can recheck.
329 *
330 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
331 * as the fastpath and opportunistic spinning are disabled in that case.
332 */
333static __always_inline void
334ww_mutex_set_context_fastpath(struct ww_mutex *lock,
335			       struct ww_acquire_ctx *ctx)
336{
337	unsigned long flags;
338	struct mutex_waiter *cur;
339
340	ww_mutex_lock_acquired(lock, ctx);
341
342	lock->ctx = ctx;
343
344	/*
345	 * The lock->ctx update should be visible on all cores before
346	 * the atomic read is done, otherwise contended waiters might be
347	 * missed. The contended waiters will either see ww_ctx == NULL
348	 * and keep spinning, or it will acquire wait_lock, add itself
349	 * to waiter list and sleep.
350	 */
351	smp_mb(); /* ^^^ */
352
353	/*
354	 * Check if lock is contended, if not there is nobody to wake up
355	 */
356	if (likely(atomic_read(&lock->base.count) == 0))
357		return;
358
359	/*
360	 * Uh oh, we raced in fastpath, wake up everyone in this case,
361	 * so they can see the new lock->ctx.
362	 */
363	spin_lock_mutex(&lock->base.wait_lock, flags);
364	list_for_each_entry(cur, &lock->base.wait_list, list) {
365		debug_mutex_wake_waiter(&lock->base, cur);
366		wake_up_process(cur->task);
367	}
368	spin_unlock_mutex(&lock->base.wait_lock, flags);
369}
370
371/*
372 * Lock a mutex (possibly interruptible), slowpath:
373 */
374static __always_inline int __sched
375__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
376		    struct lockdep_map *nest_lock, unsigned long ip,
377		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
378{
379	struct task_struct *task = current;
380	struct mutex_waiter waiter;
381	unsigned long flags;
382	int ret;
383
384	preempt_disable();
385	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
386
387#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
388	/*
389	 * Optimistic spinning.
390	 *
391	 * We try to spin for acquisition when we find that there are no
392	 * pending waiters and the lock owner is currently running on a
393	 * (different) CPU.
394	 *
395	 * The rationale is that if the lock owner is running, it is likely to
396	 * release the lock soon.
397	 *
398	 * Since this needs the lock owner, and this mutex implementation
399	 * doesn't track the owner atomically in the lock field, we need to
400	 * track it non-atomically.
401	 *
402	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
403	 * to serialize everything.
404	 *
405	 * The mutex spinners are queued up using MCS lock so that only one
406	 * spinner can compete for the mutex. However, if mutex spinning isn't
407	 * going to happen, there is no point in going through the lock/unlock
408	 * overhead.
409	 */
410	if (!mutex_can_spin_on_owner(lock))
411		goto slowpath;
412
413	if (!osq_lock(&lock->osq))
414		goto slowpath;
415
416	for (;;) {
417		struct task_struct *owner;
418
419		if (use_ww_ctx && ww_ctx->acquired > 0) {
420			struct ww_mutex *ww;
421
422			ww = container_of(lock, struct ww_mutex, base);
423			/*
424			 * If ww->ctx is set the contents are undefined, only
425			 * by acquiring wait_lock there is a guarantee that
426			 * they are not invalid when reading.
427			 *
428			 * As such, when deadlock detection needs to be
429			 * performed the optimistic spinning cannot be done.
430			 */
431			if (ACCESS_ONCE(ww->ctx))
432				break;
433		}
434
435		/*
436		 * If there's an owner, wait for it to either
437		 * release the lock or go to sleep.
438		 */
439		owner = ACCESS_ONCE(lock->owner);
440		if (owner && !mutex_spin_on_owner(lock, owner))
441			break;
442
443		if ((atomic_read(&lock->count) == 1) &&
444		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
445			lock_acquired(&lock->dep_map, ip);
446			if (use_ww_ctx) {
447				struct ww_mutex *ww;
448				ww = container_of(lock, struct ww_mutex, base);
449
450				ww_mutex_set_context_fastpath(ww, ww_ctx);
451			}
452
453			mutex_set_owner(lock);
454			osq_unlock(&lock->osq);
455			preempt_enable();
456			return 0;
457		}
458
459		/*
460		 * When there's no owner, we might have preempted between the
461		 * owner acquiring the lock and setting the owner field. If
462		 * we're an RT task that will live-lock because we won't let
463		 * the owner complete.
464		 */
465		if (!owner && (need_resched() || rt_task(task)))
466			break;
467
468		/*
469		 * The cpu_relax() call is a compiler barrier which forces
470		 * everything in this loop to be re-loaded. We don't need
471		 * memory barriers as we'll eventually observe the right
472		 * values at the cost of a few extra spins.
473		 */
474		arch_mutex_cpu_relax();
475	}
476	osq_unlock(&lock->osq);
477slowpath:
478	/*
479	 * If we fell out of the spin path because of need_resched(),
480	 * reschedule now, before we try-lock the mutex. This avoids getting
481	 * scheduled out right after we obtained the mutex.
482	 */
483	if (need_resched())
484		schedule_preempt_disabled();
485#endif
486	spin_lock_mutex(&lock->wait_lock, flags);
487
488	/* once more, can we acquire the lock? */
489	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
490		goto skip_wait;
491
492	debug_mutex_lock_common(lock, &waiter);
493	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
494
495	/* add waiting tasks to the end of the waitqueue (FIFO): */
496	list_add_tail(&waiter.list, &lock->wait_list);
497	waiter.task = task;
498
499	lock_contended(&lock->dep_map, ip);
500
501	for (;;) {
502		/*
503		 * Lets try to take the lock again - this is needed even if
504		 * we get here for the first time (shortly after failing to
505		 * acquire the lock), to make sure that we get a wakeup once
506		 * it's unlocked. Later on, if we sleep, this is the
507		 * operation that gives us the lock. We xchg it to -1, so
508		 * that when we release the lock, we properly wake up the
509		 * other waiters:
510		 */
511		if (MUTEX_SHOW_NO_WAITER(lock) &&
512		    (atomic_xchg(&lock->count, -1) == 1))
513			break;
514
515		/*
516		 * got a signal? (This code gets eliminated in the
517		 * TASK_UNINTERRUPTIBLE case.)
518		 */
519		if (unlikely(signal_pending_state(state, task))) {
520			ret = -EINTR;
521			goto err;
522		}
523
524		if (use_ww_ctx && ww_ctx->acquired > 0) {
525			ret = __mutex_lock_check_stamp(lock, ww_ctx);
526			if (ret)
527				goto err;
528		}
529
530		__set_task_state(task, state);
531
532		/* didn't get the lock, go to sleep: */
533		spin_unlock_mutex(&lock->wait_lock, flags);
534		schedule_preempt_disabled();
535		spin_lock_mutex(&lock->wait_lock, flags);
536	}
537	mutex_remove_waiter(lock, &waiter, current_thread_info());
538	/* set it to 0 if there are no waiters left: */
539	if (likely(list_empty(&lock->wait_list)))
540		atomic_set(&lock->count, 0);
541	debug_mutex_free_waiter(&waiter);
542
543skip_wait:
544	/* got the lock - cleanup and rejoice! */
545	lock_acquired(&lock->dep_map, ip);
546	mutex_set_owner(lock);
547
548	if (use_ww_ctx) {
549		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
550		struct mutex_waiter *cur;
551
552		/*
553		 * This branch gets optimized out for the common case,
554		 * and is only important for ww_mutex_lock.
555		 */
556		ww_mutex_lock_acquired(ww, ww_ctx);
557		ww->ctx = ww_ctx;
558
559		/*
560		 * Give any possible sleeping processes the chance to wake up,
561		 * so they can recheck if they have to back off.
562		 */
563		list_for_each_entry(cur, &lock->wait_list, list) {
564			debug_mutex_wake_waiter(lock, cur);
565			wake_up_process(cur->task);
566		}
567	}
568
569	spin_unlock_mutex(&lock->wait_lock, flags);
570	preempt_enable();
571	return 0;
572
573err:
574	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
575	spin_unlock_mutex(&lock->wait_lock, flags);
576	debug_mutex_free_waiter(&waiter);
577	mutex_release(&lock->dep_map, 1, ip);
578	preempt_enable();
579	return ret;
580}
581
582#ifdef CONFIG_DEBUG_LOCK_ALLOC
583void __sched
584mutex_lock_nested(struct mutex *lock, unsigned int subclass)
585{
586	might_sleep();
587	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
588			    subclass, NULL, _RET_IP_, NULL, 0);
589}
590
591EXPORT_SYMBOL_GPL(mutex_lock_nested);
592
593void __sched
594_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
595{
596	might_sleep();
597	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
598			    0, nest, _RET_IP_, NULL, 0);
599}
600
601EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
602
603int __sched
604mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
605{
606	might_sleep();
607	return __mutex_lock_common(lock, TASK_KILLABLE,
608				   subclass, NULL, _RET_IP_, NULL, 0);
609}
610EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
611
612int __sched
613mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
614{
615	might_sleep();
616	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
617				   subclass, NULL, _RET_IP_, NULL, 0);
618}
619
620EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
621
622static inline int
623ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
624{
625#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
626	unsigned tmp;
627
628	if (ctx->deadlock_inject_countdown-- == 0) {
629		tmp = ctx->deadlock_inject_interval;
630		if (tmp > UINT_MAX/4)
631			tmp = UINT_MAX;
632		else
633			tmp = tmp*2 + tmp + tmp/2;
634
635		ctx->deadlock_inject_interval = tmp;
636		ctx->deadlock_inject_countdown = tmp;
637		ctx->contending_lock = lock;
638
639		ww_mutex_unlock(lock);
640
641		return -EDEADLK;
642	}
643#endif
644
645	return 0;
646}
647
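(Aside on the interval update above: tmp*2 + tmp + tmp/2 equals 3.5*tmp with the .5 part rounded down by integer division, so each injected -EDEADLK makes the next injection roughly 3.5 times rarer, until the interval would exceed UINT_MAX/4 and is clamped to UINT_MAX.)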
648int __sched
649__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
650{
651	int ret;
652
653	might_sleep();
654	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
655				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
656	if (!ret && ctx->acquired > 1)
657		return ww_mutex_deadlock_injection(lock, ctx);
658
659	return ret;
660}
661EXPORT_SYMBOL_GPL(__ww_mutex_lock);
662
663int __sched
664__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
665{
666	int ret;
667
668	might_sleep();
669	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
670				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
671
672	if (!ret && ctx->acquired > 1)
673		return ww_mutex_deadlock_injection(lock, ctx);
674
675	return ret;
676}
677EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
678
679#endif
680
681/*
682 * Release the lock, slowpath:
683 */
684static inline void
685__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
686{
687	struct mutex *lock = container_of(lock_count, struct mutex, count);
688	unsigned long flags;
689
690	/*
691	 * some architectures leave the lock unlocked in the fastpath failure
692	 * case, others need to leave it locked. In the latter case we have to
693	 * unlock it here
694	 */
695	if (__mutex_slowpath_needs_to_unlock())
696		atomic_set(&lock->count, 1);
697
698	spin_lock_mutex(&lock->wait_lock, flags);
699	mutex_release(&lock->dep_map, nested, _RET_IP_);
700	debug_mutex_unlock(lock);
701
702	if (!list_empty(&lock->wait_list)) {
703		/* get the first entry from the wait-list: */
704		struct mutex_waiter *waiter =
705				list_entry(lock->wait_list.next,
706					   struct mutex_waiter, list);
707
708		debug_mutex_wake_waiter(lock, waiter);
709
710		wake_up_process(waiter->task);
711	}
712
713	spin_unlock_mutex(&lock->wait_lock, flags);
714}
715
716/*
717 * Release the lock, slowpath:
718 */
719__visible void
720__mutex_unlock_slowpath(atomic_t *lock_count)
721{
722	__mutex_unlock_common_slowpath(lock_count, 1);
723}
724
725#ifndef CONFIG_DEBUG_LOCK_ALLOC
726/*
727 * Here come the less common (and hence less performance-critical) APIs:
728 * mutex_lock_interruptible() and mutex_trylock().
729 */
730static noinline int __sched
731__mutex_lock_killable_slowpath(struct mutex *lock);
732
733static noinline int __sched
734__mutex_lock_interruptible_slowpath(struct mutex *lock);
735
736/**
737 * mutex_lock_interruptible - acquire the mutex, interruptible
738 * @lock: the mutex to be acquired
739 *
740 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
741 * been acquired or sleep until the mutex becomes available. If a
742 * signal arrives while waiting for the lock then this function
743 * returns -EINTR.
744 *
745 * This function is similar to (but not equivalent to) down_interruptible().
746 */
747int __sched mutex_lock_interruptible(struct mutex *lock)
748{
749	int ret;
750
751	might_sleep();
752	ret =  __mutex_fastpath_lock_retval(&lock->count);
753	if (likely(!ret)) {
754		mutex_set_owner(lock);
755		return 0;
756	} else
757		return __mutex_lock_interruptible_slowpath(lock);
758}
759
760EXPORT_SYMBOL(mutex_lock_interruptible);
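A typical caller pattern for the interruptible variant, sketched with the hypothetical my_dev object from the earlier examples (returning -ERESTARTSYS on a signal is the common driver convention, although mutex_lock_interruptible() itself returns -EINTR):

static long my_dev_locked_op(struct my_dev *dev)
{
	if (mutex_lock_interruptible(&dev->io_lock))
		return -ERESTARTSYS;	/* interrupted by a signal while sleeping */

	/* ... critical section ... */

	mutex_unlock(&dev->io_lock);
	return 0;
}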
761
762int __sched mutex_lock_killable(struct mutex *lock)
763{
764	int ret;
765
766	might_sleep();
767	ret = __mutex_fastpath_lock_retval(&lock->count);
768	if (likely(!ret)) {
769		mutex_set_owner(lock);
770		return 0;
771	} else
772		return __mutex_lock_killable_slowpath(lock);
773}
774EXPORT_SYMBOL(mutex_lock_killable);
775
776__visible void __sched
777__mutex_lock_slowpath(atomic_t *lock_count)
778{
779	struct mutex *lock = container_of(lock_count, struct mutex, count);
780
781	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
782			    NULL, _RET_IP_, NULL, 0);
783}
784
785static noinline int __sched
786__mutex_lock_killable_slowpath(struct mutex *lock)
787{
788	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
789				   NULL, _RET_IP_, NULL, 0);
790}
791
792static noinline int __sched
793__mutex_lock_interruptible_slowpath(struct mutex *lock)
794{
795	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
796				   NULL, _RET_IP_, NULL, 0);
797}
798
799static noinline int __sched
800__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
801{
802	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
803				   NULL, _RET_IP_, ctx, 1);
804}
805
806static noinline int __sched
807__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
808					    struct ww_acquire_ctx *ctx)
809{
810	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
811				   NULL, _RET_IP_, ctx, 1);
812}
813
814#endif
815
816/*
817 * Spinlock based trylock, we take the spinlock and check whether we
818 * can get the lock:
819 */
820static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
821{
822	struct mutex *lock = container_of(lock_count, struct mutex, count);
823	unsigned long flags;
824	int prev;
825
826	spin_lock_mutex(&lock->wait_lock, flags);
827
828	prev = atomic_xchg(&lock->count, -1);
829	if (likely(prev == 1)) {
830		mutex_set_owner(lock);
831		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
832	}
833
834	/* Set it back to 0 if there are no waiters: */
835	if (likely(list_empty(&lock->wait_list)))
836		atomic_set(&lock->count, 0);
837
838	spin_unlock_mutex(&lock->wait_lock, flags);
839
840	return prev == 1;
841}
842
843/**
844 * mutex_trylock - try to acquire the mutex, without waiting
845 * @lock: the mutex to be acquired
846 *
847 * Try to acquire the mutex atomically. Returns 1 if the mutex
848 * has been acquired successfully, and 0 on contention.
849 *
850 * NOTE: this function follows the spin_trylock() convention, so
851 * it is negated from the down_trylock() return values! Be careful
852 * about this when converting semaphore users to mutexes.
853 *
854 * This function must not be used in interrupt context. The
855 * mutex must be released by the same task that acquired it.
856 */
857int __sched mutex_trylock(struct mutex *lock)
858{
859	int ret;
860
861	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
862	if (ret)
863		mutex_set_owner(lock);
864
865	return ret;
866}
867EXPORT_SYMBOL(mutex_trylock);
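A small, hypothetical illustration of the return convention spelled out in the kernel-doc above (1 means the lock was taken, 0 means it was not; the opposite of down_trylock()):

static void my_dev_try_flush(struct my_dev *dev)
{
	if (!mutex_trylock(&dev->io_lock))
		return;		/* 0: contended, we do NOT hold the mutex */

	/* ... optional work, the mutex is held here ... */

	mutex_unlock(&dev->io_lock);
}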
868
869#ifndef CONFIG_DEBUG_LOCK_ALLOC
870int __sched
871__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
872{
873	int ret;
874
875	might_sleep();
876
877	ret = __mutex_fastpath_lock_retval(&lock->base.count);
878
879	if (likely(!ret)) {
880		ww_mutex_set_context_fastpath(lock, ctx);
881		mutex_set_owner(&lock->base);
882	} else
883		ret = __ww_mutex_lock_slowpath(lock, ctx);
884	return ret;
885}
886EXPORT_SYMBOL(__ww_mutex_lock);
887
888int __sched
889__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
890{
891	int ret;
892
893	might_sleep();
894
895	ret = __mutex_fastpath_lock_retval(&lock->base.count);
896
897	if (likely(!ret)) {
898		ww_mutex_set_context_fastpath(lock, ctx);
899		mutex_set_owner(&lock->base);
900	} else
901		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
902	return ret;
903}
904EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
905
906#endif
907
908/**
909 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
910 * @cnt: the atomic which we are to dec
911 * @lock: the mutex to return holding if we dec to 0
912 *
913 * return true and hold lock if we dec to 0, return false otherwise
914 */
915int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
916{
917	/* dec if we can't possibly hit 0 */
918	if (atomic_add_unless(cnt, -1, 1))
919		return 0;
920	/* we might hit 0, so take the lock */
921	mutex_lock(lock);
922	if (!atomic_dec_and_test(cnt)) {
923		/* when we actually did the dec, we didn't hit 0 */
924		mutex_unlock(lock);
925		return 0;
926	}
927	/* we hit 0, and we hold the lock */
928	return 1;
929}
930EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
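The usual consumer of atomic_dec_and_mutex_lock() is a "free the object on the last put, under the list lock" pattern. A minimal hedged sketch with hypothetical names (struct my_obj, my_obj_list_lock, my_obj_put()):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(my_obj_list_lock);

struct my_obj {
	atomic_t refs;
	struct list_head node;
};

static void my_obj_put(struct my_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refs, &my_obj_list_lock))
		return;			/* not the last reference; lock not taken */

	/* refs hit zero and my_obj_list_lock is held: safe to unlink and free. */
	list_del(&obj->node);
	mutex_unlock(&my_obj_list_lock);
	kfree(obj);
}
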
v4.6
  1/*
  2 * kernel/locking/mutex.c
  3 *
  4 * Mutexes: blocking mutual exclusion locks
  5 *
  6 * Started by Ingo Molnar:
  7 *
  8 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  9 *
 10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 11 * David Howells for suggestions and improvements.
 12 *
 13 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 14 *    from the -rt tree, where it was originally implemented for rtmutexes
 15 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 16 *    and Sven Dietrich.
 17 *
 18 * Also see Documentation/locking/mutex-design.txt.
 19 */
 20#include <linux/mutex.h>
 21#include <linux/ww_mutex.h>
 22#include <linux/sched.h>
 23#include <linux/sched/rt.h>
 24#include <linux/export.h>
 25#include <linux/spinlock.h>
 26#include <linux/interrupt.h>
 27#include <linux/debug_locks.h>
 28#include <linux/osq_lock.h>
 29
 30/*
 31 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 32 * which forces all calls into the slowpath:
 33 */
 34#ifdef CONFIG_DEBUG_MUTEXES
 35# include "mutex-debug.h"
 36# include <asm-generic/mutex-null.h>
 37/*
 38 * Must be 0 for the debug case so we do not do the unlock outside of the
 39 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 40 * case.
 41 */
 42# undef __mutex_slowpath_needs_to_unlock
 43# define  __mutex_slowpath_needs_to_unlock()	0
 44#else
 45# include "mutex.h"
 46# include <asm/mutex.h>
 47#endif
 48
 49void
 50__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 51{
 52	atomic_set(&lock->count, 1);
 53	spin_lock_init(&lock->wait_lock);
 54	INIT_LIST_HEAD(&lock->wait_list);
 55	mutex_clear_owner(lock);
 56#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 57	osq_lock_init(&lock->osq);
 58#endif
 59
 60	debug_mutex_init(lock, name, key);
 61}
 62
 63EXPORT_SYMBOL(__mutex_init);
 64
 65#ifndef CONFIG_DEBUG_LOCK_ALLOC
 66/*
 67 * We split the mutex lock/unlock logic into separate fastpath and
 68 * slowpath functions, to reduce the register pressure on the fastpath.
 69 * We also put the fastpath first in the kernel image, to make sure the
 70 * branch is predicted by the CPU as default-untaken.
 71 */
 72__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 73
 74/**
 75 * mutex_lock - acquire the mutex
 76 * @lock: the mutex to be acquired
 77 *
 78 * Lock the mutex exclusively for this task. If the mutex is not
 79 * available right now, it will sleep until it can get it.
 80 *
 81 * The mutex must later on be released by the same task that
 82 * acquired it. Recursive locking is not allowed. The task
 83 * may not exit without first unlocking the mutex. Also, kernel
 84 * memory where the mutex resides must not be freed with
 85 * the mutex still locked. The mutex must first be initialized
 86 * (or statically defined) before it can be locked. memset()-ing
 87 * the mutex to 0 is not allowed.
 88 *
 89 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 90 *   checks that will enforce the restrictions and will also do
 91 *   deadlock debugging. )
 92 *
 93 * This function is similar to (but not equivalent to) down().
 94 */
 95void __sched mutex_lock(struct mutex *lock)
 96{
 97	might_sleep();
 98	/*
 99	 * The locking fastpath is the 1->0 transition from
100	 * 'unlocked' into 'locked' state.
101	 */
102	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
103	mutex_set_owner(lock);
104}
105
106EXPORT_SYMBOL(mutex_lock);
107#endif
108
109static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
110						   struct ww_acquire_ctx *ww_ctx)
111{
112#ifdef CONFIG_DEBUG_MUTEXES
113	/*
114	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
115	 * but released with a normal mutex_unlock in this call.
116	 *
117	 * This should never happen, always use ww_mutex_unlock.
118	 */
119	DEBUG_LOCKS_WARN_ON(ww->ctx);
120
121	/*
122	 * Not quite done after calling ww_acquire_done() ?
123	 */
124	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
125
126	if (ww_ctx->contending_lock) {
127		/*
128		 * After -EDEADLK you tried to
129		 * acquire a different ww_mutex? Bad!
130		 */
131		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
132
133		/*
134		 * You called ww_mutex_lock after receiving -EDEADLK,
135		 * but 'forgot' to unlock everything else first?
136		 */
137		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
138		ww_ctx->contending_lock = NULL;
139	}
140
141	/*
142	 * Naughty, using a different class will lead to undefined behavior!
143	 */
144	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
145#endif
146	ww_ctx->acquired++;
147}
148
149/*
150 * After acquiring lock with fastpath or when we lost out in contested
151 * slowpath, set ctx and wake up any waiters so they can recheck.
152 *
153 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
154 * as the fastpath and opportunistic spinning are disabled in that case.
155 */
156static __always_inline void
157ww_mutex_set_context_fastpath(struct ww_mutex *lock,
158			       struct ww_acquire_ctx *ctx)
159{
160	unsigned long flags;
161	struct mutex_waiter *cur;
162
163	ww_mutex_lock_acquired(lock, ctx);
164
165	lock->ctx = ctx;
166
167	/*
168	 * The lock->ctx update should be visible on all cores before
169	 * the atomic read is done, otherwise contended waiters might be
170	 * missed. The contended waiters will either see ww_ctx == NULL
171	 * and keep spinning, or it will acquire wait_lock, add itself
172	 * to waiter list and sleep.
173	 */
174	smp_mb(); /* ^^^ */
175
176	/*
177	 * Check if lock is contended, if not there is nobody to wake up
178	 */
179	if (likely(atomic_read(&lock->base.count) == 0))
180		return;
181
182	/*
183	 * Uh oh, we raced in fastpath, wake up everyone in this case,
184	 * so they can see the new lock->ctx.
185	 */
186	spin_lock_mutex(&lock->base.wait_lock, flags);
187	list_for_each_entry(cur, &lock->base.wait_list, list) {
188		debug_mutex_wake_waiter(&lock->base, cur);
189		wake_up_process(cur->task);
190	}
191	spin_unlock_mutex(&lock->base.wait_lock, flags);
192}
193
194/*
195 * After acquiring lock in the slowpath set ctx and wake up any
196 * waiters so they can recheck.
197 *
198 * Callers must hold the mutex wait_lock.
199 */
200static __always_inline void
201ww_mutex_set_context_slowpath(struct ww_mutex *lock,
202			      struct ww_acquire_ctx *ctx)
203{
204	struct mutex_waiter *cur;
205
206	ww_mutex_lock_acquired(lock, ctx);
207	lock->ctx = ctx;
208
209	/*
210	 * Give any possible sleeping processes the chance to wake up,
211	 * so they can recheck if they have to back off.
212	 */
213	list_for_each_entry(cur, &lock->base.wait_list, list) {
214		debug_mutex_wake_waiter(&lock->base, cur);
215		wake_up_process(cur->task);
216	}
217}
218
219#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
220/*
221 * Look out! "owner" is an entirely speculative pointer
222 * access and not reliable.
223 */
224static noinline
225bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
226{
227	bool ret = true;
228
229	rcu_read_lock();
230	while (lock->owner == owner) {
231		/*
232		 * Ensure we emit the owner->on_cpu, dereference _after_
233		 * checking lock->owner still matches owner. If that fails,
234		 * owner might point to freed memory. If it still matches,
235		 * the rcu_read_lock() ensures the memory stays valid.
236		 */
237		barrier();
238
239		if (!owner->on_cpu || need_resched()) {
240			ret = false;
241			break;
242		}
243
244		cpu_relax_lowlatency();
245	}
246	rcu_read_unlock();
247
248	return ret;
249}
250
251/*
252 * Initial check for entering the mutex spinning loop
253 */
254static inline int mutex_can_spin_on_owner(struct mutex *lock)
255{
256	struct task_struct *owner;
257	int retval = 1;
258
259	if (need_resched())
260		return 0;
261
262	rcu_read_lock();
263	owner = READ_ONCE(lock->owner);
264	if (owner)
265		retval = owner->on_cpu;
266	rcu_read_unlock();
267	/*
268	 * if lock->owner is not set, the mutex owner may have just acquired
269	 * it and not set the owner yet or the mutex has been released.
270	 */
271	return retval;
272}
273
274/*
275 * Atomically try to take the lock when it is available
276 */
277static inline bool mutex_try_to_acquire(struct mutex *lock)
278{
279	return !mutex_is_locked(lock) &&
280		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
281}
282
283/*
284 * Optimistic spinning.
285 *
286 * We try to spin for acquisition when we find that the lock owner
287 * is currently running on a (different) CPU and while we don't
288 * need to reschedule. The rationale is that if the lock owner is
289 * running, it is likely to release the lock soon.
290 *
291 * Since this needs the lock owner, and this mutex implementation
292 * doesn't track the owner atomically in the lock field, we need to
293 * track it non-atomically.
294 *
295 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
296 * to serialize everything.
297 *
298 * The mutex spinners are queued up using MCS lock so that only one
299 * spinner can compete for the mutex. However, if mutex spinning isn't
300 * going to happen, there is no point in going through the lock/unlock
301 * overhead.
302 *
303 * Returns true when the lock was taken, otherwise false, indicating
304 * that we need to jump to the slowpath and sleep.
305 */
306static bool mutex_optimistic_spin(struct mutex *lock,
307				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
308{
309	struct task_struct *task = current;
310
311	if (!mutex_can_spin_on_owner(lock))
312		goto done;
313
314	/*
315	 * In order to avoid a stampede of mutex spinners trying to
316	 * acquire the mutex all at once, the spinners need to take a
317	 * MCS (queued) lock first before spinning on the owner field.
318	 */
319	if (!osq_lock(&lock->osq))
320		goto done;
321
322	while (true) {
323		struct task_struct *owner;
324
325		if (use_ww_ctx && ww_ctx->acquired > 0) {
326			struct ww_mutex *ww;
327
328			ww = container_of(lock, struct ww_mutex, base);
329			/*
330			 * If ww->ctx is set the contents are undefined, only
331			 * by acquiring wait_lock there is a guarantee that
332			 * they are not invalid when reading.
333			 *
334			 * As such, when deadlock detection needs to be
335			 * performed the optimistic spinning cannot be done.
336			 */
337			if (READ_ONCE(ww->ctx))
338				break;
339		}
340
341		/*
342		 * If there's an owner, wait for it to either
343		 * release the lock or go to sleep.
344		 */
345		owner = READ_ONCE(lock->owner);
346		if (owner && !mutex_spin_on_owner(lock, owner))
347			break;
348
349		/* Try to acquire the mutex if it is unlocked. */
350		if (mutex_try_to_acquire(lock)) {
351			lock_acquired(&lock->dep_map, ip);
352
353			if (use_ww_ctx) {
354				struct ww_mutex *ww;
355				ww = container_of(lock, struct ww_mutex, base);
356
357				ww_mutex_set_context_fastpath(ww, ww_ctx);
358			}
359
360			mutex_set_owner(lock);
361			osq_unlock(&lock->osq);
362			return true;
363		}
364
365		/*
366		 * When there's no owner, we might have preempted between the
367		 * owner acquiring the lock and setting the owner field. If
368		 * we're an RT task that will live-lock because we won't let
369		 * the owner complete.
370		 */
371		if (!owner && (need_resched() || rt_task(task)))
372			break;
373
374		/*
375		 * The cpu_relax() call is a compiler barrier which forces
376		 * everything in this loop to be re-loaded. We don't need
377		 * memory barriers as we'll eventually observe the right
378		 * values at the cost of a few extra spins.
379		 */
380		cpu_relax_lowlatency();
381	}
382
383	osq_unlock(&lock->osq);
384done:
385	/*
386	 * If we fell out of the spin path because of need_resched(),
387	 * reschedule now, before we try-lock the mutex. This avoids getting
388	 * scheduled out right after we obtained the mutex.
389	 */
390	if (need_resched()) {
391		/*
392		 * We _should_ have TASK_RUNNING here, but just in case
393		 * we do not, make it so, otherwise we might get stuck.
394		 */
395		__set_current_state(TASK_RUNNING);
396		schedule_preempt_disabled();
397	}
398
399	return false;
400}
401#else
402static bool mutex_optimistic_spin(struct mutex *lock,
403				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
404{
405	return false;
406}
407#endif
408
409__visible __used noinline
410void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
411
412/**
413 * mutex_unlock - release the mutex
414 * @lock: the mutex to be released
415 *
416 * Unlock a mutex that has been locked by this task previously.
417 *
418 * This function must not be used in interrupt context. Unlocking
419 * of a not locked mutex is not allowed.
420 *
421 * This function is similar to (but not equivalent to) up().
422 */
423void __sched mutex_unlock(struct mutex *lock)
424{
425	/*
426	 * The unlocking fastpath is the 0->1 transition from 'locked'
427	 * into 'unlocked' state:
428	 */
429#ifndef CONFIG_DEBUG_MUTEXES
430	/*
431	 * When debugging is enabled we must not clear the owner before time,
432	 * the slow path will always be taken, and that clears the owner field
433	 * after verifying that it was indeed current.
434	 */
435	mutex_clear_owner(lock);
436#endif
437	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
438}
439
440EXPORT_SYMBOL(mutex_unlock);
441
442/**
443 * ww_mutex_unlock - release the w/w mutex
444 * @lock: the mutex to be released
445 *
446 * Unlock a mutex that has been locked by this task previously with any of the
447 * ww_mutex_lock* functions (with or without an acquire context). It is
448 * forbidden to release the locks after releasing the acquire context.
449 *
450 * This function must not be used in interrupt context. Unlocking
451 * of an unlocked mutex is not allowed.
452 */
453void __sched ww_mutex_unlock(struct ww_mutex *lock)
454{
455	/*
456	 * The unlocking fastpath is the 0->1 transition from 'locked'
457	 * into 'unlocked' state:
458	 */
459	if (lock->ctx) {
460#ifdef CONFIG_DEBUG_MUTEXES
461		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
462#endif
463		if (lock->ctx->acquired > 0)
464			lock->ctx->acquired--;
465		lock->ctx = NULL;
466	}
467
468#ifndef CONFIG_DEBUG_MUTEXES
469	/*
470	 * When debugging is enabled we must not clear the owner before time,
471	 * the slow path will always be taken, and that clears the owner field
472	 * after verifying that it was indeed current.
473	 */
474	mutex_clear_owner(&lock->base);
475#endif
476	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
477}
478EXPORT_SYMBOL(ww_mutex_unlock);
479
480static inline int __sched
481__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
482{
483	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
484	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
485
486	if (!hold_ctx)
487		return 0;
488
489	if (unlikely(ctx == hold_ctx))
490		return -EALREADY;
491
492	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
493	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
494#ifdef CONFIG_DEBUG_MUTEXES
495		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
496		ctx->contending_lock = ww;
497#endif
498		return -EDEADLK;
499	}
500
501	return 0;
502}
503
504/*
505 * Lock a mutex (possibly interruptible), slowpath:
506 */
507static __always_inline int __sched
508__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
509		    struct lockdep_map *nest_lock, unsigned long ip,
510		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
511{
512	struct task_struct *task = current;
513	struct mutex_waiter waiter;
514	unsigned long flags;
515	int ret;
516
517	preempt_disable();
518	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
519
520	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
521		/* got the lock, yay! */
522		preempt_enable();
523		return 0;
524	}
525
526	spin_lock_mutex(&lock->wait_lock, flags);
527
528	/*
529	 * Once more, try to acquire the lock. Only try-lock the mutex if
530	 * it is unlocked to reduce unnecessary xchg() operations.
531	 */
532	if (!mutex_is_locked(lock) &&
533	    (atomic_xchg_acquire(&lock->count, 0) == 1))
534		goto skip_wait;
535
536	debug_mutex_lock_common(lock, &waiter);
537	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
538
539	/* add waiting tasks to the end of the waitqueue (FIFO): */
540	list_add_tail(&waiter.list, &lock->wait_list);
541	waiter.task = task;
542
543	lock_contended(&lock->dep_map, ip);
544
545	for (;;) {
546		/*
547		 * Lets try to take the lock again - this is needed even if
548		 * we get here for the first time (shortly after failing to
549		 * acquire the lock), to make sure that we get a wakeup once
550		 * it's unlocked. Later on, if we sleep, this is the
551		 * operation that gives us the lock. We xchg it to -1, so
552		 * that when we release the lock, we properly wake up the
553		 * other waiters. We only attempt the xchg if the count is
554		 * non-negative in order to avoid unnecessary xchg operations:
555		 */
556		if (atomic_read(&lock->count) >= 0 &&
557		    (atomic_xchg_acquire(&lock->count, -1) == 1))
558			break;
559
560		/*
561		 * got a signal? (This code gets eliminated in the
562		 * TASK_UNINTERRUPTIBLE case.)
563		 */
564		if (unlikely(signal_pending_state(state, task))) {
565			ret = -EINTR;
566			goto err;
567		}
568
569		if (use_ww_ctx && ww_ctx->acquired > 0) {
570			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
571			if (ret)
572				goto err;
573		}
574
575		__set_task_state(task, state);
576
577		/* didn't get the lock, go to sleep: */
578		spin_unlock_mutex(&lock->wait_lock, flags);
579		schedule_preempt_disabled();
580		spin_lock_mutex(&lock->wait_lock, flags);
581	}
582	__set_task_state(task, TASK_RUNNING);
583
584	mutex_remove_waiter(lock, &waiter, current_thread_info());
585	/* set it to 0 if there are no waiters left: */
586	if (likely(list_empty(&lock->wait_list)))
587		atomic_set(&lock->count, 0);
588	debug_mutex_free_waiter(&waiter);
589
590skip_wait:
591	/* got the lock - cleanup and rejoice! */
592	lock_acquired(&lock->dep_map, ip);
593	mutex_set_owner(lock);
594
595	if (use_ww_ctx) {
596		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
597		ww_mutex_set_context_slowpath(ww, ww_ctx);
598	}
599
600	spin_unlock_mutex(&lock->wait_lock, flags);
601	preempt_enable();
602	return 0;
603
604err:
605	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
606	spin_unlock_mutex(&lock->wait_lock, flags);
607	debug_mutex_free_waiter(&waiter);
608	mutex_release(&lock->dep_map, 1, ip);
609	preempt_enable();
610	return ret;
611}
612
613#ifdef CONFIG_DEBUG_LOCK_ALLOC
614void __sched
615mutex_lock_nested(struct mutex *lock, unsigned int subclass)
616{
617	might_sleep();
618	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
619			    subclass, NULL, _RET_IP_, NULL, 0);
620}
621
622EXPORT_SYMBOL_GPL(mutex_lock_nested);
623
624void __sched
625_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
626{
627	might_sleep();
628	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
629			    0, nest, _RET_IP_, NULL, 0);
630}
631
632EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
633
634int __sched
635mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
636{
637	might_sleep();
638	return __mutex_lock_common(lock, TASK_KILLABLE,
639				   subclass, NULL, _RET_IP_, NULL, 0);
640}
641EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
642
643int __sched
644mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
645{
646	might_sleep();
647	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
648				   subclass, NULL, _RET_IP_, NULL, 0);
649}
650
651EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
652
653static inline int
654ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
655{
656#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
657	unsigned tmp;
658
659	if (ctx->deadlock_inject_countdown-- == 0) {
660		tmp = ctx->deadlock_inject_interval;
661		if (tmp > UINT_MAX/4)
662			tmp = UINT_MAX;
663		else
664			tmp = tmp*2 + tmp + tmp/2;
665
666		ctx->deadlock_inject_interval = tmp;
667		ctx->deadlock_inject_countdown = tmp;
668		ctx->contending_lock = lock;
669
670		ww_mutex_unlock(lock);
671
672		return -EDEADLK;
673	}
674#endif
675
676	return 0;
677}
678
679int __sched
680__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
681{
682	int ret;
683
684	might_sleep();
685	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
686				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
687	if (!ret && ctx->acquired > 1)
688		return ww_mutex_deadlock_injection(lock, ctx);
689
690	return ret;
691}
692EXPORT_SYMBOL_GPL(__ww_mutex_lock);
693
694int __sched
695__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
696{
697	int ret;
698
699	might_sleep();
700	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
701				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
702
703	if (!ret && ctx->acquired > 1)
704		return ww_mutex_deadlock_injection(lock, ctx);
705
706	return ret;
707}
708EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
709
710#endif
711
712/*
713 * Release the lock, slowpath:
714 */
715static inline void
716__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
717{
718	unsigned long flags;
719	WAKE_Q(wake_q);
720
721	/*
722	 * As a performance measurement, release the lock before doing other
723	 * wakeup related duties to follow. This allows other tasks to acquire
724	 * the lock sooner, while still handling cleanups in past unlock calls.
725	 * This can be done as we do not enforce strict equivalence between the
726	 * mutex counter and wait_list.
727	 *
728	 *
729	 * Some architectures leave the lock unlocked in the fastpath failure
730	 * case, others need to leave it locked. In the latter case we have to
731	 * unlock it here - as the lock counter is currently 0 or negative.
732	 */
733	if (__mutex_slowpath_needs_to_unlock())
734		atomic_set(&lock->count, 1);
735
736	spin_lock_mutex(&lock->wait_lock, flags);
737	mutex_release(&lock->dep_map, nested, _RET_IP_);
738	debug_mutex_unlock(lock);
739
740	if (!list_empty(&lock->wait_list)) {
741		/* get the first entry from the wait-list: */
742		struct mutex_waiter *waiter =
743				list_entry(lock->wait_list.next,
744					   struct mutex_waiter, list);
745
746		debug_mutex_wake_waiter(lock, waiter);
747		wake_q_add(&wake_q, waiter->task);
748	}
749
750	spin_unlock_mutex(&lock->wait_lock, flags);
751	wake_up_q(&wake_q);
752}
753
754/*
755 * Release the lock, slowpath:
756 */
757__visible void
758__mutex_unlock_slowpath(atomic_t *lock_count)
759{
760	struct mutex *lock = container_of(lock_count, struct mutex, count);
761
762	__mutex_unlock_common_slowpath(lock, 1);
763}
764
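The WAKE_Q()/wake_q_add()/wake_up_q() pattern used in the v4.6 unlock slowpath above is generic: wakeups are only recorded while the spinlock is held, and the actual wake_up_process() calls are issued after it is dropped. A hedged, self-contained sketch using the v4.6-era API from <linux/sched.h>, with hypothetical names (struct simple_q, simple_q_wake_all()):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct simple_waiter {
	struct list_head node;
	struct task_struct *task;
};

struct simple_q {
	spinlock_t lock;
	struct list_head waiters;
};

static void simple_q_wake_all(struct simple_q *q)
{
	WAKE_Q(wake_q);
	struct simple_waiter *w, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	list_for_each_entry_safe(w, tmp, &q->waiters, node) {
		list_del_init(&w->node);
		wake_q_add(&wake_q, w->task);	/* only records the task */
	}
	spin_unlock_irqrestore(&q->lock, flags);

	wake_up_q(&wake_q);	/* wake_up_process() happens here, outside the lock */
}
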
765#ifndef CONFIG_DEBUG_LOCK_ALLOC
766/*
767 * Here come the less common (and hence less performance-critical) APIs:
768 * mutex_lock_interruptible() and mutex_trylock().
769 */
770static noinline int __sched
771__mutex_lock_killable_slowpath(struct mutex *lock);
772
773static noinline int __sched
774__mutex_lock_interruptible_slowpath(struct mutex *lock);
775
776/**
777 * mutex_lock_interruptible - acquire the mutex, interruptible
778 * @lock: the mutex to be acquired
779 *
780 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
781 * been acquired or sleep until the mutex becomes available. If a
782 * signal arrives while waiting for the lock then this function
783 * returns -EINTR.
784 *
785 * This function is similar to (but not equivalent to) down_interruptible().
786 */
787int __sched mutex_lock_interruptible(struct mutex *lock)
788{
789	int ret;
790
791	might_sleep();
792	ret =  __mutex_fastpath_lock_retval(&lock->count);
793	if (likely(!ret)) {
794		mutex_set_owner(lock);
795		return 0;
796	} else
797		return __mutex_lock_interruptible_slowpath(lock);
798}
799
800EXPORT_SYMBOL(mutex_lock_interruptible);
801
802int __sched mutex_lock_killable(struct mutex *lock)
803{
804	int ret;
805
806	might_sleep();
807	ret = __mutex_fastpath_lock_retval(&lock->count);
808	if (likely(!ret)) {
809		mutex_set_owner(lock);
810		return 0;
811	} else
812		return __mutex_lock_killable_slowpath(lock);
813}
814EXPORT_SYMBOL(mutex_lock_killable);
815
816__visible void __sched
817__mutex_lock_slowpath(atomic_t *lock_count)
818{
819	struct mutex *lock = container_of(lock_count, struct mutex, count);
820
821	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
822			    NULL, _RET_IP_, NULL, 0);
823}
824
825static noinline int __sched
826__mutex_lock_killable_slowpath(struct mutex *lock)
827{
828	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
829				   NULL, _RET_IP_, NULL, 0);
830}
831
832static noinline int __sched
833__mutex_lock_interruptible_slowpath(struct mutex *lock)
834{
835	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
836				   NULL, _RET_IP_, NULL, 0);
837}
838
839static noinline int __sched
840__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
841{
842	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
843				   NULL, _RET_IP_, ctx, 1);
844}
845
846static noinline int __sched
847__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
848					    struct ww_acquire_ctx *ctx)
849{
850	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
851				   NULL, _RET_IP_, ctx, 1);
852}
853
854#endif
855
856/*
857 * Spinlock based trylock, we take the spinlock and check whether we
858 * can get the lock:
859 */
860static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
861{
862	struct mutex *lock = container_of(lock_count, struct mutex, count);
863	unsigned long flags;
864	int prev;
865
866	/* No need to trylock if the mutex is locked. */
867	if (mutex_is_locked(lock))
868		return 0;
869
870	spin_lock_mutex(&lock->wait_lock, flags);
871
872	prev = atomic_xchg_acquire(&lock->count, -1);
873	if (likely(prev == 1)) {
874		mutex_set_owner(lock);
875		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
876	}
877
878	/* Set it back to 0 if there are no waiters: */
879	if (likely(list_empty(&lock->wait_list)))
880		atomic_set(&lock->count, 0);
881
882	spin_unlock_mutex(&lock->wait_lock, flags);
883
884	return prev == 1;
885}
886
887/**
888 * mutex_trylock - try to acquire the mutex, without waiting
889 * @lock: the mutex to be acquired
890 *
891 * Try to acquire the mutex atomically. Returns 1 if the mutex
892 * has been acquired successfully, and 0 on contention.
893 *
894 * NOTE: this function follows the spin_trylock() convention, so
895 * it is negated from the down_trylock() return values! Be careful
896 * about this when converting semaphore users to mutexes.
897 *
898 * This function must not be used in interrupt context. The
899 * mutex must be released by the same task that acquired it.
900 */
901int __sched mutex_trylock(struct mutex *lock)
902{
903	int ret;
904
905	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
906	if (ret)
907		mutex_set_owner(lock);
908
909	return ret;
910}
911EXPORT_SYMBOL(mutex_trylock);
912
913#ifndef CONFIG_DEBUG_LOCK_ALLOC
914int __sched
915__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
916{
917	int ret;
918
919	might_sleep();
920
921	ret = __mutex_fastpath_lock_retval(&lock->base.count);
922
923	if (likely(!ret)) {
924		ww_mutex_set_context_fastpath(lock, ctx);
925		mutex_set_owner(&lock->base);
926	} else
927		ret = __ww_mutex_lock_slowpath(lock, ctx);
928	return ret;
929}
930EXPORT_SYMBOL(__ww_mutex_lock);
931
932int __sched
933__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
934{
935	int ret;
936
937	might_sleep();
938
939	ret = __mutex_fastpath_lock_retval(&lock->base.count);
940
941	if (likely(!ret)) {
942		ww_mutex_set_context_fastpath(lock, ctx);
943		mutex_set_owner(&lock->base);
944	} else
945		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
946	return ret;
947}
948EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
949
950#endif
951
952/**
953 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
954 * @cnt: the atomic which we are to dec
955 * @lock: the mutex to return holding if we dec to 0
956 *
957 * return true and hold lock if we dec to 0, return false otherwise
958 */
959int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
960{
961	/* dec if we can't possibly hit 0 */
962	if (atomic_add_unless(cnt, -1, 1))
963		return 0;
964	/* we might hit 0, so take the lock */
965	mutex_lock(lock);
966	if (!atomic_dec_and_test(cnt)) {
967		/* when we actually did the dec, we didn't hit 0 */
968		mutex_unlock(lock);
969		return 0;
970	}
971	/* we hit 0, and we hold the lock */
972	return 1;
973}
974EXPORT_SYMBOL(atomic_dec_and_mutex_lock);