Linux v6.2: kernel/locking/mutex.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kernel/locking/mutex.c
   4 *
   5 * Mutexes: blocking mutual exclusion locks
   6 *
   7 * Started by Ingo Molnar:
   8 *
   9 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  10 *
  11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  12 * David Howells for suggestions and improvements.
  13 *
  14 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
  15 *    from the -rt tree, where it was originally implemented for rtmutexes
  16 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
  17 *    and Sven Dietrich.
  18 *
  19 * Also see Documentation/locking/mutex-design.rst.
  20 */
  21#include <linux/mutex.h>
  22#include <linux/ww_mutex.h>
  23#include <linux/sched/signal.h>
  24#include <linux/sched/rt.h>
  25#include <linux/sched/wake_q.h>
  26#include <linux/sched/debug.h>
  27#include <linux/export.h>
  28#include <linux/spinlock.h>
  29#include <linux/interrupt.h>
  30#include <linux/debug_locks.h>
  31#include <linux/osq_lock.h>
  32
  33#define CREATE_TRACE_POINTS
  34#include <trace/events/lock.h>
  35
  36#ifndef CONFIG_PREEMPT_RT
  37#include "mutex.h"
  38
  39#ifdef CONFIG_DEBUG_MUTEXES
  40# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
  41#else
  42# define MUTEX_WARN_ON(cond)
 
  43#endif
  44
  45void
  46__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  47{
  48	atomic_long_set(&lock->owner, 0);
  49	raw_spin_lock_init(&lock->wait_lock);
  50	INIT_LIST_HEAD(&lock->wait_list);
 
  51#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  52	osq_lock_init(&lock->osq);
  53#endif
  54
  55	debug_mutex_init(lock, name, key);
  56}
  57EXPORT_SYMBOL(__mutex_init);
  58
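/*
 * Illustrative usage sketch (not part of mutex.c): the two common ways to
 * initialize a mutex before its first use. "my_driver_data" and the field
 * names are hypothetical.
 */
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(my_global_lock);	/* statically defined, ready to use */

struct my_driver_data {
	struct mutex lock;
	int value;
};

static struct my_driver_data *my_driver_data_alloc(void)
{
	struct my_driver_data *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return NULL;
	mutex_init(&st->lock);	/* dynamic init; memset()-ing the mutex to 0 is not allowed */
	return st;
}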
  59/*
  60 * @owner: contains: 'struct task_struct *' to the current lock owner,
   61 * NULL means not owned. Since task_struct pointers are aligned to
   62 * at least L1_CACHE_BYTES, we have low bits to store extra state.
  63 *
  64 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
  65 * Bit1 indicates unlock needs to hand the lock to the top-waiter
  66 * Bit2 indicates handoff has been done and we're waiting for pickup.
  67 */
  68#define MUTEX_FLAG_WAITERS	0x01
  69#define MUTEX_FLAG_HANDOFF	0x02
  70#define MUTEX_FLAG_PICKUP	0x04
  71
  72#define MUTEX_FLAGS		0x07
  73
  74/*
  75 * Internal helper function; C doesn't allow us to hide it :/
  76 *
  77 * DO NOT USE (outside of mutex code).
  78 */
  79static inline struct task_struct *__mutex_owner(struct mutex *lock)
  80{
  81	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
  82}
  83
  84static inline struct task_struct *__owner_task(unsigned long owner)
  85{
  86	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
  87}
  88
  89bool mutex_is_locked(struct mutex *lock)
  90{
  91	return __mutex_owner(lock) != NULL;
  92}
  93EXPORT_SYMBOL(mutex_is_locked);
  94
  95static inline unsigned long __owner_flags(unsigned long owner)
  96{
  97	return owner & MUTEX_FLAGS;
  98}
  99
 100/*
 101 * Returns: __mutex_owner(lock) on failure or NULL on success.
 102 */
 103static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 104{
 105	unsigned long owner, curr = (unsigned long)current;
 106
 107	owner = atomic_long_read(&lock->owner);
 108	for (;;) { /* must loop, can race against a flag */
 109		unsigned long flags = __owner_flags(owner);
 110		unsigned long task = owner & ~MUTEX_FLAGS;
 111
 112		if (task) {
 113			if (flags & MUTEX_FLAG_PICKUP) {
 114				if (task != curr)
 115					break;
 116				flags &= ~MUTEX_FLAG_PICKUP;
 117			} else if (handoff) {
 118				if (flags & MUTEX_FLAG_HANDOFF)
 119					break;
 120				flags |= MUTEX_FLAG_HANDOFF;
 121			} else {
 122				break;
 123			}
 124		} else {
 125			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
 126			task = curr;
 127		}
 128
 129		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
 130			if (task == curr)
 131				return NULL;
 132			break;
 133		}
 134	}
 135
 136	return __owner_task(owner);
 137}
 138
 139/*
 140 * Trylock or set HANDOFF
 141 */
 142static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
 143{
 144	return !__mutex_trylock_common(lock, handoff);
 145}
 146
 147/*
 148 * Actual trylock that will work on any unlocked state.
 149 */
 150static inline bool __mutex_trylock(struct mutex *lock)
 151{
 152	return !__mutex_trylock_common(lock, false);
 153}
 154
 155#ifndef CONFIG_DEBUG_LOCK_ALLOC
 156/*
 157 * Lockdep annotations are contained to the slow paths for simplicity.
 158 * There is nothing that would stop spreading the lockdep annotations outwards
 159 * except more code.
 160 */
 161
 162/*
 163 * Optimistic trylock that only works in the uncontended case. Make sure to
 164 * follow with a __mutex_trylock() before failing.
 165 */
 166static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 167{
 168	unsigned long curr = (unsigned long)current;
 169	unsigned long zero = 0UL;
 170
 171	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
 172		return true;
 173
 174	return false;
 175}
 176
 177static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 178{
 179	unsigned long curr = (unsigned long)current;
 180
 181	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
 182}
 183#endif
 184
 185static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
 186{
 187	atomic_long_or(flag, &lock->owner);
 188}
 189
 190static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
 191{
 192	atomic_long_andnot(flag, &lock->owner);
 193}
 194
 195static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
 196{
 197	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
 198}
 199
 200/*
 201 * Add @waiter to a given location in the lock wait_list and set the
 202 * FLAG_WAITERS flag if it's the first waiter.
 203 */
 204static void
 205__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 206		   struct list_head *list)
 207{
 208	debug_mutex_add_waiter(lock, waiter, current);
 209
 210	list_add_tail(&waiter->list, list);
 211	if (__mutex_waiter_is_first(lock, waiter))
 212		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 213}
 214
 215static void
 216__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
 217{
 218	list_del(&waiter->list);
 219	if (likely(list_empty(&lock->wait_list)))
 220		__mutex_clear_flag(lock, MUTEX_FLAGS);
 221
 222	debug_mutex_remove_waiter(lock, waiter, current);
 223}
 224
 225/*
  226 * Give up ownership to a specific task; when @task == NULL, this is equivalent
  227 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, and preserves
  228 * WAITERS. Provides RELEASE semantics like a regular unlock; the matching
  229 * ACQUIRE semantics are provided by __mutex_trylock() on the handoff side.
 230 */
 231static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 232{
 233	unsigned long owner = atomic_long_read(&lock->owner);
 234
 235	for (;;) {
 236		unsigned long new;
 237
 238		MUTEX_WARN_ON(__owner_task(owner) != current);
 239		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 240
 241		new = (owner & MUTEX_FLAG_WAITERS);
 242		new |= (unsigned long)task;
 243		if (task)
 244			new |= MUTEX_FLAG_PICKUP;
 245
 246		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
 247			break;
 248	}
 249}
 250
 251#ifndef CONFIG_DEBUG_LOCK_ALLOC
 252/*
 253 * We split the mutex lock/unlock logic into separate fastpath and
 254 * slowpath functions, to reduce the register pressure on the fastpath.
 255 * We also put the fastpath first in the kernel image, to make sure the
 256 * branch is predicted by the CPU as default-untaken.
 257 */
 258static void __sched __mutex_lock_slowpath(struct mutex *lock);
 259
 260/**
 261 * mutex_lock - acquire the mutex
 262 * @lock: the mutex to be acquired
 263 *
 264 * Lock the mutex exclusively for this task. If the mutex is not
 265 * available right now, it will sleep until it can get it.
 266 *
 267 * The mutex must later on be released by the same task that
 268 * acquired it. Recursive locking is not allowed. The task
 269 * may not exit without first unlocking the mutex. Also, kernel
 270 * memory where the mutex resides must not be freed with
 271 * the mutex still locked. The mutex must first be initialized
 272 * (or statically defined) before it can be locked. memset()-ing
 273 * the mutex to 0 is not allowed.
 274 *
 275 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 276 * checks that will enforce the restrictions and will also do
 277 * deadlock debugging)
 278 *
 279 * This function is similar to (but not equivalent to) down().
 280 */
 281void __sched mutex_lock(struct mutex *lock)
 282{
 283	might_sleep();
 284
 285	if (!__mutex_trylock_fast(lock))
 286		__mutex_lock_slowpath(lock);
 287}
 
 288EXPORT_SYMBOL(mutex_lock);
 289#endif
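/*
 * Illustrative usage sketch (not part of mutex.c): a straightforward
 * critical section in process context. mutex_unlock() is defined further
 * down in this file; "my_driver_data" is the hypothetical structure from
 * the earlier sketch.
 */
static void my_driver_set_value(struct my_driver_data *st, int value)
{
	mutex_lock(&st->lock);		/* may sleep until the mutex is free */
	st->value = value;
	mutex_unlock(&st->lock);	/* must be released by the same task */
}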
 290
 291#include "ww_mutex.h"
 292
 293#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 294
 295/*
 296 * Trylock variant that returns the owning task on failure.
 297 */
 298static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 
 
 299{
 300	return __mutex_trylock_common(lock, false);
 301}
 302
 303static inline
 304bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 305			    struct mutex_waiter *waiter)
 306{
 307	struct ww_mutex *ww;
 308
 309	ww = container_of(lock, struct ww_mutex, base);
 310
 311	/*
  312	 * If ww->ctx is set, its contents are undefined; only
  313	 * by acquiring the wait_lock is there a guarantee that
  314	 * they are valid when read.
 315	 *
 316	 * As such, when deadlock detection needs to be
 317	 * performed the optimistic spinning cannot be done.
 318	 *
 319	 * Check this in every inner iteration because we may
 320	 * be racing against another thread's ww_mutex_lock.
 321	 */
 322	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
 323		return false;
 324
 325	/*
 326	 * If we aren't on the wait list yet, cancel the spin
  327	 * if there are waiters. We want to avoid stealing the
 328	 * lock from a waiter with an earlier stamp, since the
 329	 * other thread may already own a lock that we also
 330	 * need.
 331	 */
 332	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
 333		return false;
 334
 335	/*
 336	 * Similarly, stop spinning if we are no longer the
 337	 * first waiter.
 338	 */
 339	if (waiter && !__mutex_waiter_is_first(lock, waiter))
 340		return false;
 341
 342	return true;
 
 
 343}
 344
 345/*
 346 * Look out! "owner" is an entirely speculative pointer access and not
 347 * reliable.
 348 *
 349 * "noinline" so that this function shows up on perf profiles.
 350 */
 351static noinline
 352bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 353			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 354{
 355	bool ret = true;
 356
 357	lockdep_assert_preemption_disabled();
 358
 359	while (__mutex_owner(lock) == owner) {
 360		/*
  361		 * Ensure we emit the owner->on_cpu dereference _after_
  362		 * checking lock->owner still matches owner. We already
  363		 * disabled preemption, which is equivalent to the RCU read-side
  364		 * critical section in the optimistic spinning code. Thus the
  365		 * task_struct structure won't go away during the spinning
  366		 * period.
 367		 */
 368		barrier();
 369
 370		/*
 371		 * Use vcpu_is_preempted to detect lock holder preemption issue.
 372		 */
 373		if (!owner_on_cpu(owner) || need_resched()) {
 374			ret = false;
 375			break;
 376		}
 377
 378		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
 379			ret = false;
 380			break;
 381		}
 382
 383		cpu_relax();
 384	}
 
 385
 386	return ret;
 387}
 388
 389/*
 390 * Initial check for entering the mutex spinning loop
 391 */
 392static inline int mutex_can_spin_on_owner(struct mutex *lock)
 393{
 394	struct task_struct *owner;
 395	int retval = 1;
 396
 397	lockdep_assert_preemption_disabled();
 398
 399	if (need_resched())
 400		return 0;
 401
 402	/*
  403	 * We already disabled preemption, which is equivalent to the RCU read-side
  404	 * critical section in the optimistic spinning code. Thus the task_struct
 405	 * structure won't go away during the spinning period.
 406	 */
 407	owner = __mutex_owner(lock);
 408	if (owner)
 409		retval = owner_on_cpu(owner);
 410
 411	/*
 412	 * If lock->owner is not set, the mutex has been released. Return true
 413	 * such that we'll trylock in the spin path, which is a faster option
 414	 * than the blocking slow path.
 415	 */
 416	return retval;
 417}
 418
 419/*
 420 * Optimistic spinning.
 421 *
 422 * We try to spin for acquisition when we find that the lock owner
 423 * is currently running on a (different) CPU and while we don't
 424 * need to reschedule. The rationale is that if the lock owner is
 425 * running, it is likely to release the lock soon.
 426 *
 427 * The mutex spinners are queued up using MCS lock so that only one
 428 * spinner can compete for the mutex. However, if mutex spinning isn't
 429 * going to happen, there is no point in going through the lock/unlock
 430 * overhead.
 431 *
 432 * Returns true when the lock was taken, otherwise false, indicating
 433 * that we need to jump to the slowpath and sleep.
 434 *
 435 * The waiter flag is set to true if the spinner is a waiter in the wait
 436 * queue. The waiter-spinner will spin on the lock directly and concurrently
 437 * with the spinner at the head of the OSQ, if present, until the owner is
 438 * changed to itself.
 439 */
 440static __always_inline bool
 441mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 442		      struct mutex_waiter *waiter)
 443{
 444	if (!waiter) {
 445		/*
 446		 * The purpose of the mutex_can_spin_on_owner() function is
 447		 * to eliminate the overhead of osq_lock() and osq_unlock()
 448		 * in case spinning isn't possible. As a waiter-spinner
 449		 * is not going to take OSQ lock anyway, there is no need
 450		 * to call mutex_can_spin_on_owner().
 451		 */
 452		if (!mutex_can_spin_on_owner(lock))
 453			goto fail;
 454
 455		/*
 456		 * In order to avoid a stampede of mutex spinners trying to
 457		 * acquire the mutex all at once, the spinners need to take a
 458		 * MCS (queued) lock first before spinning on the owner field.
 459		 */
 460		if (!osq_lock(&lock->osq))
 461			goto fail;
 462	}
 463
 464	for (;;) {
 465		struct task_struct *owner;
 466
 467		/* Try to acquire the mutex... */
 468		owner = __mutex_trylock_or_owner(lock);
 469		if (!owner)
 470			break;
 471
 472		/*
 473		 * There's an owner, wait for it to either
 474		 * release the lock or go to sleep.
 475		 */
 476		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
 477			goto fail_unlock;
 478
 479		/*
 480		 * The cpu_relax() call is a compiler barrier which forces
 481		 * everything in this loop to be re-loaded. We don't need
 482		 * memory barriers as we'll eventually observe the right
 483		 * values at the cost of a few extra spins.
 484		 */
 485		cpu_relax();
 486	}
 487
 488	if (!waiter)
 489		osq_unlock(&lock->osq);
 490
 491	return true;
 492
 493
 494fail_unlock:
 495	if (!waiter)
 496		osq_unlock(&lock->osq);
 497
 498fail:
 499	/*
 500	 * If we fell out of the spin path because of need_resched(),
 501	 * reschedule now, before we try-lock the mutex. This avoids getting
 502	 * scheduled out right after we obtained the mutex.
 503	 */
 504	if (need_resched()) {
 505		/*
 506		 * We _should_ have TASK_RUNNING here, but just in case
 507		 * we do not, make it so, otherwise we might get stuck.
 508		 */
 509		__set_current_state(TASK_RUNNING);
 510		schedule_preempt_disabled();
 511	}
 512
 513	return false;
 514}
 515#else
 516static __always_inline bool
 517mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 518		      struct mutex_waiter *waiter)
 519{
 520	return false;
 521}
 522#endif
 523
 524static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
 
 525
 526/**
 527 * mutex_unlock - release the mutex
 528 * @lock: the mutex to be released
 529 *
 530 * Unlock a mutex that has been locked by this task previously.
 531 *
 532 * This function must not be used in interrupt context. Unlocking
 533 * of a not locked mutex is not allowed.
 534 *
 535 * This function is similar to (but not equivalent to) up().
 536 */
 537void __sched mutex_unlock(struct mutex *lock)
 538{
 539#ifndef CONFIG_DEBUG_LOCK_ALLOC
 540	if (__mutex_unlock_fast(lock))
 541		return;
 542#endif
 543	__mutex_unlock_slowpath(lock, _RET_IP_);
 544}
 
 545EXPORT_SYMBOL(mutex_unlock);
 546
 547/**
 548 * ww_mutex_unlock - release the w/w mutex
 549 * @lock: the mutex to be released
 550 *
 551 * Unlock a mutex that has been locked by this task previously with any of the
 552 * ww_mutex_lock* functions (with or without an acquire context). It is
 553 * forbidden to release the locks after releasing the acquire context.
 554 *
 555 * This function must not be used in interrupt context. Unlocking
 556 * of a unlocked mutex is not allowed.
 557 */
 558void __sched ww_mutex_unlock(struct ww_mutex *lock)
 559{
 560	__ww_mutex_unlock(lock);
 561	mutex_unlock(&lock->base);
 562}
 563EXPORT_SYMBOL(ww_mutex_unlock);
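/*
 * Illustrative usage sketch (not part of mutex.c): acquiring two ww_mutexes
 * under one acquire context and backing off on -EDEADLK, along the lines of
 * Documentation/locking/ww-mutex-design.rst. "my_buf" and "my_buf_ww_class"
 * are hypothetical names.
 */
#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_buf_ww_class);

struct my_buf {
	struct ww_mutex lock;	/* ww_mutex_init(&buf->lock, &my_buf_ww_class) at creation */
};

static int my_lock_both(struct my_buf *a, struct my_buf *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_buf_ww_class);

	ret = ww_mutex_lock(&a->lock, &ctx);
	if (ret)
		goto err_fini;

	for (;;) {
		ret = ww_mutex_lock(&b->lock, &ctx);
		if (!ret)
			break;

		ww_mutex_unlock(&a->lock);
		if (ret != -EDEADLK)
			goto err_fini;

		/* Back off: sleep on the contended lock, then retry the other one. */
		ww_mutex_lock_slow(&b->lock, &ctx);
		swap(a, b);
	}

	ww_acquire_done(&ctx);
	return 0;	/* caller unlocks both and then calls ww_acquire_fini(&ctx) */

err_fini:
	ww_acquire_fini(&ctx);
	return ret;
}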
 564
 565/*
 566 * Lock a mutex (possibly interruptible), slowpath:
 567 */
 568static __always_inline int __sched
 569__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 570		    struct lockdep_map *nest_lock, unsigned long ip,
 571		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 572{
 
 573	struct mutex_waiter waiter;
 574	struct ww_mutex *ww;
 575	int ret;
 576
 577	if (!use_ww_ctx)
 578		ww_ctx = NULL;
 579
 580	might_sleep();
 581
 582	MUTEX_WARN_ON(lock->magic != lock);
 583
 584	ww = container_of(lock, struct ww_mutex, base);
 585	if (ww_ctx) {
 586		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 587			return -EALREADY;
 588
 589		/*
 590		 * Reset the wounded flag after a kill. No other process can
 591		 * race and wound us here since they can't have a valid owner
 592		 * pointer if we don't have any locks held.
 593		 */
 594		if (ww_ctx->acquired == 0)
 595			ww_ctx->wounded = 0;
 596
 597#ifdef CONFIG_DEBUG_LOCK_ALLOC
 598		nest_lock = &ww_ctx->dep_map;
 599#endif
 600	}
 601
 602	preempt_disable();
 603	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 604
 605	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
 606	if (__mutex_trylock(lock) ||
 607	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 608		/* got the lock, yay! */
 609		lock_acquired(&lock->dep_map, ip);
 610		if (ww_ctx)
 611			ww_mutex_set_context_fastpath(ww, ww_ctx);
 612		trace_contention_end(lock, 0);
 613		preempt_enable();
 614		return 0;
 615	}
 616
 617	raw_spin_lock(&lock->wait_lock);
 
 618	/*
 619	 * After waiting to acquire the wait_lock, try again.
 
 620	 */
 621	if (__mutex_trylock(lock)) {
 622		if (ww_ctx)
 623			__ww_mutex_check_waiters(lock, ww_ctx);
 624
 625		goto skip_wait;
 626	}
 627
 628	debug_mutex_lock_common(lock, &waiter);
 629	waiter.task = current;
 630	if (use_ww_ctx)
 631		waiter.ww_ctx = ww_ctx;
 632
 633	lock_contended(&lock->dep_map, ip);
 
 
 634
 635	if (!use_ww_ctx) {
 636		/* add waiting tasks to the end of the waitqueue (FIFO): */
 637		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
 638	} else {
 639		/*
 640		 * Add in stamp order, waking up waiters that must kill
 641		 * themselves.
 642		 */
 643		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
 644		if (ret)
 645			goto err_early_kill;
 646	}
 647
 648	set_current_state(state);
 649	trace_contention_begin(lock, LCB_F_MUTEX);
 650	for (;;) {
 651		bool first;
 652
 653		/*
 654		 * Once we hold wait_lock, we're serialized against
 655		 * mutex_unlock() handing the lock off to us, do a trylock
 656		 * before testing the error conditions to make sure we pick up
 657		 * the handoff.
 658		 */
 659		if (__mutex_trylock(lock))
 660			goto acquired;
 
 661
 662		/*
 663		 * Check for signals and kill conditions while holding
 664		 * wait_lock. This ensures the lock cancellation is ordered
 665		 * against mutex_unlock() and wake-ups do not go missing.
 666		 */
 667		if (signal_pending_state(state, current)) {
 668			ret = -EINTR;
 669			goto err;
 670		}
 671
 672		if (ww_ctx) {
 673			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
 674			if (ret)
 675				goto err;
 676		}
 677
 678		raw_spin_unlock(&lock->wait_lock);
 679		schedule_preempt_disabled();
 680
 681		first = __mutex_waiter_is_first(lock, &waiter);
 682
 683		set_current_state(state);
 684		/*
 685		 * Here we order against unlock; we must either see it change
 686		 * state back to RUNNING and fall through the next schedule(),
 687		 * or we must see its unlock and acquire.
 688		 */
 689		if (__mutex_trylock_or_handoff(lock, first))
 690			break;
 691
 692		if (first) {
 693			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
 694			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
 695				break;
 696			trace_contention_begin(lock, LCB_F_MUTEX);
 697		}
 698
 699		raw_spin_lock(&lock->wait_lock);
 700	}
 701	raw_spin_lock(&lock->wait_lock);
 702acquired:
 703	__set_current_state(TASK_RUNNING);
 704
 705	if (ww_ctx) {
 706		/*
 707		 * Wound-Wait; we stole the lock (!first_waiter), check the
 708		 * waiters as anyone might want to wound us.
 709		 */
 710		if (!ww_ctx->is_wait_die &&
 711		    !__mutex_waiter_is_first(lock, &waiter))
 712			__ww_mutex_check_waiters(lock, ww_ctx);
 713	}
 
 714
 715	__mutex_remove_waiter(lock, &waiter);
 716
 
 
 717	debug_mutex_free_waiter(&waiter);
 718
 719skip_wait:
 720	/* got the lock - cleanup and rejoice! */
 721	lock_acquired(&lock->dep_map, ip);
 722	trace_contention_end(lock, 0);
 723
 724	if (ww_ctx)
 725		ww_mutex_lock_acquired(ww, ww_ctx);
 
 
 726
 727	raw_spin_unlock(&lock->wait_lock);
 728	preempt_enable();
 729	return 0;
 730
 731err:
 732	__set_current_state(TASK_RUNNING);
 733	__mutex_remove_waiter(lock, &waiter);
 734err_early_kill:
 735	trace_contention_end(lock, ret);
 736	raw_spin_unlock(&lock->wait_lock);
 737	debug_mutex_free_waiter(&waiter);
 738	mutex_release(&lock->dep_map, ip);
 739	preempt_enable();
 740	return ret;
 741}
 742
 743static int __sched
 744__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 745	     struct lockdep_map *nest_lock, unsigned long ip)
 746{
 747	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
 748}
 749
 750static int __sched
 751__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 752		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
 753{
 754	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
 755}
 756
 757/**
 758 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 759 * @ww: mutex to lock
 760 * @ww_ctx: optional w/w acquire context
 761 *
 762 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 763 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 764 *
 765 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if a @ctx is
 766 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 767 *
 768 * A mutex acquired with this function must be released with ww_mutex_unlock.
 769 */
 770int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 771{
 772	if (!ww_ctx)
 773		return mutex_trylock(&ww->base);
 774
 775	MUTEX_WARN_ON(ww->base.magic != &ww->base);
 776
 777	/*
 778	 * Reset the wounded flag after a kill. No other process can
 779	 * race and wound us here, since they can't have a valid owner
 780	 * pointer if we don't have any locks held.
 781	 */
 782	if (ww_ctx->acquired == 0)
 783		ww_ctx->wounded = 0;
 784
 785	if (__mutex_trylock(&ww->base)) {
 786		ww_mutex_set_context_fastpath(ww, ww_ctx);
 787		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
 788		return 1;
 789	}
 790
 791	return 0;
 792}
 793EXPORT_SYMBOL(ww_mutex_trylock);
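/*
 * Illustrative usage sketch (not part of mutex.c): opportunistically grab
 * one more buffer while already holding locks under @ctx; on contention we
 * simply skip it, since no deadlock handling is done. Names reuse the
 * hypothetical "my_buf" from the sketch above.
 */
static bool my_try_add_buf(struct my_buf *extra, struct ww_acquire_ctx *ctx)
{
	if (!ww_mutex_trylock(&extra->lock, ctx))
		return false;		/* contended: caller skips this buffer */

	return true;			/* locked: release later with ww_mutex_unlock() */
}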
 794
 795#ifdef CONFIG_DEBUG_LOCK_ALLOC
 796void __sched
 797mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 798{
 799	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 
 
 800}
 801
 802EXPORT_SYMBOL_GPL(mutex_lock_nested);
 803
 804void __sched
 805_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 806{
 807	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
 
 
 808}
 
 809EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 810
 811int __sched
 812mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 813{
 814	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 
 
 815}
 816EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 817
 818int __sched
 819mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 820{
 821	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
 822}
 823EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 824
 825void __sched
 826mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
 827{
 828	int token;
 829
 830	might_sleep();
 831
 832	token = io_schedule_prepare();
 833	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
 834			    subclass, NULL, _RET_IP_, NULL, 0);
 835	io_schedule_finish(token);
 836}
 837EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
 
 838
 839static inline int
 840ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 841{
 842#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
 843	unsigned tmp;
 844
 845	if (ctx->deadlock_inject_countdown-- == 0) {
 846		tmp = ctx->deadlock_inject_interval;
 847		if (tmp > UINT_MAX/4)
 848			tmp = UINT_MAX;
 849		else
 850			tmp = tmp*2 + tmp + tmp/2;
 851
 852		ctx->deadlock_inject_interval = tmp;
 853		ctx->deadlock_inject_countdown = tmp;
 854		ctx->contending_lock = lock;
 855
 856		ww_mutex_unlock(lock);
 857
 858		return -EDEADLK;
 859	}
 860#endif
 861
 862	return 0;
 863}
 864
 865int __sched
 866ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 867{
 868	int ret;
 869
 870	might_sleep();
 871	ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
 872			       0, _RET_IP_, ctx);
 873	if (!ret && ctx && ctx->acquired > 1)
 874		return ww_mutex_deadlock_injection(lock, ctx);
 875
 876	return ret;
 877}
 878EXPORT_SYMBOL_GPL(ww_mutex_lock);
 879
 880int __sched
 881ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 882{
 883	int ret;
 884
 885	might_sleep();
 886	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
 887			      0, _RET_IP_, ctx);
 888
 889	if (!ret && ctx && ctx->acquired > 1)
 890		return ww_mutex_deadlock_injection(lock, ctx);
 891
 892	return ret;
 893}
 894EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 895
 896#endif
 897
 898/*
 899 * Release the lock, slowpath:
 900 */
 901static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 
 902{
 903	struct task_struct *next = NULL;
 904	DEFINE_WAKE_Q(wake_q);
 905	unsigned long owner;
 906
 907	mutex_release(&lock->dep_map, ip);
 908
 909	/*
 910	 * Release the lock before (potentially) taking the spinlock such that
 911	 * other contenders can get on with things ASAP.
 912	 *
 913	 * Except when HANDOFF, in that case we must not clear the owner field,
 914	 * but instead set it to the top waiter.
 915	 */
 916	owner = atomic_long_read(&lock->owner);
 917	for (;;) {
 918		MUTEX_WARN_ON(__owner_task(owner) != current);
 919		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 920
 921		if (owner & MUTEX_FLAG_HANDOFF)
 922			break;
 923
 924		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
 925			if (owner & MUTEX_FLAG_WAITERS)
 926				break;
 927
 928			return;
 929		}
 930	}
 931
 932	raw_spin_lock(&lock->wait_lock);
 933	debug_mutex_unlock(lock);
 
 934	if (!list_empty(&lock->wait_list)) {
 935		/* get the first entry from the wait-list: */
 936		struct mutex_waiter *waiter =
 937			list_first_entry(&lock->wait_list,
 938					 struct mutex_waiter, list);
 939
 940		next = waiter->task;
 941
 942		debug_mutex_wake_waiter(lock, waiter);
 943		wake_q_add(&wake_q, next);
 944	}
 945
 946	if (owner & MUTEX_FLAG_HANDOFF)
 947		__mutex_handoff(lock, next);
 
 948
 949	raw_spin_unlock(&lock->wait_lock);
 950
 951	wake_up_q(&wake_q);
 952}
 953
 954#ifndef CONFIG_DEBUG_LOCK_ALLOC
 955/*
 956 * Here come the less common (and hence less performance-critical) APIs:
 957 * mutex_lock_interruptible() and mutex_trylock().
 958 */
 959static noinline int __sched
 960__mutex_lock_killable_slowpath(struct mutex *lock);
 961
 962static noinline int __sched
 963__mutex_lock_interruptible_slowpath(struct mutex *lock);
 964
 965/**
 966 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 967 * @lock: The mutex to be acquired.
 968 *
 969 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 970 * process is sleeping, this function will return without acquiring the
 971 * mutex.
 972 *
 973 * Context: Process context.
 974 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 975 * signal arrived.
 976 */
 977int __sched mutex_lock_interruptible(struct mutex *lock)
 978{
 979	might_sleep();
 980
 981	if (__mutex_trylock_fast(lock))
 982		return 0;
 983
 984	return __mutex_lock_interruptible_slowpath(lock);
 985}
 986
 987EXPORT_SYMBOL(mutex_lock_interruptible);
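/*
 * Illustrative usage sketch (not part of mutex.c): a typical ioctl/read
 * style path that lets a signal abort the wait and reports it as
 * -ERESTARTSYS. "my_driver_data" is the hypothetical structure from the
 * earlier sketches.
 */
static int my_driver_read_value(struct my_driver_data *st, int *out)
{
	if (mutex_lock_interruptible(&st->lock))
		return -ERESTARTSYS;	/* signal arrived while sleeping; lock not taken */

	*out = st->value;
	mutex_unlock(&st->lock);
	return 0;
}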
 988
 989/**
 990 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 991 * @lock: The mutex to be acquired.
 992 *
 993 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 994 * the current process is delivered while the process is sleeping, this
 995 * function will return without acquiring the mutex.
 996 *
 997 * Context: Process context.
 998 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 999 * fatal signal arrived.
1000 */
1001int __sched mutex_lock_killable(struct mutex *lock)
1002{
1003	might_sleep();
1004
1005	if (__mutex_trylock_fast(lock))
1006		return 0;
1007
1008	return __mutex_lock_killable_slowpath(lock);
1009}
1010EXPORT_SYMBOL(mutex_lock_killable);
1011
1012/**
1013 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1014 * @lock: The mutex to be acquired.
1015 *
1016 * Lock the mutex like mutex_lock().  While the task is waiting for this
1017 * mutex, it will be accounted as being in the IO wait state by the
1018 * scheduler.
1019 *
1020 * Context: Process context.
1021 */
1022void __sched mutex_lock_io(struct mutex *lock)
1023{
1024	int token;
1025
1026	token = io_schedule_prepare();
1027	mutex_lock(lock);
1028	io_schedule_finish(token);
1029}
1030EXPORT_SYMBOL_GPL(mutex_lock_io);
1031
1032static noinline void __sched
1033__mutex_lock_slowpath(struct mutex *lock)
1034{
1035	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1036}
1037
1038static noinline int __sched
1039__mutex_lock_killable_slowpath(struct mutex *lock)
1040{
1041	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 
1042}
1043
1044static noinline int __sched
1045__mutex_lock_interruptible_slowpath(struct mutex *lock)
1046{
1047	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 
1048}
1049
1050static noinline int __sched
1051__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1052{
1053	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
1054			       _RET_IP_, ctx);
1055}
1056
1057static noinline int __sched
1058__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1059					    struct ww_acquire_ctx *ctx)
1060{
1061	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
1062			       _RET_IP_, ctx);
1063}
1064
1065#endif
1066
1067/**
1068 * mutex_trylock - try to acquire the mutex, without waiting
1069 * @lock: the mutex to be acquired
1070 *
1071 * Try to acquire the mutex atomically. Returns 1 if the mutex
1072 * has been acquired successfully, and 0 on contention.
1073 *
1074 * NOTE: this function follows the spin_trylock() convention, so
1075 * it is negated from the down_trylock() return values! Be careful
1076 * about this when converting semaphore users to mutexes.
1077 *
1078 * This function must not be used in interrupt context. The
1079 * mutex must be released by the same task that acquired it.
1080 */
1081int __sched mutex_trylock(struct mutex *lock)
1082{
1083	bool locked;
1084
1085	MUTEX_WARN_ON(lock->magic != lock);
 
 
1086
1087	locked = __mutex_trylock(lock);
1088	if (locked)
1089		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1090
1091	return locked;
1092}
1093EXPORT_SYMBOL(mutex_trylock);
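/*
 * Illustrative usage sketch (not part of mutex.c): note the spin_trylock()
 * style return convention, 1 on success and 0 on contention, which is the
 * opposite of down_trylock(). Still process context only; on contention the
 * caller skips the work instead of sleeping. Hypothetical names as above.
 */
static bool my_driver_poll(struct my_driver_data *st)
{
	if (!mutex_trylock(&st->lock))
		return false;		/* contended: try again on the next pass */

	st->value++;
	mutex_unlock(&st->lock);
	return true;
}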
1094
1095#ifndef CONFIG_DEBUG_LOCK_ALLOC
1096int __sched
1097ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1098{
 
 
1099	might_sleep();
1100
1101	if (__mutex_trylock_fast(&lock->base)) {
1102		if (ctx)
1103			ww_mutex_set_context_fastpath(lock, ctx);
1104		return 0;
1105	}
1106
1107	return __ww_mutex_lock_slowpath(lock, ctx);
1108}
1109EXPORT_SYMBOL(ww_mutex_lock);
1110
1111int __sched
1112ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1113{
 
 
1114	might_sleep();
1115
1116	if (__mutex_trylock_fast(&lock->base)) {
1117		if (ctx)
1118			ww_mutex_set_context_fastpath(lock, ctx);
1119		return 0;
1120	}
1121
1122	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1123}
1124EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1125
1126#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
1127#endif /* !CONFIG_PREEMPT_RT */
1128
1129/**
1130 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1131 * @cnt: the atomic which we are to dec
1132 * @lock: the mutex to return holding if we dec to 0
1133 *
1134 * return true and hold lock if we dec to 0, return false otherwise
1135 */
1136int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1137{
1138	/* dec if we can't possibly hit 0 */
1139	if (atomic_add_unless(cnt, -1, 1))
1140		return 0;
1141	/* we might hit 0, so take the lock */
1142	mutex_lock(lock);
1143	if (!atomic_dec_and_test(cnt)) {
1144		/* when we actually did the dec, we didn't hit 0 */
1145		mutex_unlock(lock);
1146		return 0;
1147	}
1148	/* we hit 0, and we hold the lock */
1149	return 1;
1150}
1151EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
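/*
 * Illustrative usage sketch (not part of mutex.c): drop a reference and,
 * only when it hits zero, unpublish and free the object while holding the
 * mutex that protects lookups. "my_obj", "my_obj_list" and "my_list_lock"
 * are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_MUTEX(my_list_lock);
static LIST_HEAD(my_obj_list);

struct my_obj {
	atomic_t refcount;
	struct list_head node;
};

static void my_obj_put(struct my_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &my_list_lock))
		return;				/* not the last reference */

	list_del(&obj->node);			/* unpublish under the mutex */
	mutex_unlock(&my_list_lock);
	kfree(obj);
}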
Linux v4.6: kernel/locking/mutex.c
 
  1/*
  2 * kernel/locking/mutex.c
  3 *
  4 * Mutexes: blocking mutual exclusion locks
  5 *
  6 * Started by Ingo Molnar:
  7 *
  8 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  9 *
 10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 11 * David Howells for suggestions and improvements.
 12 *
 13 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 14 *    from the -rt tree, where it was originally implemented for rtmutexes
 15 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 16 *    and Sven Dietrich.
 17 *
 18 * Also see Documentation/locking/mutex-design.txt.
 19 */
 20#include <linux/mutex.h>
 21#include <linux/ww_mutex.h>
 22#include <linux/sched.h>
 23#include <linux/sched/rt.h>
 
 
 24#include <linux/export.h>
 25#include <linux/spinlock.h>
 26#include <linux/interrupt.h>
 27#include <linux/debug_locks.h>
 28#include <linux/osq_lock.h>
 29
 30/*
 31 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 32 * which forces all calls into the slowpath:
 33 */
 
 
 34#ifdef CONFIG_DEBUG_MUTEXES
 35# include "mutex-debug.h"
 36# include <asm-generic/mutex-null.h>
 37/*
 38 * Must be 0 for the debug case so we do not do the unlock outside of the
 39 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 40 * case.
 41 */
 42# undef __mutex_slowpath_needs_to_unlock
 43# define  __mutex_slowpath_needs_to_unlock()	0
 44#else
 45# include "mutex.h"
 46# include <asm/mutex.h>
 47#endif
 48
 49void
 50__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 51{
 52	atomic_set(&lock->count, 1);
 53	spin_lock_init(&lock->wait_lock);
 54	INIT_LIST_HEAD(&lock->wait_list);
 55	mutex_clear_owner(lock);
 56#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 57	osq_lock_init(&lock->osq);
 58#endif
 59
 60	debug_mutex_init(lock, name, key);
 61}
 62
 63EXPORT_SYMBOL(__mutex_init);
 64
 65#ifndef CONFIG_DEBUG_LOCK_ALLOC
 66/*
 67 * We split the mutex lock/unlock logic into separate fastpath and
 68 * slowpath functions, to reduce the register pressure on the fastpath.
 69 * We also put the fastpath first in the kernel image, to make sure the
 70 * branch is predicted by the CPU as default-untaken.
 71 */
 72__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 73
 74/**
 75 * mutex_lock - acquire the mutex
 76 * @lock: the mutex to be acquired
 77 *
 78 * Lock the mutex exclusively for this task. If the mutex is not
 79 * available right now, it will sleep until it can get it.
 80 *
 81 * The mutex must later on be released by the same task that
 82 * acquired it. Recursive locking is not allowed. The task
 83 * may not exit without first unlocking the mutex. Also, kernel
 84 * memory where the mutex resides must not be freed with
 85 * the mutex still locked. The mutex must first be initialized
 86 * (or statically defined) before it can be locked. memset()-ing
 87 * the mutex to 0 is not allowed.
 88 *
 89 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 90 *   checks that will enforce the restrictions and will also do
 91 *   deadlock debugging. )
 92 *
 93 * This function is similar to (but not equivalent to) down().
 94 */
 95void __sched mutex_lock(struct mutex *lock)
 96{
 97	might_sleep();
 98	/*
 99	 * The locking fastpath is the 1->0 transition from
100	 * 'unlocked' into 'locked' state.
101	 */
102	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
103	mutex_set_owner(lock);
104}
105
106EXPORT_SYMBOL(mutex_lock);
107#endif
108
109static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
110						   struct ww_acquire_ctx *ww_ctx)
111{
112#ifdef CONFIG_DEBUG_MUTEXES
113	/*
114	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
115	 * but released with a normal mutex_unlock in this call.
116	 *
117	 * This should never happen, always use ww_mutex_unlock.
118	 */
119	DEBUG_LOCKS_WARN_ON(ww->ctx);
120
121	/*
122	 * Not quite done after calling ww_acquire_done() ?
123	 */
124	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
125
126	if (ww_ctx->contending_lock) {
127		/*
128		 * After -EDEADLK you tried to
129		 * acquire a different ww_mutex? Bad!
130		 */
131		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
132
133		/*
134		 * You called ww_mutex_lock after receiving -EDEADLK,
135		 * but 'forgot' to unlock everything else first?
136		 */
137		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
138		ww_ctx->contending_lock = NULL;
139	}
140
141	/*
142	 * Naughty, using a different class will lead to undefined behavior!
143	 */
144	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
145#endif
146	ww_ctx->acquired++;
147}
148
149/*
150 * After acquiring lock with fastpath or when we lost out in contested
151 * slowpath, set ctx and wake up any waiters so they can recheck.
152 *
153 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
154 * as the fastpath and opportunistic spinning are disabled in that case.
155 */
156static __always_inline void
157ww_mutex_set_context_fastpath(struct ww_mutex *lock,
158			       struct ww_acquire_ctx *ctx)
159{
160	unsigned long flags;
161	struct mutex_waiter *cur;
162
163	ww_mutex_lock_acquired(lock, ctx);
164
165	lock->ctx = ctx;
166
167	/*
168	 * The lock->ctx update should be visible on all cores before
169	 * the atomic read is done, otherwise contended waiters might be
 170	 * missed. A contended waiter will either see ww_ctx == NULL
 171	 * and keep spinning, or it will acquire the wait_lock, add itself
 172	 * to the waiter list and sleep.
173	 */
174	smp_mb(); /* ^^^ */
 
175
176	/*
177	 * Check if lock is contended, if not there is nobody to wake up
178	 */
179	if (likely(atomic_read(&lock->base.count) == 0))
180		return;
181
182	/*
183	 * Uh oh, we raced in fastpath, wake up everyone in this case,
184	 * so they can see the new lock->ctx.
185	 */
186	spin_lock_mutex(&lock->base.wait_lock, flags);
187	list_for_each_entry(cur, &lock->base.wait_list, list) {
188		debug_mutex_wake_waiter(&lock->base, cur);
189		wake_up_process(cur->task);
190	}
191	spin_unlock_mutex(&lock->base.wait_lock, flags);
192}
193
194/*
195 * After acquiring lock in the slowpath set ctx and wake up any
196 * waiters so they can recheck.
197 *
198 * Callers must hold the mutex wait_lock.
199 */
200static __always_inline void
201ww_mutex_set_context_slowpath(struct ww_mutex *lock,
202			      struct ww_acquire_ctx *ctx)
203{
204	struct mutex_waiter *cur;
205
206	ww_mutex_lock_acquired(lock, ctx);
207	lock->ctx = ctx;
208
209	/*
210	 * Give any possible sleeping processes the chance to wake up,
211	 * so they can recheck if they have to back off.
212	 */
213	list_for_each_entry(cur, &lock->base.wait_list, list) {
214		debug_mutex_wake_waiter(&lock->base, cur);
215		wake_up_process(cur->task);
216	}
217}
218
219#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
220/*
221 * Look out! "owner" is an entirely speculative pointer
222 * access and not reliable.
223 */
224static noinline
225bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 
226{
227	bool ret = true;
228
229	rcu_read_lock();
230	while (lock->owner == owner) {
 
231		/*
232		 * Ensure we emit the owner->on_cpu, dereference _after_
233		 * checking lock->owner still matches owner. If that fails,
234		 * owner might point to freed memory. If it still matches,
235		 * the rcu_read_lock() ensures the memory stays valid.
 
 
236		 */
237		barrier();
238
239		if (!owner->on_cpu || need_resched()) {
240			ret = false;
241			break;
242		}
243
244		cpu_relax_lowlatency();
245	}
246	rcu_read_unlock();
247
248	return ret;
249}
250
251/*
252 * Initial check for entering the mutex spinning loop
253 */
254static inline int mutex_can_spin_on_owner(struct mutex *lock)
255{
256	struct task_struct *owner;
257	int retval = 1;
258
 
 
259	if (need_resched())
260		return 0;
261
262	rcu_read_lock();
263	owner = READ_ONCE(lock->owner);
264	if (owner)
265		retval = owner->on_cpu;
266	rcu_read_unlock();
267	/*
268	 * if lock->owner is not set, the mutex owner may have just acquired
269	 * it and not set the owner yet or the mutex has been released.
 
270	 */
271	return retval;
272}
273
274/*
275 * Atomically try to take the lock when it is available
276 */
277static inline bool mutex_try_to_acquire(struct mutex *lock)
278{
279	return !mutex_is_locked(lock) &&
280		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
281}
282
283/*
284 * Optimistic spinning.
285 *
286 * We try to spin for acquisition when we find that the lock owner
287 * is currently running on a (different) CPU and while we don't
288 * need to reschedule. The rationale is that if the lock owner is
289 * running, it is likely to release the lock soon.
290 *
291 * Since this needs the lock owner, and this mutex implementation
292 * doesn't track the owner atomically in the lock field, we need to
293 * track it non-atomically.
294 *
295 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
296 * to serialize everything.
297 *
298 * The mutex spinners are queued up using MCS lock so that only one
299 * spinner can compete for the mutex. However, if mutex spinning isn't
300 * going to happen, there is no point in going through the lock/unlock
301 * overhead.
302 *
303 * Returns true when the lock was taken, otherwise false, indicating
304 * that we need to jump to the slowpath and sleep.
305 */
306static bool mutex_optimistic_spin(struct mutex *lock,
307				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 
308{
309	struct task_struct *task = current;
310
311	if (!mutex_can_spin_on_owner(lock))
312		goto done;
313
314	/*
315	 * In order to avoid a stampede of mutex spinners trying to
316	 * acquire the mutex all at once, the spinners need to take a
317	 * MCS (queued) lock first before spinning on the owner field.
318	 */
319	if (!osq_lock(&lock->osq))
320		goto done;
321
322	while (true) {
323		struct task_struct *owner;
324
325		if (use_ww_ctx && ww_ctx->acquired > 0) {
326			struct ww_mutex *ww;
327
328			ww = container_of(lock, struct ww_mutex, base);
329			/*
330			 * If ww->ctx is set the contents are undefined, only
331			 * by acquiring wait_lock there is a guarantee that
332			 * they are not invalid when reading.
333			 *
334			 * As such, when deadlock detection needs to be
335			 * performed the optimistic spinning cannot be done.
336			 */
337			if (READ_ONCE(ww->ctx))
338				break;
339		}
340
341		/*
342		 * If there's an owner, wait for it to either
343		 * release the lock or go to sleep.
344		 */
345		owner = READ_ONCE(lock->owner);
346		if (owner && !mutex_spin_on_owner(lock, owner))
347			break;
348
349		/* Try to acquire the mutex if it is unlocked. */
350		if (mutex_try_to_acquire(lock)) {
351			lock_acquired(&lock->dep_map, ip);
352
353			if (use_ww_ctx) {
354				struct ww_mutex *ww;
355				ww = container_of(lock, struct ww_mutex, base);
356
357				ww_mutex_set_context_fastpath(ww, ww_ctx);
358			}
359
360			mutex_set_owner(lock);
361			osq_unlock(&lock->osq);
362			return true;
363		}
364
365		/*
366		 * When there's no owner, we might have preempted between the
367		 * owner acquiring the lock and setting the owner field. If
368		 * we're an RT task that will live-lock because we won't let
369		 * the owner complete.
370		 */
371		if (!owner && (need_resched() || rt_task(task)))
372			break;
373
374		/*
375		 * The cpu_relax() call is a compiler barrier which forces
376		 * everything in this loop to be re-loaded. We don't need
377		 * memory barriers as we'll eventually observe the right
378		 * values at the cost of a few extra spins.
379		 */
380		cpu_relax_lowlatency();
381	}
382
383	osq_unlock(&lock->osq);
384done:
385	/*
386	 * If we fell out of the spin path because of need_resched(),
387	 * reschedule now, before we try-lock the mutex. This avoids getting
388	 * scheduled out right after we obtained the mutex.
389	 */
390	if (need_resched()) {
391		/*
392		 * We _should_ have TASK_RUNNING here, but just in case
393		 * we do not, make it so, otherwise we might get stuck.
394		 */
395		__set_current_state(TASK_RUNNING);
396		schedule_preempt_disabled();
397	}
398
399	return false;
400}
401#else
402static bool mutex_optimistic_spin(struct mutex *lock,
403				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 
404{
405	return false;
406}
407#endif
408
409__visible __used noinline
410void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
411
412/**
413 * mutex_unlock - release the mutex
414 * @lock: the mutex to be released
415 *
416 * Unlock a mutex that has been locked by this task previously.
417 *
418 * This function must not be used in interrupt context. Unlocking
419 * of a not locked mutex is not allowed.
420 *
421 * This function is similar to (but not equivalent to) up().
422 */
423void __sched mutex_unlock(struct mutex *lock)
424{
425	/*
426	 * The unlocking fastpath is the 0->1 transition from 'locked'
427	 * into 'unlocked' state:
428	 */
429#ifndef CONFIG_DEBUG_MUTEXES
430	/*
431	 * When debugging is enabled we must not clear the owner before time,
432	 * the slow path will always be taken, and that clears the owner field
433	 * after verifying that it was indeed current.
434	 */
435	mutex_clear_owner(lock);
436#endif
437	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
438}
439
440EXPORT_SYMBOL(mutex_unlock);
441
442/**
443 * ww_mutex_unlock - release the w/w mutex
444 * @lock: the mutex to be released
445 *
446 * Unlock a mutex that has been locked by this task previously with any of the
447 * ww_mutex_lock* functions (with or without an acquire context). It is
448 * forbidden to release the locks after releasing the acquire context.
449 *
450 * This function must not be used in interrupt context. Unlocking
451 * of a unlocked mutex is not allowed.
452 */
453void __sched ww_mutex_unlock(struct ww_mutex *lock)
454{
455	/*
456	 * The unlocking fastpath is the 0->1 transition from 'locked'
457	 * into 'unlocked' state:
458	 */
459	if (lock->ctx) {
460#ifdef CONFIG_DEBUG_MUTEXES
461		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
462#endif
463		if (lock->ctx->acquired > 0)
464			lock->ctx->acquired--;
465		lock->ctx = NULL;
466	}
467
468#ifndef CONFIG_DEBUG_MUTEXES
469	/*
470	 * When debugging is enabled we must not clear the owner before time,
471	 * the slow path will always be taken, and that clears the owner field
472	 * after verifying that it was indeed current.
473	 */
474	mutex_clear_owner(&lock->base);
475#endif
476	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
477}
478EXPORT_SYMBOL(ww_mutex_unlock);
479
480static inline int __sched
481__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
482{
483	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
484	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
485
486	if (!hold_ctx)
487		return 0;
488
489	if (unlikely(ctx == hold_ctx))
490		return -EALREADY;
491
492	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
493	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
494#ifdef CONFIG_DEBUG_MUTEXES
495		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
496		ctx->contending_lock = ww;
497#endif
498		return -EDEADLK;
499	}
500
501	return 0;
502}
503
504/*
505 * Lock a mutex (possibly interruptible), slowpath:
506 */
507static __always_inline int __sched
508__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
509		    struct lockdep_map *nest_lock, unsigned long ip,
510		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
511{
512	struct task_struct *task = current;
513	struct mutex_waiter waiter;
514	unsigned long flags;
515	int ret;
516
517	preempt_disable();
518	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
519
520	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
 
 
521		/* got the lock, yay! */
522		preempt_enable();
523		return 0;
524	}
525
526	spin_lock_mutex(&lock->wait_lock, flags);
527
528	/*
529	 * Once more, try to acquire the lock. Only try-lock the mutex if
530	 * it is unlocked to reduce unnecessary xchg() operations.
531	 */
532	if (!mutex_is_locked(lock) &&
533	    (atomic_xchg_acquire(&lock->count, 0) == 1))
 
 
534		goto skip_wait;
 
535
536	debug_mutex_lock_common(lock, &waiter);
537	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 
538
539	/* add waiting tasks to the end of the waitqueue (FIFO): */
540	list_add_tail(&waiter.list, &lock->wait_list);
541	waiter.task = task;
542
543	lock_contended(&lock->dep_map, ip);
544
 
 
545	for (;;) {
 
 
546		/*
547		 * Lets try to take the lock again - this is needed even if
548		 * we get here for the first time (shortly after failing to
549		 * acquire the lock), to make sure that we get a wakeup once
550		 * it's unlocked. Later on, if we sleep, this is the
551		 * operation that gives us the lock. We xchg it to -1, so
552		 * that when we release the lock, we properly wake up the
553		 * other waiters. We only attempt the xchg if the count is
554		 * non-negative in order to avoid unnecessary xchg operations:
555		 */
556		if (atomic_read(&lock->count) >= 0 &&
557		    (atomic_xchg_acquire(&lock->count, -1) == 1))
558			break;
559
560		/*
561		 * got a signal? (This code gets eliminated in the
562		 * TASK_UNINTERRUPTIBLE case.)
 
563		 */
564		if (unlikely(signal_pending_state(state, task))) {
565			ret = -EINTR;
566			goto err;
567		}
568
569		if (use_ww_ctx && ww_ctx->acquired > 0) {
570			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
571			if (ret)
572				goto err;
573		}
574
575		__set_task_state(task, state);
 
 
 
576
577		/* didn't get the lock, go to sleep: */
578		spin_unlock_mutex(&lock->wait_lock, flags);
579		schedule_preempt_disabled();
580		spin_lock_mutex(&lock->wait_lock, flags);
581	}
582	__set_task_state(task, TASK_RUNNING);
583
584	mutex_remove_waiter(lock, &waiter, current_thread_info());
585	/* set it to 0 if there are no waiters left: */
586	if (likely(list_empty(&lock->wait_list)))
587		atomic_set(&lock->count, 0);
588	debug_mutex_free_waiter(&waiter);
589
590skip_wait:
591	/* got the lock - cleanup and rejoice! */
592	lock_acquired(&lock->dep_map, ip);
593	mutex_set_owner(lock);
594
595	if (use_ww_ctx) {
596		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
597		ww_mutex_set_context_slowpath(ww, ww_ctx);
598	}
599
600	spin_unlock_mutex(&lock->wait_lock, flags);
601	preempt_enable();
602	return 0;
603
604err:
605	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
606	spin_unlock_mutex(&lock->wait_lock, flags);
607	debug_mutex_free_waiter(&waiter);
608	mutex_release(&lock->dep_map, 1, ip);
609	preempt_enable();
610	return ret;
611}
612
613#ifdef CONFIG_DEBUG_LOCK_ALLOC
614void __sched
615mutex_lock_nested(struct mutex *lock, unsigned int subclass)
616{
617	might_sleep();
618	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
619			    subclass, NULL, _RET_IP_, NULL, 0);
620}
621
622EXPORT_SYMBOL_GPL(mutex_lock_nested);
623
624void __sched
625_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
626{
627	might_sleep();
628	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
629			    0, nest, _RET_IP_, NULL, 0);
630}
631
632EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
633
634int __sched
635mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
636{
637	might_sleep();
638	return __mutex_lock_common(lock, TASK_KILLABLE,
639				   subclass, NULL, _RET_IP_, NULL, 0);
640}
641EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
642
643int __sched
644mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
645{
646	might_sleep();
647	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
648				   subclass, NULL, _RET_IP_, NULL, 0);
649}
650
651EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
652
653static inline int
654ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
655{
656#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
657	unsigned tmp;
658
659	if (ctx->deadlock_inject_countdown-- == 0) {
660		tmp = ctx->deadlock_inject_interval;
661		if (tmp > UINT_MAX/4)
662			tmp = UINT_MAX;
663		else
664			tmp = tmp*2 + tmp + tmp/2;
665
666		ctx->deadlock_inject_interval = tmp;
667		ctx->deadlock_inject_countdown = tmp;
668		ctx->contending_lock = lock;
669
670		ww_mutex_unlock(lock);
671
672		return -EDEADLK;
673	}
674#endif
675
676	return 0;
677}
678
679int __sched
680__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
681{
682	int ret;
683
684	might_sleep();
685	ret =  __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
686				   0, &ctx->dep_map, _RET_IP_, ctx, 1);
687	if (!ret && ctx->acquired > 1)
688		return ww_mutex_deadlock_injection(lock, ctx);
689
690	return ret;
691}
692EXPORT_SYMBOL_GPL(__ww_mutex_lock);
693
694int __sched
695__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
696{
697	int ret;
698
699	might_sleep();
700	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
701				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
702
703	if (!ret && ctx->acquired > 1)
704		return ww_mutex_deadlock_injection(lock, ctx);
705
706	return ret;
707}
708EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
709
710#endif
711
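/*
 * Example (illustrative sketch, not part of this file): the caller-side
 * pattern that the -EDEADLK return value (and the deadlock injection above)
 * exercises.  The names my_ww_class, my_buf and grab_two() are hypothetical;
 * only the ww_mutex/ww_acquire_ctx calls are the real API.
 */
static DEFINE_WW_CLASS(my_ww_class);

struct my_buf {
	struct ww_mutex lock;
	/* ... payload ... */
};

static int grab_two(struct my_buf *a, struct my_buf *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class);

	ret = ww_mutex_lock(&a->lock, &ctx);
	if (ret)	/* the first lock in a context does not return -EDEADLK */
		goto out;

	while ((ret = ww_mutex_lock(&b->lock, &ctx)) == -EDEADLK) {
		/*
		 * An older transaction owns b: drop a, wait for b without
		 * deadlock detection, then retry the other lock.
		 */
		ww_mutex_unlock(&a->lock);
		ww_mutex_lock_slow(&b->lock, &ctx);
		swap(a, b);
	}
	if (ret) {
		ww_mutex_unlock(&a->lock);
		goto out;
	}

	ww_acquire_done(&ctx);
	/* ... both buffers are locked here ... */
	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
out:
	ww_acquire_fini(&ctx);
	return ret;
}
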
712/*
713 * Release the lock, slowpath:
714 */
715static inline void
716__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
717{
718	unsigned long flags;
719	WAKE_Q(wake_q);
720
721	/*
722	 * As a performance measure, release the lock before doing the other
723	 * wakeup-related duties that follow. This allows other tasks to acquire
724	 * the lock sooner, while this path still handles the cleanup of the
725	 * current unlock. This works because we do not enforce strict equivalence
726	 * between the mutex counter and the wait_list.
727	 *
728	 *
729	 * Some architectures leave the lock unlocked in the fastpath failure
730	 * case, others need to leave it locked. In the latter case we have to
731	 * unlock it here - as the lock counter is currently 0 or negative.
732	 */
733	if (__mutex_slowpath_needs_to_unlock())
734		atomic_set(&lock->count, 1);
735
736	spin_lock_mutex(&lock->wait_lock, flags);
737	mutex_release(&lock->dep_map, nested, _RET_IP_);
738	debug_mutex_unlock(lock);
739
740	if (!list_empty(&lock->wait_list)) {
741		/* get the first entry from the wait-list: */
742		struct mutex_waiter *waiter =
743				list_entry(lock->wait_list.next,
744					   struct mutex_waiter, list);
745
746		debug_mutex_wake_waiter(lock, waiter);
747		wake_q_add(&wake_q, waiter->task);
748	}
749
750	spin_unlock_mutex(&lock->wait_lock, flags);
751	wake_up_q(&wake_q);
752}
753
754/*
755 * Release the lock, slowpath:
756 */
757__visible void
758__mutex_unlock_slowpath(atomic_t *lock_count)
759{
760	struct mutex *lock = container_of(lock_count, struct mutex, count);
761
762	__mutex_unlock_common_slowpath(lock, 1);
763}
764
765#ifndef CONFIG_DEBUG_LOCK_ALLOC
766/*
767 * Here come the less common (and hence less performance-critical) APIs:
768 * mutex_lock_interruptible() and mutex_trylock().
769 */
770static noinline int __sched
771__mutex_lock_killable_slowpath(struct mutex *lock);
772
773static noinline int __sched
774__mutex_lock_interruptible_slowpath(struct mutex *lock);
775
776/**
777 * mutex_lock_interruptible - acquire the mutex, interruptible
778 * @lock: the mutex to be acquired
779 *
780 * Lock the mutex like mutex_lock(): sleep until the mutex becomes
781 * available and return 0 once it has been acquired. If a signal
782 * arrives while waiting for the lock, this function returns
783 * -EINTR.
784 *
785 * This function is similar to (but not equivalent to) down_interruptible().
786 */
787int __sched mutex_lock_interruptible(struct mutex *lock)
788{
789	int ret;
790
791	might_sleep();
792	ret =  __mutex_fastpath_lock_retval(&lock->count);
793	if (likely(!ret)) {
794		mutex_set_owner(lock);
795		return 0;
796	} else
797		return __mutex_lock_interruptible_slowpath(lock);
798}
799
800EXPORT_SYMBOL(mutex_lock_interruptible);
801
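/*
 * Example (illustrative sketch, not part of this file): typical use of
 * mutex_lock_interruptible() from process context.  my_dev_mutex and
 * my_dev_write() are hypothetical names; the point is the error handling
 * when a signal interrupts the sleep.
 */
static DEFINE_MUTEX(my_dev_mutex);

static int my_dev_write(void)
{
	if (mutex_lock_interruptible(&my_dev_mutex))
		return -ERESTARTSYS;	/* a signal arrived; let the syscall be restarted */

	/* ... critical section: touch the shared device state ... */

	mutex_unlock(&my_dev_mutex);
	return 0;
}
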
802int __sched mutex_lock_killable(struct mutex *lock)
803{
804	int ret;
805
806	might_sleep();
807	ret = __mutex_fastpath_lock_retval(&lock->count);
808	if (likely(!ret)) {
809		mutex_set_owner(lock);
810		return 0;
811	} else
812		return __mutex_lock_killable_slowpath(lock);
813}
814EXPORT_SYMBOL(mutex_lock_killable);
815
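/*
 * Example (illustrative sketch, not part of this file): mutex_lock_killable()
 * is preferred where the wait may be long and only a fatal signal (e.g.
 * SIGKILL) should abort it.  my_state_mutex and my_update_state() are
 * hypothetical names.
 */
static DEFINE_MUTEX(my_state_mutex);

static int my_update_state(void)
{
	if (mutex_lock_killable(&my_state_mutex))
		return -EINTR;		/* the task was killed while sleeping */

	/* ... update the shared state under the mutex ... */

	mutex_unlock(&my_state_mutex);
	return 0;
}
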
816__visible void __sched
817__mutex_lock_slowpath(atomic_t *lock_count)
818{
819	struct mutex *lock = container_of(lock_count, struct mutex, count);
820
821	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
822			    NULL, _RET_IP_, NULL, 0);
823}
824
825static noinline int __sched
826__mutex_lock_killable_slowpath(struct mutex *lock)
827{
828	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
829				   NULL, _RET_IP_, NULL, 0);
830}
831
832static noinline int __sched
833__mutex_lock_interruptible_slowpath(struct mutex *lock)
834{
835	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
836				   NULL, _RET_IP_, NULL, 0);
837}
838
839static noinline int __sched
840__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
841{
842	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
843				   NULL, _RET_IP_, ctx, 1);
844}
845
846static noinline int __sched
847__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
848					    struct ww_acquire_ctx *ctx)
849{
850	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
851				   NULL, _RET_IP_, ctx, 1);
852}
853
854#endif
855
856/*
857 * Spinlock-based trylock; we take the spinlock and check whether we
858 * can get the lock:
859 */
860static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
861{
862	struct mutex *lock = container_of(lock_count, struct mutex, count);
863	unsigned long flags;
864	int prev;
865
866	/* No need to trylock if the mutex is locked. */
867	if (mutex_is_locked(lock))
868		return 0;
869
870	spin_lock_mutex(&lock->wait_lock, flags);
871
872	prev = atomic_xchg_acquire(&lock->count, -1);
873	if (likely(prev == 1)) {
874		mutex_set_owner(lock);
875		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
876	}
877
878	/* Set it back to 0 if there are no waiters: */
879	if (likely(list_empty(&lock->wait_list)))
880		atomic_set(&lock->count, 0);
881
882	spin_unlock_mutex(&lock->wait_lock, flags);
883
884	return prev == 1;
885}
886
887/**
888 * mutex_trylock - try to acquire the mutex, without waiting
889 * @lock: the mutex to be acquired
890 *
891 * Try to acquire the mutex atomically. Returns 1 if the mutex
892 * has been acquired successfully, and 0 on contention.
893 *
894 * NOTE: this function follows the spin_trylock() convention, so
895 * it is negated from the down_trylock() return values! Be careful
896 * about this when converting semaphore users to mutexes.
897 *
898 * This function must not be used in interrupt context. The
899 * mutex must be released by the same task that acquired it.
900 */
901int __sched mutex_trylock(struct mutex *lock)
902{
903	int ret;
904
905	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
906	if (ret)
907		mutex_set_owner(lock);
908
909	return ret;
910}
911EXPORT_SYMBOL(mutex_trylock);
912
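/*
 * Example (illustrative sketch, not part of this file): because
 * mutex_trylock() returns 1 on success, it is used directly in an if(),
 * unlike down_trylock().  my_cleanup_mutex and my_try_cleanup() are
 * hypothetical names.
 */
static DEFINE_MUTEX(my_cleanup_mutex);

static bool my_try_cleanup(void)
{
	if (!mutex_trylock(&my_cleanup_mutex))
		return false;		/* contended; the caller retries later */

	/* ... do the optional work while holding the mutex ... */

	mutex_unlock(&my_cleanup_mutex);
	return true;
}
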
913#ifndef CONFIG_DEBUG_LOCK_ALLOC
914int __sched
915__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
916{
917	int ret;
918
919	might_sleep();
920
921	ret = __mutex_fastpath_lock_retval(&lock->base.count);
922
923	if (likely(!ret)) {
924		ww_mutex_set_context_fastpath(lock, ctx);
925		mutex_set_owner(&lock->base);
926	} else
927		ret = __ww_mutex_lock_slowpath(lock, ctx);
928	return ret;
929}
930EXPORT_SYMBOL(__ww_mutex_lock);
931
932int __sched
933__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
934{
935	int ret;
936
937	might_sleep();
938
939	ret = __mutex_fastpath_lock_retval(&lock->base.count);
940
941	if (likely(!ret)) {
942		ww_mutex_set_context_fastpath(lock, ctx);
943		mutex_set_owner(&lock->base);
944	} else
945		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
946	return ret;
947}
948EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
949
950#endif
951
952/**
953 * atomic_dec_and_mutex_lock - return holding the mutex if we decrement to 0
954 * @cnt: the atomic counter to decrement
955 * @lock: the mutex to return holding if the count drops to 0
956 *
957 * Return 1 with @lock held if the decrement brings @cnt to 0; return 0 otherwise.
958 */
959int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
960{
961	/* dec if we can't possibly hit 0 */
962	if (atomic_add_unless(cnt, -1, 1))
963		return 0;
964	/* we might hit 0, so take the lock */
965	mutex_lock(lock);
966	if (!atomic_dec_and_test(cnt)) {
967		/* when we actually did the dec, we didn't hit 0 */
968		mutex_unlock(lock);
969		return 0;
970	}
971	/* we hit 0, and we hold the lock */
972	return 1;
973}
974EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
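
/*
 * Example (illustrative sketch, not part of this file): the classic use of
 * atomic_dec_and_mutex_lock() is dropping a reference where the final put
 * must unlink the object from a mutex-protected list.  my_node, my_node_put()
 * and my_list_mutex are hypothetical names.
 */
static DEFINE_MUTEX(my_list_mutex);

struct my_node {
	atomic_t refcount;
	struct list_head entry;
};

static void my_node_put(struct my_node *node)
{
	/* returns 1 with the mutex held only when the count reaches 0 */
	if (!atomic_dec_and_mutex_lock(&node->refcount, &my_list_mutex))
		return;

	list_del(&node->entry);
	mutex_unlock(&my_list_mutex);
	/* ... free the node ... */
}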