/* Linux v6.13.7 */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

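/*
 * Editor's note: a minimal usage sketch, not part of the upstream file.
 * It shows the two common ways to obtain an initialized mutex: the static
 * DEFINE_MUTEX() initializer and the mutex_init() wrapper (which expands
 * to __mutex_init() above). The struct and names are hypothetical.
 */
#if 0 /* illustrative only */
static DEFINE_MUTEX(example_static_lock);	/* static definition */

struct example_dev {
	struct mutex lock;
	int counter;
};

static void example_dev_setup(struct example_dev *dev)
{
	mutex_init(&dev->lock);	/* dynamic initialization before first use */
	dev->counter = 0;
}
#endif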
static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
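
/*
 * Editor's note: in this kernel version the owner-word layout is defined in
 * kernel/locking/mutex.h (included above). lock->owner packs the owning
 * 'struct task_struct *' (NULL means unowned) together with state flags in
 * the low bits, which are free because task_struct is aligned to at least
 * L1_CACHE_BYTES: MUTEX_FLAG_WAITERS (0x01) marks a non-empty wait list so
 * unlock must issue a wakeup, MUTEX_FLAG_HANDOFF (0x02) asks unlock to hand
 * the lock to the top waiter, MUTEX_FLAG_PICKUP (0x04) marks a completed
 * handoff waiting for pickup, and MUTEX_FLAGS (0x07) masks all three.
 */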

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task is NULL, this is
 * equivalent to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF,
 * preserves WAITERS. Provides RELEASE semantics like a regular unlock;
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
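
/*
 * Editor's note: a minimal usage sketch, not part of the upstream file.
 * It shows the canonical lock/unlock pairing around a critical section;
 * the surrounding names are hypothetical.
 */
#if 0 /* illustrative only */
static DEFINE_MUTEX(stats_lock);
static unsigned long stats_events;

static void stats_record_event(void)
{
	mutex_lock(&stats_lock);	/* may sleep; process context only */
	stats_events++;			/* critical section */
	mutex_unlock(&stats_lock);	/* same task must unlock */
}
#endif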

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * not invalid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. We already
		 * disabled preemption, which is equivalent to an RCU
		 * read-side critical section in the optimistic spinning
		 * code, so the task_struct won't go away during the
		 * spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to an RCU
	 * read-side critical section in the optimistic spinning code, so
	 * the task_struct won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * The caller must ensure that the mutex stays alive until this function has
 * returned - mutex_unlock() can NOT directly be used to release an object such
 * that another concurrent task can free it.
 * Mutexes are different from spinlocks & refcounts in this aspect.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	DEFINE_WAKE_Q(wake_q);
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	unsigned long flags;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		/* Make sure we do wakeups before calling schedule */
		wake_up_q(&wake_q);
		wake_q_init(&wake_q);

		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	wake_up_q(&wake_q);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if a
 * @ww_ctx is specified, -EALREADY handling may happen in calls to
 * ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
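
/*
 * Editor's note: a minimal usage sketch, not part of the upstream file.
 * It shows ww_mutex_trylock() with an acquire context; the ww_class and
 * buffer structure are hypothetical.
 */
#if 0 /* illustrative only */
static DEFINE_WW_CLASS(example_ww_class);

struct example_buf {
	struct ww_mutex lock;
};

static bool example_try_grab(struct example_buf *buf, struct ww_acquire_ctx *ctx)
{
	/* returns 1 on success, 0 on contention; no deadlock handling */
	return ww_mutex_trylock(&buf->lock, ctx) == 1;
}
#endif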

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif
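
/*
 * Editor's note: a minimal sketch of the wound/wait acquire pattern, not
 * part of the upstream file. Two locks are taken under one acquire context;
 * on -EDEADLK the loser backs off, sleeps on the contended lock with
 * ww_mutex_lock_slow(), and retries. It reuses the hypothetical example_buf
 * and example_ww_class from the sketch above.
 */
#if 0 /* illustrative only */
static int example_lock_pair(struct example_buf *a, struct example_buf *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);

	ret = ww_mutex_lock(&a->lock, &ctx);
	if (ret)
		goto out;

	while ((ret = ww_mutex_lock(&b->lock, &ctx)) == -EDEADLK) {
		/* wounded: drop what we hold, sleep on the contended lock */
		ww_mutex_unlock(&a->lock);
		ww_mutex_lock_slow(&b->lock, &ctx);
		swap(a, b);	/* we now hold a (the old b); retake the other */
	}
	if (ret) {
		ww_mutex_unlock(&a->lock);
		goto out;
	}

	ww_acquire_done(&ctx);
	/* ... both buffers are locked; do the work here ... */
	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
out:
	ww_acquire_fini(&ctx);
	return ret;
}
#endif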

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;
	unsigned long flags;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF: in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	preempt_disable();
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
	preempt_enable();
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
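
/*
 * Editor's note: a minimal usage sketch, not part of the upstream file.
 * An interruptible acquisition must check the return value; the handler
 * and names are hypothetical (example_dev is from the sketch further up).
 */
#if 0 /* illustrative only */
static long example_ioctl_op(struct example_dev *dev)
{
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* a signal arrived while sleeping */

	dev->counter++;			/* critical section */
	mutex_unlock(&dev->lock);
	return 0;
}
#endif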

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
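
/*
 * Editor's note: a minimal usage sketch, not part of the upstream file.
 * Note the spin_trylock()-style convention: nonzero means the lock was
 * taken, the opposite of down_trylock(). Names are hypothetical
 * (example_dev is from the sketch further up).
 */
#if 0 /* illustrative only */
static void example_poll_update(struct example_dev *dev)
{
	if (!mutex_trylock(&dev->lock))
		return;		/* contended; skip this update */

	dev->counter++;
	mutex_unlock(&dev->lock);
}
#endif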

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return: 1, with the mutex held, if the decrement reached 0; 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
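
/*
 * Editor's note: a minimal usage sketch, not part of the upstream file.
 * atomic_dec_and_mutex_lock() suits refcounted teardown where the final
 * put must run under a lock; the structure and names are hypothetical.
 */
#if 0 /* illustrative only */
static DEFINE_MUTEX(registry_lock);

struct example_obj {
	atomic_t refs;
	struct list_head node;
};

static void example_put(struct example_obj *obj)
{
	/* only the thread that drops the count to 0 enters, holding the lock */
	if (!atomic_dec_and_mutex_lock(&obj->refs, &registry_lock))
		return;

	list_del(&obj->node);		/* remove under registry_lock */
	mutex_unlock(&registry_lock);
	kfree(obj);
}
#endif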
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kernel/locking/mutex.c
   4 *
   5 * Mutexes: blocking mutual exclusion locks
   6 *
   7 * Started by Ingo Molnar:
   8 *
   9 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  10 *
  11 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  12 * David Howells for suggestions and improvements.
  13 *
  14 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
  15 *    from the -rt tree, where it was originally implemented for rtmutexes
  16 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
  17 *    and Sven Dietrich.
  18 *
  19 * Also see Documentation/locking/mutex-design.rst.
  20 */
  21#include <linux/mutex.h>
  22#include <linux/ww_mutex.h>
  23#include <linux/sched/signal.h>
  24#include <linux/sched/rt.h>
  25#include <linux/sched/wake_q.h>
  26#include <linux/sched/debug.h>
  27#include <linux/export.h>
  28#include <linux/spinlock.h>
  29#include <linux/interrupt.h>
  30#include <linux/debug_locks.h>
  31#include <linux/osq_lock.h>
  32
  33#define CREATE_TRACE_POINTS
  34#include <trace/events/lock.h>
  35
  36#ifndef CONFIG_PREEMPT_RT
  37#include "mutex.h"
  38
  39#ifdef CONFIG_DEBUG_MUTEXES
  40# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
  41#else
  42# define MUTEX_WARN_ON(cond)
  43#endif
  44
  45void
  46__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  47{
  48	atomic_long_set(&lock->owner, 0);
  49	raw_spin_lock_init(&lock->wait_lock);
  50	INIT_LIST_HEAD(&lock->wait_list);
  51#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  52	osq_lock_init(&lock->osq);
  53#endif
  54
  55	debug_mutex_init(lock, name, key);
  56}
  57EXPORT_SYMBOL(__mutex_init);
  58
  59/*
  60 * @owner: contains: 'struct task_struct *' to the current lock owner,
  61 * NULL means not owned. Since task_struct pointers are aligned at
  62 * at least L1_CACHE_BYTES, we have low bits to store extra state.
  63 *
  64 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
  65 * Bit1 indicates unlock needs to hand the lock to the top-waiter
  66 * Bit2 indicates handoff has been done and we're waiting for pickup.
  67 */
  68#define MUTEX_FLAG_WAITERS	0x01
  69#define MUTEX_FLAG_HANDOFF	0x02
  70#define MUTEX_FLAG_PICKUP	0x04
  71
  72#define MUTEX_FLAGS		0x07
  73
  74/*
  75 * Internal helper function; C doesn't allow us to hide it :/
  76 *
  77 * DO NOT USE (outside of mutex code).
  78 */
  79static inline struct task_struct *__mutex_owner(struct mutex *lock)
  80{
  81	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
  82}
  83
  84static inline struct task_struct *__owner_task(unsigned long owner)
  85{
  86	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
  87}
  88
  89bool mutex_is_locked(struct mutex *lock)
  90{
  91	return __mutex_owner(lock) != NULL;
  92}
  93EXPORT_SYMBOL(mutex_is_locked);
  94
  95static inline unsigned long __owner_flags(unsigned long owner)
  96{
  97	return owner & MUTEX_FLAGS;
  98}
  99
 100/*
 101 * Returns: __mutex_owner(lock) on failure or NULL on success.
 102 */
 103static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
 104{
 105	unsigned long owner, curr = (unsigned long)current;
 106
 107	owner = atomic_long_read(&lock->owner);
 108	for (;;) { /* must loop, can race against a flag */
 109		unsigned long flags = __owner_flags(owner);
 110		unsigned long task = owner & ~MUTEX_FLAGS;
 111
 112		if (task) {
 113			if (flags & MUTEX_FLAG_PICKUP) {
 114				if (task != curr)
 115					break;
 116				flags &= ~MUTEX_FLAG_PICKUP;
 117			} else if (handoff) {
 118				if (flags & MUTEX_FLAG_HANDOFF)
 119					break;
 120				flags |= MUTEX_FLAG_HANDOFF;
 121			} else {
 122				break;
 123			}
 124		} else {
 125			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
 126			task = curr;
 127		}
 128
 129		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
 130			if (task == curr)
 131				return NULL;
 132			break;
 133		}
 134	}
 135
 136	return __owner_task(owner);
 137}
 138
 139/*
 140 * Trylock or set HANDOFF
 141 */
 142static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
 143{
 144	return !__mutex_trylock_common(lock, handoff);
 145}
 146
 147/*
 148 * Actual trylock that will work on any unlocked state.
 149 */
 150static inline bool __mutex_trylock(struct mutex *lock)
 151{
 152	return !__mutex_trylock_common(lock, false);
 153}
 154
 155#ifndef CONFIG_DEBUG_LOCK_ALLOC
 156/*
 157 * Lockdep annotations are contained to the slow paths for simplicity.
 158 * There is nothing that would stop spreading the lockdep annotations outwards
 159 * except more code.
 160 */
 161
 162/*
 163 * Optimistic trylock that only works in the uncontended case. Make sure to
 164 * follow with a __mutex_trylock() before failing.
 165 */
 166static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 167{
 168	unsigned long curr = (unsigned long)current;
 169	unsigned long zero = 0UL;
 170
 171	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
 172		return true;
 173
 174	return false;
 175}
 176
 177static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 178{
 179	unsigned long curr = (unsigned long)current;
 180
 181	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
 182}
 183#endif
 184
 185static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
 186{
 187	atomic_long_or(flag, &lock->owner);
 188}
 189
 190static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
 191{
 192	atomic_long_andnot(flag, &lock->owner);
 193}
 194
 195static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
 196{
 197	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
 198}
 199
 200/*
 201 * Add @waiter to a given location in the lock wait_list and set the
 202 * FLAG_WAITERS flag if it's the first waiter.
 203 */
 204static void
 205__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 206		   struct list_head *list)
 207{
 208	debug_mutex_add_waiter(lock, waiter, current);
 209
 210	list_add_tail(&waiter->list, list);
 211	if (__mutex_waiter_is_first(lock, waiter))
 212		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 213}
 214
 215static void
 216__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
 217{
 218	list_del(&waiter->list);
 219	if (likely(list_empty(&lock->wait_list)))
 220		__mutex_clear_flag(lock, MUTEX_FLAGS);
 221
 222	debug_mutex_remove_waiter(lock, waiter, current);
 223}
 224
 225/*
 226 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 227 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 228 * WAITERS. Provides RELEASE semantics like a regular unlock, the
 229 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 230 */
 231static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 232{
 233	unsigned long owner = atomic_long_read(&lock->owner);
 234
 235	for (;;) {
 236		unsigned long new;
 237
 238		MUTEX_WARN_ON(__owner_task(owner) != current);
 239		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 240
 241		new = (owner & MUTEX_FLAG_WAITERS);
 242		new |= (unsigned long)task;
 243		if (task)
 244			new |= MUTEX_FLAG_PICKUP;
 245
 246		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
 247			break;
 248	}
 249}
 250
 251#ifndef CONFIG_DEBUG_LOCK_ALLOC
 252/*
 253 * We split the mutex lock/unlock logic into separate fastpath and
 254 * slowpath functions, to reduce the register pressure on the fastpath.
 255 * We also put the fastpath first in the kernel image, to make sure the
 256 * branch is predicted by the CPU as default-untaken.
 257 */
 258static void __sched __mutex_lock_slowpath(struct mutex *lock);
 259
 260/**
 261 * mutex_lock - acquire the mutex
 262 * @lock: the mutex to be acquired
 263 *
 264 * Lock the mutex exclusively for this task. If the mutex is not
 265 * available right now, it will sleep until it can get it.
 266 *
 267 * The mutex must later on be released by the same task that
 268 * acquired it. Recursive locking is not allowed. The task
 269 * may not exit without first unlocking the mutex. Also, kernel
 270 * memory where the mutex resides must not be freed with
 271 * the mutex still locked. The mutex must first be initialized
 272 * (or statically defined) before it can be locked. memset()-ing
 273 * the mutex to 0 is not allowed.
 274 *
 275 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 276 * checks that will enforce the restrictions and will also do
 277 * deadlock debugging)
 278 *
 279 * This function is similar to (but not equivalent to) down().
 280 */
 281void __sched mutex_lock(struct mutex *lock)
 282{
 283	might_sleep();
 284
 285	if (!__mutex_trylock_fast(lock))
 286		__mutex_lock_slowpath(lock);
 287}
 288EXPORT_SYMBOL(mutex_lock);
 289#endif
 290
 291#include "ww_mutex.h"
 292
 293#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 294
 295/*
 296 * Trylock variant that returns the owning task on failure.
 297 */
 298static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 299{
 300	return __mutex_trylock_common(lock, false);
 301}
 302
 303static inline
 304bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 305			    struct mutex_waiter *waiter)
 306{
 307	struct ww_mutex *ww;
 308
 309	ww = container_of(lock, struct ww_mutex, base);
 310
 311	/*
 312	 * If ww->ctx is set the contents are undefined, only
 313	 * by acquiring wait_lock there is a guarantee that
 314	 * they are not invalid when reading.
 315	 *
 316	 * As such, when deadlock detection needs to be
 317	 * performed the optimistic spinning cannot be done.
 318	 *
 319	 * Check this in every inner iteration because we may
 320	 * be racing against another thread's ww_mutex_lock.
 321	 */
 322	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
 323		return false;
 324
 325	/*
 326	 * If we aren't on the wait list yet, cancel the spin
 327	 * if there are waiters. We want  to avoid stealing the
 328	 * lock from a waiter with an earlier stamp, since the
 329	 * other thread may already own a lock that we also
 330	 * need.
 331	 */
 332	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
 333		return false;
 334
 335	/*
 336	 * Similarly, stop spinning if we are no longer the
 337	 * first waiter.
 338	 */
 339	if (waiter && !__mutex_waiter_is_first(lock, waiter))
 340		return false;
 341
 342	return true;
 343}
 344
 345/*
 346 * Look out! "owner" is an entirely speculative pointer access and not
 347 * reliable.
 348 *
 349 * "noinline" so that this function shows up on perf profiles.
 350 */
 351static noinline
 352bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 353			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 354{
 355	bool ret = true;
 356
 357	lockdep_assert_preemption_disabled();
 358
 359	while (__mutex_owner(lock) == owner) {
 360		/*
 361		 * Ensure we emit the owner->on_cpu, dereference _after_
 362		 * checking lock->owner still matches owner. And we already
 363		 * disabled preemption which is equal to the RCU read-side
 364		 * crital section in optimistic spinning code. Thus the
 365		 * task_strcut structure won't go away during the spinning
 366		 * period
 367		 */
 368		barrier();
 369
 370		/*
 371		 * Use vcpu_is_preempted to detect lock holder preemption issue.
 372		 */
 373		if (!owner_on_cpu(owner) || need_resched()) {
 374			ret = false;
 375			break;
 376		}
 377
 378		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
 379			ret = false;
 380			break;
 381		}
 382
 383		cpu_relax();
 384	}
 385
 386	return ret;
 387}
 388
 389/*
 390 * Initial check for entering the mutex spinning loop
 391 */
 392static inline int mutex_can_spin_on_owner(struct mutex *lock)
 393{
 394	struct task_struct *owner;
 395	int retval = 1;
 396
 397	lockdep_assert_preemption_disabled();
 398
 399	if (need_resched())
 400		return 0;
 401
 402	/*
 403	 * We already disabled preemption which is equal to the RCU read-side
 404	 * crital section in optimistic spinning code. Thus the task_strcut
 405	 * structure won't go away during the spinning period.
 406	 */
 407	owner = __mutex_owner(lock);
 408	if (owner)
 409		retval = owner_on_cpu(owner);
 410
 411	/*
 412	 * If lock->owner is not set, the mutex has been released. Return true
 413	 * such that we'll trylock in the spin path, which is a faster option
 414	 * than the blocking slow path.
 415	 */
 416	return retval;
 417}
 418
 419/*
 420 * Optimistic spinning.
 421 *
 422 * We try to spin for acquisition when we find that the lock owner
 423 * is currently running on a (different) CPU and while we don't
 424 * need to reschedule. The rationale is that if the lock owner is
 425 * running, it is likely to release the lock soon.
 426 *
 427 * The mutex spinners are queued up using MCS lock so that only one
 428 * spinner can compete for the mutex. However, if mutex spinning isn't
 429 * going to happen, there is no point in going through the lock/unlock
 430 * overhead.
 431 *
 432 * Returns true when the lock was taken, otherwise false, indicating
 433 * that we need to jump to the slowpath and sleep.
 434 *
 435 * The waiter flag is set to true if the spinner is a waiter in the wait
 436 * queue. The waiter-spinner will spin on the lock directly and concurrently
 437 * with the spinner at the head of the OSQ, if present, until the owner is
 438 * changed to itself.
 439 */
 440static __always_inline bool
 441mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 442		      struct mutex_waiter *waiter)
 443{
 444	if (!waiter) {
 445		/*
 446		 * The purpose of the mutex_can_spin_on_owner() function is
 447		 * to eliminate the overhead of osq_lock() and osq_unlock()
 448		 * in case spinning isn't possible. As a waiter-spinner
 449		 * is not going to take OSQ lock anyway, there is no need
 450		 * to call mutex_can_spin_on_owner().
 451		 */
 452		if (!mutex_can_spin_on_owner(lock))
 453			goto fail;
 454
 455		/*
 456		 * In order to avoid a stampede of mutex spinners trying to
 457		 * acquire the mutex all at once, the spinners need to take a
 458		 * MCS (queued) lock first before spinning on the owner field.
 459		 */
 460		if (!osq_lock(&lock->osq))
 461			goto fail;
 462	}
 463
 464	for (;;) {
 465		struct task_struct *owner;
 466
 467		/* Try to acquire the mutex... */
 468		owner = __mutex_trylock_or_owner(lock);
 469		if (!owner)
 470			break;
 471
 472		/*
 473		 * There's an owner, wait for it to either
 474		 * release the lock or go to sleep.
 475		 */
 476		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
 477			goto fail_unlock;
 478
 479		/*
 480		 * The cpu_relax() call is a compiler barrier which forces
 481		 * everything in this loop to be re-loaded. We don't need
 482		 * memory barriers as we'll eventually observe the right
 483		 * values at the cost of a few extra spins.
 484		 */
 485		cpu_relax();
 486	}
 487
 488	if (!waiter)
 489		osq_unlock(&lock->osq);
 490
 491	return true;
 492
 493
 494fail_unlock:
 495	if (!waiter)
 496		osq_unlock(&lock->osq);
 497
 498fail:
 499	/*
 500	 * If we fell out of the spin path because of need_resched(),
 501	 * reschedule now, before we try-lock the mutex. This avoids getting
 502	 * scheduled out right after we obtained the mutex.
 503	 */
 504	if (need_resched()) {
 505		/*
 506		 * We _should_ have TASK_RUNNING here, but just in case
 507		 * we do not, make it so, otherwise we might get stuck.
 508		 */
 509		__set_current_state(TASK_RUNNING);
 510		schedule_preempt_disabled();
 511	}
 512
 513	return false;
 514}
 515#else
 516static __always_inline bool
 517mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 518		      struct mutex_waiter *waiter)
 519{
 520	return false;
 521}
 522#endif
 523
 524static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
 525
 526/**
 527 * mutex_unlock - release the mutex
 528 * @lock: the mutex to be released
 529 *
 530 * Unlock a mutex that has been locked by this task previously.
 531 *
 532 * This function must not be used in interrupt context. Unlocking
 533 * of a not locked mutex is not allowed.
 534 *
 535 * The caller must ensure that the mutex stays alive until this function has
 536 * returned - mutex_unlock() can NOT directly be used to release an object such
 537 * that another concurrent task can free it.
 538 * Mutexes are different from spinlocks & refcounts in this aspect.
 539 *
 540 * This function is similar to (but not equivalent to) up().
 541 */
 542void __sched mutex_unlock(struct mutex *lock)
 543{
 544#ifndef CONFIG_DEBUG_LOCK_ALLOC
 545	if (__mutex_unlock_fast(lock))
 546		return;
 547#endif
 548	__mutex_unlock_slowpath(lock, _RET_IP_);
 549}
 550EXPORT_SYMBOL(mutex_unlock);
 551
 552/**
 553 * ww_mutex_unlock - release the w/w mutex
 554 * @lock: the mutex to be released
 555 *
 556 * Unlock a mutex that has been locked by this task previously with any of the
 557 * ww_mutex_lock* functions (with or without an acquire context). It is
 558 * forbidden to release the locks after releasing the acquire context.
 559 *
 560 * This function must not be used in interrupt context. Unlocking
 561 * of a unlocked mutex is not allowed.
 562 */
 563void __sched ww_mutex_unlock(struct ww_mutex *lock)
 564{
 565	__ww_mutex_unlock(lock);
 566	mutex_unlock(&lock->base);
 567}
 568EXPORT_SYMBOL(ww_mutex_unlock);
 569
 570/*
 571 * Lock a mutex (possibly interruptible), slowpath:
 572 */
 573static __always_inline int __sched
 574__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 575		    struct lockdep_map *nest_lock, unsigned long ip,
 576		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 577{
 
 578	struct mutex_waiter waiter;
 579	struct ww_mutex *ww;
 
 580	int ret;
 581
 582	if (!use_ww_ctx)
 583		ww_ctx = NULL;
 584
 585	might_sleep();
 586
 587	MUTEX_WARN_ON(lock->magic != lock);
 588
 589	ww = container_of(lock, struct ww_mutex, base);
 590	if (ww_ctx) {
 591		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 592			return -EALREADY;
 593
 594		/*
 595		 * Reset the wounded flag after a kill. No other process can
 596		 * race and wound us here since they can't have a valid owner
 597		 * pointer if we don't have any locks held.
 598		 */
 599		if (ww_ctx->acquired == 0)
 600			ww_ctx->wounded = 0;
 601
 602#ifdef CONFIG_DEBUG_LOCK_ALLOC
 603		nest_lock = &ww_ctx->dep_map;
 604#endif
 605	}
 606
 607	preempt_disable();
 608	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 609
 610	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
 611	if (__mutex_trylock(lock) ||
 612	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 613		/* got the lock, yay! */
 614		lock_acquired(&lock->dep_map, ip);
 615		if (ww_ctx)
 616			ww_mutex_set_context_fastpath(ww, ww_ctx);
 617		trace_contention_end(lock, 0);
 618		preempt_enable();
 619		return 0;
 620	}
 621
 622	raw_spin_lock(&lock->wait_lock);
 623	/*
 624	 * After waiting to acquire the wait_lock, try again.
 625	 */
 626	if (__mutex_trylock(lock)) {
 627		if (ww_ctx)
 628			__ww_mutex_check_waiters(lock, ww_ctx);
 629
 630		goto skip_wait;
 631	}
 632
 633	debug_mutex_lock_common(lock, &waiter);
 634	waiter.task = current;
 635	if (use_ww_ctx)
 636		waiter.ww_ctx = ww_ctx;
 637
 638	lock_contended(&lock->dep_map, ip);
 639
 640	if (!use_ww_ctx) {
 641		/* add waiting tasks to the end of the waitqueue (FIFO): */
 642		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
 643	} else {
 644		/*
 645		 * Add in stamp order, waking up waiters that must kill
 646		 * themselves.
 647		 */
 648		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
 649		if (ret)
 650			goto err_early_kill;
 651	}
 652
 653	set_current_state(state);
 654	trace_contention_begin(lock, LCB_F_MUTEX);
 655	for (;;) {
 656		bool first;
 657
 658		/*
 659		 * Once we hold wait_lock, we're serialized against
 660		 * mutex_unlock() handing the lock off to us, do a trylock
 661		 * before testing the error conditions to make sure we pick up
 662		 * the handoff.
 663		 */
 664		if (__mutex_trylock(lock))
 665			goto acquired;
 666
 667		/*
 668		 * Check for signals and kill conditions while holding
 669		 * wait_lock. This ensures the lock cancellation is ordered
 670		 * against mutex_unlock() and wake-ups do not go missing.
 671		 */
 672		if (signal_pending_state(state, current)) {
 673			ret = -EINTR;
 674			goto err;
 675		}
 676
 677		if (ww_ctx) {
 678			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
 679			if (ret)
 680				goto err;
 681		}
 682
 683		raw_spin_unlock(&lock->wait_lock);
 
 
 
 
 684		schedule_preempt_disabled();
 685
 686		first = __mutex_waiter_is_first(lock, &waiter);
 687
 688		set_current_state(state);
 689		/*
 690		 * Here we order against unlock; we must either see it change
 691		 * state back to RUNNING and fall through the next schedule(),
 692		 * or we must see its unlock and acquire.
 693		 */
 694		if (__mutex_trylock_or_handoff(lock, first))
 695			break;
 696
 697		if (first) {
 698			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
 699			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
 700				break;
 701			trace_contention_begin(lock, LCB_F_MUTEX);
 702		}
 703
 704		raw_spin_lock(&lock->wait_lock);
 705	}
 706	raw_spin_lock(&lock->wait_lock);
 707acquired:
 708	__set_current_state(TASK_RUNNING);
 709
 710	if (ww_ctx) {
 711		/*
 712		 * Wound-Wait; we stole the lock (!first_waiter), check the
 713		 * waiters as anyone might want to wound us.
 714		 */
 715		if (!ww_ctx->is_wait_die &&
 716		    !__mutex_waiter_is_first(lock, &waiter))
 717			__ww_mutex_check_waiters(lock, ww_ctx);
 718	}
 719
 720	__mutex_remove_waiter(lock, &waiter);
 721
 722	debug_mutex_free_waiter(&waiter);
 723
 724skip_wait:
 725	/* got the lock - cleanup and rejoice! */
 726	lock_acquired(&lock->dep_map, ip);
 727	trace_contention_end(lock, 0);
 728
 729	if (ww_ctx)
 730		ww_mutex_lock_acquired(ww, ww_ctx);
 731
 732	raw_spin_unlock(&lock->wait_lock);
 733	preempt_enable();
 734	return 0;
 735
 736err:
 737	__set_current_state(TASK_RUNNING);
 738	__mutex_remove_waiter(lock, &waiter);
 739err_early_kill:
 740	trace_contention_end(lock, ret);
 741	raw_spin_unlock(&lock->wait_lock);
 742	debug_mutex_free_waiter(&waiter);
 743	mutex_release(&lock->dep_map, ip);
 744	preempt_enable();
 745	return ret;
 746}
 747
 748static int __sched
 749__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 750	     struct lockdep_map *nest_lock, unsigned long ip)
 751{
 752	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
 753}
 754
 755static int __sched
 756__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 757		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
 758{
 759	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
 760}
 761
 762/**
 763 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 764 * @ww: mutex to lock
 765 * @ww_ctx: optional w/w acquire context
 766 *
 767 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 768 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 769 *
 770 * Unlike ww_mutex_lock(), no deadlock handling is performed. However, if @ww_ctx is
 771 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 772 *
 773 * A mutex acquired with this function must be released with ww_mutex_unlock.
 774 */
 775int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 776{
 777	if (!ww_ctx)
 778		return mutex_trylock(&ww->base);
 779
 780	MUTEX_WARN_ON(ww->base.magic != &ww->base);
 781
 782	/*
 783	 * Reset the wounded flag after a kill. No other process can
 784	 * race and wound us here, since they can't have a valid owner
 785	 * pointer if we don't have any locks held.
 786	 */
 787	if (ww_ctx->acquired == 0)
 788		ww_ctx->wounded = 0;
 789
 790	if (__mutex_trylock(&ww->base)) {
 791		ww_mutex_set_context_fastpath(ww, ww_ctx);
 792		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
 793		return 1;
 794	}
 795
 796	return 0;
 797}
 798EXPORT_SYMBOL(ww_mutex_trylock);
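
/*
 * Usage sketch, illustrative only ("obj", "ctx" and the surrounding eviction
 * loop are hypothetical, not part of this file): opportunistically grab one
 * more ww_mutex while already holding locks in the acquire context, skipping
 * the object on contention instead of risking a wait:
 *
 *	if (!ww_mutex_trylock(&obj->lock, ctx))
 *		continue;
 *	evict(obj);
 *	ww_mutex_unlock(&obj->lock);
 */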
 799
 800#ifdef CONFIG_DEBUG_LOCK_ALLOC
 801void __sched
 802mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 803{
 804	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 805}
 806
 807EXPORT_SYMBOL_GPL(mutex_lock_nested);
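
/*
 * Usage sketch, illustrative only ("parent" and "child" are hypothetical):
 * two locks of the same lock class taken in a guaranteed order; the subclass
 * tells lockdep this nesting is intentional rather than a self-deadlock:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */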
 808
 809void __sched
 810_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 811{
 812	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
 813}
 814EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 815
 816int __sched
 817mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 818{
 819	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 820}
 821EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 822
 823int __sched
 824mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 825{
 826	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
 827}
 828EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 829
 830void __sched
 831mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
 832{
 833	int token;
 834
 835	might_sleep();
 836
 837	token = io_schedule_prepare();
 838	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
 839			    subclass, NULL, _RET_IP_, NULL, 0);
 840	io_schedule_finish(token);
 841}
 842EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
 843
 844static inline int
 845ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 846{
 847#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
 848	unsigned tmp;
 849
 850	if (ctx->deadlock_inject_countdown-- == 0) {
 851		tmp = ctx->deadlock_inject_interval;
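		/* back off: next interval is 3.5x the current one, capped at UINT_MAX */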
 852		if (tmp > UINT_MAX/4)
 853			tmp = UINT_MAX;
 854		else
 855			tmp = tmp*2 + tmp + tmp/2;
 856
 857		ctx->deadlock_inject_interval = tmp;
 858		ctx->deadlock_inject_countdown = tmp;
 859		ctx->contending_lock = lock;
 860
 861		ww_mutex_unlock(lock);
 862
 863		return -EDEADLK;
 864	}
 865#endif
 866
 867	return 0;
 868}
 869
 870int __sched
 871ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 872{
 873	int ret;
 874
 875	might_sleep();
 876	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
 877			       0, _RET_IP_, ctx);
 878	if (!ret && ctx && ctx->acquired > 1)
 879		return ww_mutex_deadlock_injection(lock, ctx);
 880
 881	return ret;
 882}
 883EXPORT_SYMBOL_GPL(ww_mutex_lock);
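
/*
 * Usage sketch, illustrative only ("a", "b" and "my_ww_class" are
 * hypothetical; see Documentation/locking/ww-mutex-design.rst for the full
 * pattern): the first lock of a context cannot return -EDEADLK; on a later
 * -EDEADLK the caller backs off, sleeps on the contended lock with
 * ww_mutex_lock_slow(), then retakes the rest (a complete implementation
 * loops until no -EDEADLK remains):
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *	}
 */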
 884
 885int __sched
 886ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 887{
 888	int ret;
 889
 890	might_sleep();
 891	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
 892			      0, _RET_IP_, ctx);
 893
 894	if (!ret && ctx && ctx->acquired > 1)
 895		return ww_mutex_deadlock_injection(lock, ctx);
 896
 897	return ret;
 898}
 899EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
 900
 901#endif
 902
 903/*
 904 * Release the lock, slowpath:
 905 */
 906static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
 907{
 908	struct task_struct *next = NULL;
 909	DEFINE_WAKE_Q(wake_q);
 910	unsigned long owner;
 911
 912	mutex_release(&lock->dep_map, ip);
 913
 914	/*
 915	 * Release the lock before (potentially) taking the spinlock such that
 916	 * other contenders can get on with things ASAP.
 917	 *
 918	 * Except when HANDOFF, in that case we must not clear the owner field,
 919	 * but instead set it to the top waiter.
 920	 */
 921	owner = atomic_long_read(&lock->owner);
 922	for (;;) {
 923		MUTEX_WARN_ON(__owner_task(owner) != current);
 924		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 925
 926		if (owner & MUTEX_FLAG_HANDOFF)
 927			break;
 928
 929		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
 930			if (owner & MUTEX_FLAG_WAITERS)
 931				break;
 932
 933			return;
 934		}
 935	}
 936
 937	raw_spin_lock(&lock->wait_lock);
 938	debug_mutex_unlock(lock);
 939	if (!list_empty(&lock->wait_list)) {
 940		/* get the first entry from the wait-list: */
 941		struct mutex_waiter *waiter =
 942			list_first_entry(&lock->wait_list,
 943					 struct mutex_waiter, list);
 944
 945		next = waiter->task;
 946
 947		debug_mutex_wake_waiter(lock, waiter);
 948		wake_q_add(&wake_q, next);
 949	}
 950
 951	if (owner & MUTEX_FLAG_HANDOFF)
 952		__mutex_handoff(lock, next);
 953
 954	raw_spin_unlock(&lock->wait_lock);
 955
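	/* wake after dropping wait_lock so the woken task doesn't immediately block on it */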
 956	wake_up_q(&wake_q);
 957}
 958
 959#ifndef CONFIG_DEBUG_LOCK_ALLOC
 960/*
 961 * Here come the less common (and hence less performance-critical) APIs:
 962 * mutex_lock_interruptible() and mutex_trylock().
 963 */
 964static noinline int __sched
 965__mutex_lock_killable_slowpath(struct mutex *lock);
 966
 967static noinline int __sched
 968__mutex_lock_interruptible_slowpath(struct mutex *lock);
 969
 970/**
 971 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 972 * @lock: The mutex to be acquired.
 973 *
 974 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 975 * process is sleeping, this function will return without acquiring the
 976 * mutex.
 977 *
 978 * Context: Process context.
 979 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 980 * signal arrived.
 981 */
 982int __sched mutex_lock_interruptible(struct mutex *lock)
 983{
 984	might_sleep();
 985
 986	if (__mutex_trylock_fast(lock))
 987		return 0;
 988
 989	return __mutex_lock_interruptible_slowpath(lock);
 990}
 991
 992EXPORT_SYMBOL(mutex_lock_interruptible);
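
/*
 * Usage sketch, illustrative only ("dev" is hypothetical): the common driver
 * idiom, converting the -EINTR from a pending signal into -ERESTARTSYS so
 * the interrupted syscall is transparently restarted:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */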
 993
 994/**
 995 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 996 * @lock: The mutex to be acquired.
 997 *
 998 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 999 * the current process is delivered while the process is sleeping, this
1000 * function will return without acquiring the mutex.
1001 *
1002 * Context: Process context.
1003 * Return: 0 if the lock was successfully acquired or %-EINTR if a
1004 * fatal signal arrived.
1005 */
1006int __sched mutex_lock_killable(struct mutex *lock)
1007{
1008	might_sleep();
1009
1010	if (__mutex_trylock_fast(lock))
1011		return 0;
1012
1013	return __mutex_lock_killable_slowpath(lock);
1014}
1015EXPORT_SYMBOL(mutex_lock_killable);
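
/*
 * Usage sketch, illustrative only ("ctl" is hypothetical): unlike the
 * interruptible variant above, only a fatal signal aborts the wait, so
 * ordinary signals cannot make a long-running operation fail half-way:
 *
 *	ret = mutex_lock_killable(&ctl->lock);
 *	if (ret)
 *		return ret;	(-EINTR: the task is exiting anyway)
 *	...
 *	mutex_unlock(&ctl->lock);
 */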
1016
1017/**
1018 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1019 * @lock: The mutex to be acquired.
1020 *
1021 * Lock the mutex like mutex_lock().  While the task is waiting for this
1022 * mutex, it will be accounted as being in the IO wait state by the
1023 * scheduler.
1024 *
1025 * Context: Process context.
1026 */
1027void __sched mutex_lock_io(struct mutex *lock)
1028{
1029	int token;
1030
1031	token = io_schedule_prepare();
1032	mutex_lock(lock);
1033	io_schedule_finish(token);
1034}
1035EXPORT_SYMBOL_GPL(mutex_lock_io);
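
/*
 * Usage sketch, illustrative only ("disk" and submit_and_wait() are
 * hypothetical): serializing an I/O submission path; time spent blocked on
 * the mutex is accounted as iowait rather than idle:
 *
 *	mutex_lock_io(&disk->lock);
 *	submit_and_wait(disk);
 *	mutex_unlock(&disk->lock);
 */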
1036
1037static noinline void __sched
1038__mutex_lock_slowpath(struct mutex *lock)
1039{
1040	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1041}
1042
1043static noinline int __sched
1044__mutex_lock_killable_slowpath(struct mutex *lock)
1045{
1046	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1047}
1048
1049static noinline int __sched
1050__mutex_lock_interruptible_slowpath(struct mutex *lock)
1051{
1052	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1053}
1054
1055static noinline int __sched
1056__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1057{
1058	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
1059			       _RET_IP_, ctx);
1060}
1061
1062static noinline int __sched
1063__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1064					    struct ww_acquire_ctx *ctx)
1065{
1066	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
1067			       _RET_IP_, ctx);
1068}
1069
1070#endif
1071
1072/**
1073 * mutex_trylock - try to acquire the mutex, without waiting
1074 * @lock: the mutex to be acquired
1075 *
1076 * Try to acquire the mutex atomically. Returns 1 if the mutex
1077 * has been acquired successfully, and 0 on contention.
1078 *
1079 * NOTE: this function follows the spin_trylock() convention, so
1080 * it is negated from the down_trylock() return values! Be careful
1081 * about this when converting semaphore users to mutexes.
1082 *
1083 * This function must not be used in interrupt context. The
1084 * mutex must be released by the same task that acquired it.
1085 */
1086int __sched mutex_trylock(struct mutex *lock)
1087{
1088	bool locked;
1089
1090	MUTEX_WARN_ON(lock->magic != lock);
1091
1092	locked = __mutex_trylock(lock);
1093	if (locked)
1094		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1095
1096	return locked;
1097}
1098EXPORT_SYMBOL(mutex_trylock);
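
/*
 * Usage sketch, illustrative only ("pool" is hypothetical): opportunistic
 * work from a path that must not sleep; note the return value follows
 * spin_trylock(), not down_trylock():
 *
 *	if (mutex_trylock(&pool->gc_lock)) {
 *		shrink(pool);
 *		mutex_unlock(&pool->gc_lock);
 *	}
 */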
1099
1100#ifndef CONFIG_DEBUG_LOCK_ALLOC
1101int __sched
1102ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1103{
1104	might_sleep();
1105
1106	if (__mutex_trylock_fast(&lock->base)) {
1107		if (ctx)
1108			ww_mutex_set_context_fastpath(lock, ctx);
1109		return 0;
1110	}
1111
1112	return __ww_mutex_lock_slowpath(lock, ctx);
1113}
1114EXPORT_SYMBOL(ww_mutex_lock);
1115
1116int __sched
1117ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1118{
1119	might_sleep();
1120
1121	if (__mutex_trylock_fast(&lock->base)) {
1122		if (ctx)
1123			ww_mutex_set_context_fastpath(lock, ctx);
1124		return 0;
1125	}
1126
1127	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1128}
1129EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1130
1131#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
1132#endif /* !CONFIG_PREEMPT_RT */
1133
1134EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
1135EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);
1136
1137/**
1138 * atomic_dec_and_mutex_lock - decrement and take the mutex if the count hits 0
1139 * @cnt: the atomic counter to decrement
1140 * @lock: the mutex to return holding if the decrement reaches 0
1141 *
1142 * Return: 1, with @lock held, if @cnt reached 0; 0 otherwise (@lock not held).
1143 */
1144int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1145{
1146	/* dec if we can't possibly hit 0 */
1147	if (atomic_add_unless(cnt, -1, 1))
1148		return 0;
1149	/* we might hit 0, so take the lock */
1150	mutex_lock(lock);
1151	if (!atomic_dec_and_test(cnt)) {
1152		/* when we actually did the dec, we didn't hit 0 */
1153		mutex_unlock(lock);
1154		return 0;
1155	}
1156	/* we hit 0, and we hold the lock */
1157	return 1;
1158}
1159EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
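
/*
 * Usage sketch, illustrative only ("obj" and "registry_lock" are
 * hypothetical): dropping a reference where the final put must unlink the
 * object from a mutex-protected list before freeing it:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &registry_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&registry_lock);
 *		kfree(obj);
 *	}
 */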