   1/*
   2 *  Fast Userspace Mutexes (which I call "Futexes!").
   3 *  (C) Rusty Russell, IBM 2002
   4 *
   5 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
   6 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
   7 *
   8 *  Removed page pinning, fix privately mapped COW pages and other cleanups
   9 *  (C) Copyright 2003, 2004 Jamie Lokier
  10 *
  11 *  Robust futex support started by Ingo Molnar
  12 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
  13 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
  14 *
  15 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
  16 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  17 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  18 *
  19 *  PRIVATE futexes by Eric Dumazet
  20 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
  21 *
  22 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
  23 *  Copyright (C) IBM Corporation, 2009
  24 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
  25 *
  26 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
  27 *  enough at me, Linus for the original (flawed) idea, Matthew
  28 *  Kirkwood for proof-of-concept implementation.
  29 *
  30 *  "The futexes are also cursed."
  31 *  "But they come in a choice of three flavours!"
  32 *
  33 *  This program is free software; you can redistribute it and/or modify
  34 *  it under the terms of the GNU General Public License as published by
  35 *  the Free Software Foundation; either version 2 of the License, or
  36 *  (at your option) any later version.
  37 *
  38 *  This program is distributed in the hope that it will be useful,
  39 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  40 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  41 *  GNU General Public License for more details.
  42 *
  43 *  You should have received a copy of the GNU General Public License
  44 *  along with this program; if not, write to the Free Software
  45 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  46 */
  47#include <linux/slab.h>
  48#include <linux/poll.h>
  49#include <linux/fs.h>
  50#include <linux/file.h>
  51#include <linux/jhash.h>
  52#include <linux/init.h>
  53#include <linux/futex.h>
  54#include <linux/mount.h>
  55#include <linux/pagemap.h>
  56#include <linux/syscalls.h>
  57#include <linux/signal.h>
  58#include <linux/export.h>
  59#include <linux/magic.h>
  60#include <linux/pid.h>
  61#include <linux/nsproxy.h>
  62#include <linux/ptrace.h>
  63#include <linux/sched/rt.h>
  64#include <linux/hugetlb.h>
  65#include <linux/freezer.h>
  66#include <linux/bootmem.h>
  67
  68#include <asm/futex.h>
  69
  70#include "locking/rtmutex_common.h"
  71
  72/*
  73 * READ this before attempting to hack on futexes!
  74 *
  75 * Basic futex operation and ordering guarantees
  76 * =============================================
  77 *
  78 * The waiter reads the futex value in user space and calls
  79 * futex_wait(). This function computes the hash bucket and acquires
  80 * the hash bucket lock. After that it reads the futex user space value
  81 * again and verifies that the data has not changed. If it has not changed
  82 * it enqueues itself into the hash bucket, releases the hash bucket lock
  83 * and schedules.
  84 *
  85 * The waker side modifies the user space value of the futex and calls
  86 * futex_wake(). This function computes the hash bucket and acquires the
  87 * hash bucket lock. Then it looks for waiters on that futex in the hash
  88 * bucket and wakes them.
  89 *
  90 * In futex wake up scenarios where no tasks are blocked on a futex, the
  91 * waker can avoid taking the hb spinlock and simply return. For this
  92 * optimization to work, ordering guarantees must exist so that a waiter
  93 * being added to the list is observed when the list is concurrently
  94 * checked by the waker, avoiding scenarios like the following:
  95 *
  96 * CPU 0                               CPU 1
  97 * val = *futex;
  98 * sys_futex(WAIT, futex, val);
  99 *   futex_wait(futex, val);
 100 *   uval = *futex;
 101 *                                     *futex = newval;
 102 *                                     sys_futex(WAKE, futex);
 103 *                                       futex_wake(futex);
 104 *                                       if (queue_empty())
 105 *                                         return;
 106 *   if (uval == val)
 107 *      lock(hash_bucket(futex));
 108 *      queue();
 109 *     unlock(hash_bucket(futex));
 110 *     schedule();
 111 *
 112 * This would cause the waiter on CPU 0 to wait forever because it
 113 * missed the transition of the user space value from val to newval
 114 * and the waker did not find the waiter in the hash bucket queue.
 115 *
 116 * The correct serialization ensures that a waiter either observes
 117 * the changed user space value before blocking or is woken by a
 118 * concurrent waker:
 119 *
 120 * CPU 0                                 CPU 1
 121 * val = *futex;
 122 * sys_futex(WAIT, futex, val);
 123 *   futex_wait(futex, val);
 124 *
 125 *   waiters++; (a)
 126 *   mb(); (A) <-- paired with -.
 127 *                              |
 128 *   lock(hash_bucket(futex));  |
 129 *                              |
 130 *   uval = *futex;             |
 131 *                              |        *futex = newval;
 132 *                              |        sys_futex(WAKE, futex);
 133 *                              |          futex_wake(futex);
 134 *                              |
 135 *                              `------->  mb(); (B)
 136 *   if (uval == val)
 137 *     queue();
 138 *     unlock(hash_bucket(futex));
 139 *     schedule();                         if (waiters)
 140 *                                           lock(hash_bucket(futex));
 141 *   else                                    wake_waiters(futex);
 142 *     waiters--; (b)                        unlock(hash_bucket(futex));
 143 *
 144 * Where (A) orders the waiters increment and the futex value read through
 145 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 146 * to futex and the waiters read -- this is done by the barriers in
 147 * get_futex_key_refs(), through either ihold or atomic_inc, depending on the
 148 * futex type.
 149 *
 150 * This yields the following case (where X:=waiters, Y:=futex):
 151 *
 152 *	X = Y = 0
 153 *
 154 *	w[X]=1		w[Y]=1
 155 *	MB		MB
 156 *	r[Y]=y		r[X]=x
 157 *
 158 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 159 * the guarantee that we cannot both miss the futex variable change and the
 160 * enqueue.
 161 *
 162 * Note that a new waiter is accounted for in (a) even when it is possible that
  163 * the wait call can return an error, in which case we backtrack from it in (b).
 164 * Refer to the comment in queue_lock().
 165 *
  166 * Similarly, in order to account for waiters being requeued to another
  167 * address we always increment the waiter count of the destination bucket
  168 * before acquiring the lock, and decrement it again after releasing it -
  169 * the code that actually moves the futex(es) between hash buckets
  170 * (requeue_futex) does the additional required waiter count housekeeping.
  171 * This is done in double_lock_hb() and double_unlock_hb(), respectively.
 172 */
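/*
 * Editor's illustration - a minimal sketch (not part of this file) of the
 * user-space half of the protocol described above, using the raw futex
 * syscall; names and error handling are simplified:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int futex_word;	// the 32-bit futex
 *
 *	void wait_on(int val)
 *	{
 *		// The kernel re-checks futex_word == val under the hash
 *		// bucket lock, so a concurrent wake cannot be missed
 *		// (barriers (A)/(B) above close the race).
 *		syscall(SYS_futex, &futex_word, FUTEX_WAIT, val,
 *			NULL, NULL, 0);
 *	}
 *
 *	void wake_one(void)
 *	{
 *		__atomic_store_n(&futex_word, 1, __ATOMIC_SEQ_CST);
 *		syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1,
 *			NULL, NULL, 0);
 *	}
 */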
 173
 174#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 175int __read_mostly futex_cmpxchg_enabled;
 176#endif
 177
 178/*
 179 * Futex flags used to encode options to functions and preserve them across
 180 * restarts.
 181 */
 182#define FLAGS_SHARED		0x01
 183#define FLAGS_CLOCKRT		0x02
 184#define FLAGS_HAS_TIMEOUT	0x04
 185
 186/*
 187 * Priority Inheritance state:
 188 */
 189struct futex_pi_state {
 190	/*
 191	 * list of 'owned' pi_state instances - these have to be
 192	 * cleaned up in do_exit() if the task exits prematurely:
 193	 */
 194	struct list_head list;
 195
 196	/*
 197	 * The PI object:
 198	 */
 199	struct rt_mutex pi_mutex;
 200
 201	struct task_struct *owner;
 202	atomic_t refcount;
 203
 204	union futex_key key;
 205};
 206
 207/**
 208 * struct futex_q - The hashed futex queue entry, one per waiting task
 209 * @list:		priority-sorted list of tasks waiting on this futex
 210 * @task:		the task waiting on the futex
 211 * @lock_ptr:		the hash bucket lock
 212 * @key:		the key the futex is hashed on
 213 * @pi_state:		optional priority inheritance state
 214 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 215 * @requeue_pi_key:	the requeue_pi target futex key
 216 * @bitset:		bitset for the optional bitmasked wakeup
 217 *
 218 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 219 * we can wake only the relevant ones (hashed queues may be shared).
 220 *
 221 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 222 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 223 * The order of wakeup is always to make the first condition true, then
 224 * the second.
 225 *
 226 * PI futexes are typically woken before they are removed from the hash list via
 227 * the rt_mutex code. See unqueue_me_pi().
 228 */
 229struct futex_q {
 230	struct plist_node list;
 231
 232	struct task_struct *task;
 233	spinlock_t *lock_ptr;
 234	union futex_key key;
 235	struct futex_pi_state *pi_state;
 236	struct rt_mutex_waiter *rt_waiter;
 237	union futex_key *requeue_pi_key;
 238	u32 bitset;
 239};
 240
 241static const struct futex_q futex_q_init = {
 242	/* list gets initialized in queue_me()*/
 243	.key = FUTEX_KEY_INIT,
 244	.bitset = FUTEX_BITSET_MATCH_ANY
 245};
 246
 247/*
 248 * Hash buckets are shared by all the futex_keys that hash to the same
 249 * location.  Each key may have multiple futex_q structures, one for each task
 250 * waiting on a futex.
 251 */
 252struct futex_hash_bucket {
 253	atomic_t waiters;
 254	spinlock_t lock;
 255	struct plist_head chain;
 256} ____cacheline_aligned_in_smp;
 257
 258static unsigned long __read_mostly futex_hashsize;
 259
 260static struct futex_hash_bucket *futex_queues;
 261
 262static inline void futex_get_mm(union futex_key *key)
 263{
 264	atomic_inc(&key->private.mm->mm_count);
 265	/*
 266	 * Ensure futex_get_mm() implies a full barrier such that
 267	 * get_futex_key() implies a full barrier. This is relied upon
 268	 * as full barrier (B), see the ordering comment above.
 269	 */
 270	smp_mb__after_atomic_inc();
 271}
 272
 273/*
 274 * Reflects a new waiter being added to the waitqueue.
 275 */
 276static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 277{
 278#ifdef CONFIG_SMP
 279	atomic_inc(&hb->waiters);
 280	/*
 281	 * Full barrier (A), see the ordering comment above.
 282	 */
 283	smp_mb__after_atomic_inc();
 284#endif
 285}
 286
 287/*
 288 * Reflects a waiter being removed from the waitqueue by wakeup
 289 * paths.
 290 */
 291static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
 292{
 293#ifdef CONFIG_SMP
 294	atomic_dec(&hb->waiters);
 295#endif
 296}
 297
 298static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
 299{
 300#ifdef CONFIG_SMP
 301	return atomic_read(&hb->waiters);
 302#else
 303	return 1;
 304#endif
 305}
 306
 307/*
 308 * We hash on the keys returned from get_futex_key (see below).
 309 */
 310static struct futex_hash_bucket *hash_futex(union futex_key *key)
 311{
 312	u32 hash = jhash2((u32*)&key->both.word,
 313			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
 314			  key->both.offset);
 315	return &futex_queues[hash & (futex_hashsize - 1)];
 316}
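/*
 * Editor's note: futex_hashsize is rounded up to a power of two at boot
 * (futex_init(), not shown in this excerpt), so the '& (futex_hashsize - 1)'
 * above is a cheap modulo. E.g. with 256 buckets, hash 0x12345678 maps to
 * bucket 0x12345678 & 0xff == 0x78.
 */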
 317
 318/*
 319 * Return 1 if two futex_keys are equal, 0 otherwise.
 320 */
 321static inline int match_futex(union futex_key *key1, union futex_key *key2)
 322{
 323	return (key1 && key2
 324		&& key1->both.word == key2->both.word
 325		&& key1->both.ptr == key2->both.ptr
 326		&& key1->both.offset == key2->both.offset);
 327}
 328
 329/*
 330 * Take a reference to the resource addressed by a key.
 331 * Can be called while holding spinlocks.
 332 *
 333 */
 334static void get_futex_key_refs(union futex_key *key)
 335{
 336	if (!key->both.ptr)
 337		return;
 338
 339	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 340	case FUT_OFF_INODE:
 341		ihold(key->shared.inode); /* implies MB (B) */
 342		break;
 343	case FUT_OFF_MMSHARED:
 344		futex_get_mm(key); /* implies MB (B) */
 345		break;
 346	}
 347}
 348
 349/*
 350 * Drop a reference to the resource addressed by a key.
 351 * The hash bucket spinlock must not be held.
 352 */
 353static void drop_futex_key_refs(union futex_key *key)
 354{
 355	if (!key->both.ptr) {
 356		/* If we're here then we tried to put a key we failed to get */
 357		WARN_ON_ONCE(1);
 358		return;
 359	}
 360
 361	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 362	case FUT_OFF_INODE:
 363		iput(key->shared.inode);
 364		break;
 365	case FUT_OFF_MMSHARED:
 366		mmdrop(key->private.mm);
 367		break;
 368	}
 369}
 370
 371/**
 372 * get_futex_key() - Get parameters which are the keys for a futex
 373 * @uaddr:	virtual address of the futex
 374 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 375 * @key:	address where result is stored.
 376 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 377 *              VERIFY_WRITE)
 378 *
 379 * Return: a negative error code or 0
 380 *
 381 * The key words are stored in *key on success.
 382 *
 383 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 384 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 385 * We can usually work out the index without swapping in the page.
 386 *
 387 * lock_page() might sleep, the caller should not hold a spinlock.
 388 */
 389static int
 390get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 391{
 392	unsigned long address = (unsigned long)uaddr;
 393	struct mm_struct *mm = current->mm;
 394	struct page *page, *page_head;
 395	int err, ro = 0;
 396
 397	/*
 398	 * The futex address must be "naturally" aligned.
 399	 */
 400	key->both.offset = address % PAGE_SIZE;
 401	if (unlikely((address % sizeof(u32)) != 0))
 402		return -EINVAL;
 403	address -= key->both.offset;
 404
 405	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
 406		return -EFAULT;
 407
 408	/*
 409	 * PROCESS_PRIVATE futexes are fast.
 410	 * As the mm cannot disappear under us and the 'key' only needs
  411	 * the virtual address, we don't even have to find the underlying vma.
  412	 * Note: we do have to check that 'uaddr' is a valid user address,
  413	 *       but access_ok() should be faster than find_vma().
 414	 */
 415	if (!fshared) {
 416		key->private.mm = mm;
 417		key->private.address = address;
 418		get_futex_key_refs(key);  /* implies MB (B) */
 419		return 0;
 420	}
 421
 422again:
 423	err = get_user_pages_fast(address, 1, 1, &page);
 424	/*
  425	 * If write access is not required (e.g. FUTEX_WAIT), try
 426	 * and get read-only access.
 427	 */
 428	if (err == -EFAULT && rw == VERIFY_READ) {
 429		err = get_user_pages_fast(address, 1, 0, &page);
 430		ro = 1;
 431	}
 432	if (err < 0)
 433		return err;
 434	else
 435		err = 0;
 436
 437#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 438	page_head = page;
 439	if (unlikely(PageTail(page))) {
 440		put_page(page);
 441		/* serialize against __split_huge_page_splitting() */
 442		local_irq_disable();
 443		if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
 444			page_head = compound_head(page);
 445			/*
 446			 * page_head is valid pointer but we must pin
 447			 * it before taking the PG_lock and/or
 448			 * PG_compound_lock. The moment we re-enable
 449			 * irqs __split_huge_page_splitting() can
 450			 * return and the head page can be freed from
 451			 * under us. We can't take the PG_lock and/or
 452			 * PG_compound_lock on a page that could be
 453			 * freed from under us.
 454			 */
 455			if (page != page_head) {
 456				get_page(page_head);
 457				put_page(page);
 458			}
 459			local_irq_enable();
 460		} else {
 461			local_irq_enable();
 462			goto again;
 463		}
 464	}
 465#else
 466	page_head = compound_head(page);
 467	if (page != page_head) {
 468		get_page(page_head);
 469		put_page(page);
 470	}
 471#endif
 472
 473	lock_page(page_head);
 474
 475	/*
 476	 * If page_head->mapping is NULL, then it cannot be a PageAnon
 477	 * page; but it might be the ZERO_PAGE or in the gate area or
 478	 * in a special mapping (all cases which we are happy to fail);
 479	 * or it may have been a good file page when get_user_pages_fast
 480	 * found it, but truncated or holepunched or subjected to
 481	 * invalidate_complete_page2 before we got the page lock (also
 482	 * cases which we are happy to fail).  And we hold a reference,
 483	 * so refcount care in invalidate_complete_page's remove_mapping
 484	 * prevents drop_caches from setting mapping to NULL beneath us.
 485	 *
 486	 * The case we do have to guard against is when memory pressure made
 487	 * shmem_writepage move it from filecache to swapcache beneath us:
 488	 * an unlikely race, but we do need to retry for page_head->mapping.
 489	 */
 490	if (!page_head->mapping) {
 491		int shmem_swizzled = PageSwapCache(page_head);
 492		unlock_page(page_head);
 493		put_page(page_head);
 494		if (shmem_swizzled)
 495			goto again;
 496		return -EFAULT;
 497	}
 498
 499	/*
 500	 * Private mappings are handled in a simple way.
 501	 *
 502	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
 503	 * it's a read-only handle, it's expected that futexes attach to
 504	 * the object not the particular process.
 505	 */
 506	if (PageAnon(page_head)) {
 507		/*
 508		 * A RO anonymous page will never change and thus doesn't make
 509		 * sense for futex operations.
 510		 */
 511		if (ro) {
 512			err = -EFAULT;
 513			goto out;
 514		}
 515
 516		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
 517		key->private.mm = mm;
 518		key->private.address = address;
 519	} else {
 520		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 521		key->shared.inode = page_head->mapping->host;
 522		key->shared.pgoff = basepage_index(page);
 523	}
 524
 525	get_futex_key_refs(key); /* implies MB (B) */
 526
 527out:
 528	unlock_page(page_head);
 529	put_page(page_head);
 530	return err;
 531}
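/*
 * Editor's illustration - a hypothetical user-space setup showing why the
 * two key flavours above exist (sketch, not part of this file):
 *
 *	// Private: both threads share current->mm, so (uaddr, mm) is
 *	// enough to identify the futex; no vma walk is needed.
 *	static int private_futex;
 *
 *	// Shared: two *processes* map the same file page; their virtual
 *	// addresses may differ, so the key must be (inode, pgoff, offset).
 *	int *shared_futex = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, 0);
 */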
 532
 533static inline void put_futex_key(union futex_key *key)
 534{
 535	drop_futex_key_refs(key);
 536}
 537
 538/**
 539 * fault_in_user_writeable() - Fault in user address and verify RW access
 540 * @uaddr:	pointer to faulting user space address
 541 *
 542 * Slow path to fixup the fault we just took in the atomic write
 543 * access to @uaddr.
 544 *
 545 * We have no generic implementation of a non-destructive write to the
 546 * user address. We know that we faulted in the atomic pagefault
 547 * disabled section so we can as well avoid the #PF overhead by
 548 * calling get_user_pages() right away.
 549 */
 550static int fault_in_user_writeable(u32 __user *uaddr)
 551{
 552	struct mm_struct *mm = current->mm;
 553	int ret;
 554
 555	down_read(&mm->mmap_sem);
 556	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 557			       FAULT_FLAG_WRITE);
 558	up_read(&mm->mmap_sem);
 559
 560	return ret < 0 ? ret : 0;
 561}
 562
 563/**
 564 * futex_top_waiter() - Return the highest priority waiter on a futex
 565 * @hb:		the hash bucket the futex_q's reside in
 566 * @key:	the futex key (to distinguish it from other futexes' futex_q's)
 567 *
 568 * Must be called with the hb lock held.
 569 */
 570static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
 571					union futex_key *key)
 572{
 573	struct futex_q *this;
 574
 575	plist_for_each_entry(this, &hb->chain, list) {
 576		if (match_futex(&this->key, key))
 577			return this;
 578	}
 579	return NULL;
 580}
 581
 582static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
 583				      u32 uval, u32 newval)
 584{
 585	int ret;
 586
 587	pagefault_disable();
 588	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
 589	pagefault_enable();
 590
 591	return ret;
 592}
 593
 594static int get_futex_value_locked(u32 *dest, u32 __user *from)
 595{
 596	int ret;
 597
 598	pagefault_disable();
 599	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
 600	pagefault_enable();
 601
 602	return ret ? -EFAULT : 0;
 603}
 604
 605
 606/*
 607 * PI code:
 608 */
 609static int refill_pi_state_cache(void)
 610{
 611	struct futex_pi_state *pi_state;
 612
 613	if (likely(current->pi_state_cache))
 614		return 0;
 615
 616	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
 617
 618	if (!pi_state)
 619		return -ENOMEM;
 620
 621	INIT_LIST_HEAD(&pi_state->list);
 622	/* pi_mutex gets initialized later */
 623	pi_state->owner = NULL;
 624	atomic_set(&pi_state->refcount, 1);
 625	pi_state->key = FUTEX_KEY_INIT;
 626
 627	current->pi_state_cache = pi_state;
 628
 629	return 0;
 630}
 631
 632static struct futex_pi_state * alloc_pi_state(void)
 633{
 634	struct futex_pi_state *pi_state = current->pi_state_cache;
 635
 636	WARN_ON(!pi_state);
 637	current->pi_state_cache = NULL;
 638
 639	return pi_state;
 640}
 641
 642static void free_pi_state(struct futex_pi_state *pi_state)
 643{
 644	if (!atomic_dec_and_test(&pi_state->refcount))
 645		return;
 646
 647	/*
 648	 * If pi_state->owner is NULL, the owner is most probably dying
 649	 * and has cleaned up the pi_state already
 650	 */
 651	if (pi_state->owner) {
 652		raw_spin_lock_irq(&pi_state->owner->pi_lock);
 653		list_del_init(&pi_state->list);
 654		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 655
 656		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
 657	}
 658
 659	if (current->pi_state_cache)
 660		kfree(pi_state);
 661	else {
 662		/*
 663		 * pi_state->list is already empty.
 664		 * clear pi_state->owner.
 665		 * refcount is at 0 - put it back to 1.
 666		 */
 667		pi_state->owner = NULL;
 668		atomic_set(&pi_state->refcount, 1);
 669		current->pi_state_cache = pi_state;
 670	}
 671}
 672
 673/*
 674 * Look up the task based on what TID userspace gave us.
 675 * We don't trust it.
 676 */
 677static struct task_struct * futex_find_get_task(pid_t pid)
 678{
 679	struct task_struct *p;
 680
 681	rcu_read_lock();
 682	p = find_task_by_vpid(pid);
 683	if (p)
 684		get_task_struct(p);
 685
 686	rcu_read_unlock();
 687
 688	return p;
 689}
 690
 691/*
 692 * This task is holding PI mutexes at exit time => bad.
 693 * Kernel cleans up PI-state, but userspace is likely hosed.
 694 * (Robust-futex cleanup is separate and might save the day for userspace.)
 695 */
 696void exit_pi_state_list(struct task_struct *curr)
 697{
 698	struct list_head *next, *head = &curr->pi_state_list;
 699	struct futex_pi_state *pi_state;
 700	struct futex_hash_bucket *hb;
 701	union futex_key key = FUTEX_KEY_INIT;
 702
 703	if (!futex_cmpxchg_enabled)
 704		return;
 705	/*
 706	 * We are a ZOMBIE and nobody can enqueue itself on
 707	 * pi_state_list anymore, but we have to be careful
 708	 * versus waiters unqueueing themselves:
 709	 */
 710	raw_spin_lock_irq(&curr->pi_lock);
 711	while (!list_empty(head)) {
 712
 713		next = head->next;
 714		pi_state = list_entry(next, struct futex_pi_state, list);
 715		key = pi_state->key;
 716		hb = hash_futex(&key);
 717		raw_spin_unlock_irq(&curr->pi_lock);
 718
 719		spin_lock(&hb->lock);
 720
 721		raw_spin_lock_irq(&curr->pi_lock);
 722		/*
 723		 * We dropped the pi-lock, so re-check whether this
 724		 * task still owns the PI-state:
 725		 */
 726		if (head->next != next) {
 727			spin_unlock(&hb->lock);
 728			continue;
 729		}
 730
 731		WARN_ON(pi_state->owner != curr);
 732		WARN_ON(list_empty(&pi_state->list));
 733		list_del_init(&pi_state->list);
 734		pi_state->owner = NULL;
 735		raw_spin_unlock_irq(&curr->pi_lock);
 736
 737		rt_mutex_unlock(&pi_state->pi_mutex);
 738
 739		spin_unlock(&hb->lock);
 740
 741		raw_spin_lock_irq(&curr->pi_lock);
 742	}
 743	raw_spin_unlock_irq(&curr->pi_lock);
 744}
 745
 746/*
 747 * We need to check the following states:
 748 *
 749 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 750 *
 751 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 752 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 753 *
 754 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 755 *
 756 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 757 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 758 *
 759 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 760 *
 761 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 762 *
 763 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 764 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 765 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 766 *
 767 * [1]	Indicates that the kernel can acquire the futex atomically. We
 768 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 769 *
 770 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 771 *      thread is found then it indicates that the owner TID has died.
 772 *
 773 * [3]	Invalid. The waiter is queued on a non PI futex
 774 *
 775 * [4]	Valid state after exit_robust_list(), which sets the user space
 776 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 777 *
 778 * [5]	The user space value got manipulated between exit_robust_list()
 779 *	and exit_pi_state_list()
 780 *
 781 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 782 *	the pi_state but cannot access the user space value.
 783 *
 784 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 785 *
 786 * [8]	Owner and user space value match
 787 *
 788 * [9]	There is no transient state which sets the user space TID to 0
 789 *	except exit_robust_list(), but this is indicated by the
 790 *	FUTEX_OWNER_DIED bit. See [4]
 791 *
 792 * [10] There is no transient state which leaves owner and user space
 793 *	TID out of sync.
 794 */
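/*
 * Editor's note: for reference, the user space futex word ("uTID"/"uODIED"
 * above) is laid out as follows (see include/uapi/linux/futex.h):
 *
 *	bit  31		FUTEX_WAITERS	 (0x80000000) - kernel has waiters
 *	bit  30		FUTEX_OWNER_DIED (0x40000000) - owner died holding it
 *	bits 0-29	FUTEX_TID_MASK	 (0x3fffffff) - TID of the owner
 *
 * E.g. FUTEX_WAITERS | FUTEX_OWNER_DIED with TID == 0 is the valid state
 * [4] left behind by exit_robust_list(), while FUTEX_OWNER_DIED with a
 * non-zero TID but no pi_state->owner is the corrupted state [5].
 */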
 795static int
 796lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 797		union futex_key *key, struct futex_pi_state **ps)
 798{
 799	struct futex_pi_state *pi_state = NULL;
 800	struct futex_q *this, *next;
 801	struct task_struct *p;
 802	pid_t pid = uval & FUTEX_TID_MASK;
 803
 804	plist_for_each_entry_safe(this, next, &hb->chain, list) {
 805		if (match_futex(&this->key, key)) {
 806			/*
 807			 * Sanity check the waiter before increasing
 808			 * the refcount and attaching to it.
 809			 */
 810			pi_state = this->pi_state;
 811			/*
 812			 * Userspace might have messed up non-PI and
 813			 * PI futexes [3]
 814			 */
 815			if (unlikely(!pi_state))
 816				return -EINVAL;
 817
 818			WARN_ON(!atomic_read(&pi_state->refcount));
 819
 820			/*
 821			 * Handle the owner died case:
 822			 */
 823			if (uval & FUTEX_OWNER_DIED) {
 824				/*
 825				 * exit_pi_state_list sets owner to NULL and
 826				 * wakes the topmost waiter. The task which
 827				 * acquires the pi_state->rt_mutex will fixup
 828				 * owner.
 829				 */
 830				if (!pi_state->owner) {
 831					/*
 832					 * No pi state owner, but the user
 833					 * space TID is not 0. Inconsistent
 834					 * state. [5]
 835					 */
 836					if (pid)
 837						return -EINVAL;
 838					/*
 839					 * Take a ref on the state and
 840					 * return. [4]
 841					 */
 842					goto out_state;
 843				}
 844
 845				/*
 846				 * If TID is 0, then either the dying owner
 847				 * has not yet executed exit_pi_state_list()
 848				 * or some waiter acquired the rtmutex in the
 849				 * pi state, but did not yet fixup the TID in
 850				 * user space.
 851				 *
 852				 * Take a ref on the state and return. [6]
 853				 */
 854				if (!pid)
 855					goto out_state;
 856			} else {
 857				/*
 858				 * If the owner died bit is not set,
 859				 * then the pi_state must have an
 860				 * owner. [7]
 861				 */
 862				if (!pi_state->owner)
 863					return -EINVAL;
 864			}
 865
 866			/*
 867			 * Bail out if user space manipulated the
 868			 * futex value. If pi state exists then the
 869			 * owner TID must be the same as the user
 870			 * space TID. [9/10]
 871			 */
 872			if (pid != task_pid_vnr(pi_state->owner))
 873				return -EINVAL;
 874
 875		out_state:
 876			atomic_inc(&pi_state->refcount);
 877			*ps = pi_state;
 878			return 0;
 879		}
 880	}
 881
 882	/*
 883	 * We are the first waiter - try to look up the real owner and attach
 884	 * the new pi_state to it, but bail out when TID = 0 [1]
 885	 */
 886	if (!pid)
 887		return -ESRCH;
 888	p = futex_find_get_task(pid);
 889	if (!p)
 890		return -ESRCH;
 891
 892	if (!p->mm) {
 893		put_task_struct(p);
 894		return -EPERM;
 895	}
 896
 897	/*
 898	 * We need to look at the task state flags to figure out
 899	 * whether the task is exiting. To protect against do_exit()
 900	 * changing the task flags concurrently, we do this under
 901	 * p->pi_lock:
 902	 */
 903	raw_spin_lock_irq(&p->pi_lock);
 904	if (unlikely(p->flags & PF_EXITING)) {
 905		/*
 906		 * The task is on the way out. When PF_EXITPIDONE is
 907		 * set, we know that the task has finished the
 908		 * cleanup:
 909		 */
 910		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
 911
 912		raw_spin_unlock_irq(&p->pi_lock);
 913		put_task_struct(p);
 914		return ret;
 915	}
 916
 917	/*
 918	 * No existing pi state. First waiter. [2]
 919	 */
 920	pi_state = alloc_pi_state();
 921
 922	/*
 923	 * Initialize the pi_mutex in locked state and make 'p'
 924	 * the owner of it:
 925	 */
 926	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
 927
 928	/* Store the key for possible exit cleanups: */
 929	pi_state->key = *key;
 930
 931	WARN_ON(!list_empty(&pi_state->list));
 932	list_add(&pi_state->list, &p->pi_state_list);
 933	pi_state->owner = p;
 934	raw_spin_unlock_irq(&p->pi_lock);
 935
 936	put_task_struct(p);
 937
 938	*ps = pi_state;
 939
 940	return 0;
 941}
 942
 943/**
 944 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 945 * @uaddr:		the pi futex user address
 946 * @hb:			the pi futex hash bucket
 947 * @key:		the futex key associated with uaddr and hb
 948 * @ps:			the pi_state pointer where we store the result of the
 949 *			lookup
 950 * @task:		the task to perform the atomic lock work for.  This will
 951 *			be "current" except in the case of requeue pi.
 952 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 953 *
 954 * Return:
 955 *  0 - ready to wait;
 956 *  1 - acquired the lock;
 957 * <0 - error
 958 *
 959 * The hb->lock and futex_key refs shall be held by the caller.
 960 */
 961static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
 962				union futex_key *key,
 963				struct futex_pi_state **ps,
 964				struct task_struct *task, int set_waiters)
 965{
 966	int lock_taken, ret, force_take = 0;
 967	u32 uval, newval, curval, vpid = task_pid_vnr(task);
 968
 969retry:
 970	ret = lock_taken = 0;
 971
 972	/*
 973	 * To avoid races, we attempt to take the lock here again
 974	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
 975	 * the locks. It will most likely not succeed.
 976	 */
 977	newval = vpid;
 978	if (set_waiters)
 979		newval |= FUTEX_WAITERS;
 980
 981	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
 982		return -EFAULT;
 983
 984	/*
 985	 * Detect deadlocks.
 986	 */
 987	if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
 988		return -EDEADLK;
 989
 990	/*
 991	 * Surprise - we got the lock, but we do not trust user space at all.
 992	 */
 993	if (unlikely(!curval)) {
 994		/*
 995		 * We verify whether there is kernel state for this
 996		 * futex. If not, we can safely assume that the 0 ->
 997		 * TID transition is correct. If state exists, we do
 998		 * not bother to fix up the user space state as it was
 999		 * corrupted already.
1000		 */
1001		return futex_top_waiter(hb, key) ? -EINVAL : 1;
1002	}
1003
1004	uval = curval;
1005
1006	/*
1007	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
1008	 * to wake at the next unlock.
1009	 */
1010	newval = curval | FUTEX_WAITERS;
1011
1012	/*
1013	 * Should we force take the futex? See below.
1014	 */
1015	if (unlikely(force_take)) {
1016		/*
1017		 * Keep the OWNER_DIED and the WAITERS bit and set the
1018		 * new TID value.
1019		 */
1020		newval = (curval & ~FUTEX_TID_MASK) | vpid;
1021		force_take = 0;
1022		lock_taken = 1;
1023	}
1024
1025	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1026		return -EFAULT;
1027	if (unlikely(curval != uval))
1028		goto retry;
1029
1030	/*
1031	 * We took the lock due to forced take over.
1032	 */
1033	if (unlikely(lock_taken))
1034		return 1;
1035
1036	/*
1037	 * We don't have the lock. Look up the PI state (or create it if
1038	 * we are the first waiter):
1039	 */
1040	ret = lookup_pi_state(uval, hb, key, ps);
1041
1042	if (unlikely(ret)) {
1043		switch (ret) {
1044		case -ESRCH:
1045			/*
1046			 * We failed to find an owner for this
1047			 * futex. So we have no pi_state to block
1048			 * on. This can happen in two cases:
1049			 *
1050			 * 1) The owner died
1051			 * 2) A stale FUTEX_WAITERS bit
1052			 *
1053			 * Re-read the futex value.
1054			 */
1055			if (get_futex_value_locked(&curval, uaddr))
1056				return -EFAULT;
1057
1058			/*
1059			 * If the owner died or we have a stale
1060			 * WAITERS bit the owner TID in the user space
1061			 * futex is 0.
1062			 */
1063			if (!(curval & FUTEX_TID_MASK)) {
1064				force_take = 1;
1065				goto retry;
1066			}
1067		default:
1068			break;
1069		}
1070	}
1071
1072	return ret;
1073}
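/*
 * Editor's illustration - the user-space fast path that pairs with the
 * function above (sketch, not part of this file): an uncontended PI lock
 * is a pure cmpxchg in user space; only on contention does the thread
 * enter the kernel, which then retries the same 0 -> TID transition here:
 *
 *	uint32_t zero = 0;
 *	if (!__atomic_compare_exchange_n(futex, &zero, gettid(), 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */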
1074
1075/**
1076 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1077 * @q:	The futex_q to unqueue
1078 *
1079 * The q->lock_ptr must not be NULL and must be held by the caller.
1080 */
1081static void __unqueue_futex(struct futex_q *q)
1082{
1083	struct futex_hash_bucket *hb;
1084
1085	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
1086	    || WARN_ON(plist_node_empty(&q->list)))
1087		return;
1088
1089	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1090	plist_del(&q->list, &hb->chain);
1091	hb_waiters_dec(hb);
1092}
1093
1094/*
1095 * The hash bucket lock must be held when this is called.
1096 * Afterwards, the futex_q must not be accessed.
1097 */
1098static void wake_futex(struct futex_q *q)
1099{
1100	struct task_struct *p = q->task;
1101
1102	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1103		return;
1104
1105	/*
1106	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
1107	 * a non-futex wake up happens on another CPU then the task
1108	 * might exit and p would dereference a non-existing task
1109	 * struct. Prevent this by holding a reference on p across the
1110	 * wake up.
1111	 */
1112	get_task_struct(p);
1113
1114	__unqueue_futex(q);
1115	/*
1116	 * The waiting task can free the futex_q as soon as
1117	 * q->lock_ptr = NULL is written, without taking any locks. A
1118	 * memory barrier is required here to prevent the following
1119	 * store to lock_ptr from getting ahead of the plist_del.
1120	 */
1121	smp_wmb();
1122	q->lock_ptr = NULL;
1123
1124	wake_up_state(p, TASK_NORMAL);
1125	put_task_struct(p);
1126}
1127
1128static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
1129{
1130	struct task_struct *new_owner;
1131	struct futex_pi_state *pi_state = this->pi_state;
1132	u32 uninitialized_var(curval), newval;
1133	int ret = 0;
1134
1135	if (!pi_state)
1136		return -EINVAL;
1137
1138	/*
1139	 * If current does not own the pi_state then the futex is
1140	 * inconsistent and user space fiddled with the futex value.
1141	 */
1142	if (pi_state->owner != current)
1143		return -EINVAL;
1144
1145	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
1146	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1147
1148	/*
1149	 * It is possible that the next waiter (the one that brought
1150	 * this owner to the kernel) timed out and is no longer
1151	 * waiting on the lock.
1152	 */
1153	if (!new_owner)
1154		new_owner = this->task;
1155
1156	/*
1157	 * We pass it to the next owner. The WAITERS bit is always
1158	 * kept enabled while there is PI state around. We cleanup the
1159	 * owner died bit, because we are the owner.
1160	 */
1161	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1162
1163	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1164		ret = -EFAULT;
1165	else if (curval != uval)
1166		ret = -EINVAL;
1167	if (ret) {
1168		raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1169		return ret;
1170	}
1171
1172	raw_spin_lock_irq(&pi_state->owner->pi_lock);
1173	WARN_ON(list_empty(&pi_state->list));
1174	list_del_init(&pi_state->list);
1175	raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1176
1177	raw_spin_lock_irq(&new_owner->pi_lock);
1178	WARN_ON(!list_empty(&pi_state->list));
1179	list_add(&pi_state->list, &new_owner->pi_state_list);
1180	pi_state->owner = new_owner;
1181	raw_spin_unlock_irq(&new_owner->pi_lock);
1182
1183	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1184	rt_mutex_unlock(&pi_state->pi_mutex);
1185
1186	return 0;
1187}
1188
1189static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
1190{
1191	u32 uninitialized_var(oldval);
1192
1193	/*
1194	 * There is no waiter, so we unlock the futex. The owner died
1195	 * bit does not need to be preserved here. We are the owner:
1196	 */
1197	if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
1198		return -EFAULT;
1199	if (oldval != uval)
1200		return -EAGAIN;
1201
1202	return 0;
1203}
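/*
 * Editor's illustration - the matching user-space unlock fast path
 * (sketch, not part of this file): cmpxchg the word from our bare TID back
 * to 0. If FUTEX_WAITERS is set the cmpxchg fails, and the kernel must
 * hand the lock to the top waiter via FUTEX_UNLOCK_PI:
 *
 *	uint32_t tid = gettid();
 *	if (!__atomic_compare_exchange_n(futex, &tid, 0, 0,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */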
1204
1205/*
1206 * Express the locking dependencies for lockdep:
1207 */
1208static inline void
1209double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1210{
1211	if (hb1 <= hb2) {
1212		spin_lock(&hb1->lock);
1213		if (hb1 < hb2)
1214			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1215	} else { /* hb1 > hb2 */
1216		spin_lock(&hb2->lock);
1217		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1218	}
1219}
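/*
 * Editor's note: taking the two bucket locks in a fixed (address) order is
 * the classic ABBA deadlock avoidance - two tasks operating on the same
 * pair of buckets in opposite directions will always contend on the same
 * lock first, instead of each holding one lock and waiting for the other.
 */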
1220
1221static inline void
1222double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1223{
1224	spin_unlock(&hb1->lock);
1225	if (hb1 != hb2)
1226		spin_unlock(&hb2->lock);
1227}
1228
1229/*
1230 * Wake up waiters matching bitset queued on this futex (uaddr).
1231 */
1232static int
1233futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1234{
1235	struct futex_hash_bucket *hb;
1236	struct futex_q *this, *next;
1237	union futex_key key = FUTEX_KEY_INIT;
1238	int ret;
1239
1240	if (!bitset)
1241		return -EINVAL;
1242
1243	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
1244	if (unlikely(ret != 0))
1245		goto out;
1246
1247	hb = hash_futex(&key);
1248
1249	/* Make sure we really have tasks to wake up */
1250	if (!hb_waiters_pending(hb))
1251		goto out_put_key;
1252
1253	spin_lock(&hb->lock);
1254
1255	plist_for_each_entry_safe(this, next, &hb->chain, list) {
1256		if (match_futex(&this->key, &key)) {
1257			if (this->pi_state || this->rt_waiter) {
1258				ret = -EINVAL;
1259				break;
1260			}
1261
1262			/* Check if one of the bits is set in both bitsets */
1263			if (!(this->bitset & bitset))
1264				continue;
1265
1266			wake_futex(this);
1267			if (++ret >= nr_wake)
1268				break;
1269		}
1270	}
1271
1272	spin_unlock(&hb->lock);
1273out_put_key:
1274	put_futex_key(&key);
1275out:
1276	return ret;
1277}
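/*
 * Editor's illustration - how user space targets the bitset match above
 * (sketch, not part of this file). A waiter tags itself with a bitset via
 * FUTEX_WAIT_BITSET, and a waker can then wake only matching waiters:
 *
 *	syscall(SYS_futex, &w, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x1);
 *	...
 *	// wakes only waiters whose bitset intersects 0x1:
 *	syscall(SYS_futex, &w, FUTEX_WAKE_BITSET, 1, NULL, NULL, 0x1);
 *
 * A plain FUTEX_WAIT queues with FUTEX_BITSET_MATCH_ANY (all bits set),
 * so it is woken by any FUTEX_WAKE/FUTEX_WAKE_BITSET on the futex.
 */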
1278
1279/*
1280 * Wake up all waiters hashed on the physical page that is mapped
1281 * to this virtual address:
1282 */
1283static int
1284futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1285	      int nr_wake, int nr_wake2, int op)
1286{
1287	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1288	struct futex_hash_bucket *hb1, *hb2;
1289	struct futex_q *this, *next;
1290	int ret, op_ret;
1291
1292retry:
1293	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1294	if (unlikely(ret != 0))
1295		goto out;
1296	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1297	if (unlikely(ret != 0))
1298		goto out_put_key1;
1299
1300	hb1 = hash_futex(&key1);
1301	hb2 = hash_futex(&key2);
1302
1303retry_private:
1304	double_lock_hb(hb1, hb2);
1305	op_ret = futex_atomic_op_inuser(op, uaddr2);
1306	if (unlikely(op_ret < 0)) {
1307
1308		double_unlock_hb(hb1, hb2);
1309
1310#ifndef CONFIG_MMU
1311		/*
1312		 * we don't get EFAULT from MMU faults if we don't have an MMU,
1313		 * but we might get them from range checking
1314		 */
1315		ret = op_ret;
1316		goto out_put_keys;
1317#endif
1318
1319		if (unlikely(op_ret != -EFAULT)) {
1320			ret = op_ret;
1321			goto out_put_keys;
1322		}
1323
1324		ret = fault_in_user_writeable(uaddr2);
1325		if (ret)
1326			goto out_put_keys;
1327
1328		if (!(flags & FLAGS_SHARED))
1329			goto retry_private;
1330
1331		put_futex_key(&key2);
1332		put_futex_key(&key1);
1333		goto retry;
1334	}
1335
1336	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1337		if (match_futex(&this->key, &key1)) {
1338			if (this->pi_state || this->rt_waiter) {
1339				ret = -EINVAL;
1340				goto out_unlock;
1341			}
1342			wake_futex(this);
1343			if (++ret >= nr_wake)
1344				break;
1345		}
1346	}
1347
1348	if (op_ret > 0) {
1349		op_ret = 0;
1350		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1351			if (match_futex(&this->key, &key2)) {
1352				if (this->pi_state || this->rt_waiter) {
1353					ret = -EINVAL;
1354					goto out_unlock;
1355				}
1356				wake_futex(this);
1357				if (++op_ret >= nr_wake2)
1358					break;
1359			}
1360		}
1361		ret += op_ret;
1362	}
1363
1364out_unlock:
1365	double_unlock_hb(hb1, hb2);
1366out_put_keys:
1367	put_futex_key(&key2);
1368out_put_key1:
1369	put_futex_key(&key1);
1370out:
1371	return ret;
1372}
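/*
 * Editor's illustration - the 'op' argument above is encoded with the
 * FUTEX_OP() macro from <linux/futex.h> (sketch, not part of this file):
 *
 *	// Atomically: *uaddr2 = 0; wake nr_wake waiters on uaddr1, plus
 *	// nr_wake2 on uaddr2 if the old *uaddr2 value was == 1.
 *	// (nr_wake2 is passed in the timeout argument slot.)
 *	syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, nr_wake, nr_wake2, uaddr2,
 *		FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_EQ, 1));
 */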
1373
1374/**
1375 * requeue_futex() - Requeue a futex_q from one hb to another
1376 * @q:		the futex_q to requeue
1377 * @hb1:	the source hash_bucket
1378 * @hb2:	the target hash_bucket
1379 * @key2:	the new key for the requeued futex_q
1380 */
1381static inline
1382void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1383		   struct futex_hash_bucket *hb2, union futex_key *key2)
1384{
1385
1386	/*
1387	 * If key1 and key2 hash to the same bucket, no need to
1388	 * requeue.
1389	 */
1390	if (likely(&hb1->chain != &hb2->chain)) {
1391		plist_del(&q->list, &hb1->chain);
1392		hb_waiters_dec(hb1);
1393		plist_add(&q->list, &hb2->chain);
1394		hb_waiters_inc(hb2);
1395		q->lock_ptr = &hb2->lock;
1396	}
1397	get_futex_key_refs(key2);
1398	q->key = *key2;
1399}
1400
1401/**
1402 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1403 * @q:		the futex_q
1404 * @key:	the key of the requeue target futex
1405 * @hb:		the hash_bucket of the requeue target futex
1406 *
1407 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1408 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1409 * to the requeue target futex so the waiter can detect the wakeup on the right
1410 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1411 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
1412 * to protect access to the pi_state to fixup the owner later.  Must be called
1413 * with both q->lock_ptr and hb->lock held.
1414 */
1415static inline
1416void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1417			   struct futex_hash_bucket *hb)
1418{
1419	get_futex_key_refs(key);
1420	q->key = *key;
1421
1422	__unqueue_futex(q);
1423
1424	WARN_ON(!q->rt_waiter);
1425	q->rt_waiter = NULL;
1426
1427	q->lock_ptr = &hb->lock;
1428
1429	wake_up_state(q->task, TASK_NORMAL);
1430}
1431
1432/**
1433 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1434 * @pifutex:		the user address of the to futex
1435 * @hb1:		the from futex hash bucket, must be locked by the caller
1436 * @hb2:		the to futex hash bucket, must be locked by the caller
1437 * @key1:		the from futex key
1438 * @key2:		the to futex key
1439 * @ps:			address to store the pi_state pointer
1440 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
1441 *
1442 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1443 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1444 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1445 * hb1 and hb2 must be held by the caller.
1446 *
1447 * Return:
1448 *  0 - failed to acquire the lock atomically;
1449 * >0 - acquired the lock, return value is vpid of the top_waiter
1450 * <0 - error
1451 */
1452static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1453				 struct futex_hash_bucket *hb1,
1454				 struct futex_hash_bucket *hb2,
1455				 union futex_key *key1, union futex_key *key2,
1456				 struct futex_pi_state **ps, int set_waiters)
1457{
1458	struct futex_q *top_waiter = NULL;
1459	u32 curval;
1460	int ret, vpid;
1461
1462	if (get_futex_value_locked(&curval, pifutex))
1463		return -EFAULT;
1464
1465	/*
1466	 * Find the top_waiter and determine if there are additional waiters.
1467	 * If the caller intends to requeue more than 1 waiter to pifutex,
1468	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1469	 * as we have means to handle the possible fault.  If not, don't set
1470	 * the bit unnecessarily as it will force the subsequent unlock to enter
1471	 * the kernel.
1472	 */
1473	top_waiter = futex_top_waiter(hb1, key1);
1474
1475	/* There are no waiters, nothing for us to do. */
1476	if (!top_waiter)
1477		return 0;
1478
1479	/* Ensure we requeue to the expected futex. */
1480	if (!match_futex(top_waiter->requeue_pi_key, key2))
1481		return -EINVAL;
1482
1483	/*
1484	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1485	 * the contended case or if set_waiters is 1.  The pi_state is returned
1486	 * in ps in contended cases.
1487	 */
1488	vpid = task_pid_vnr(top_waiter->task);
1489	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1490				   set_waiters);
1491	if (ret == 1) {
1492		requeue_pi_wake_futex(top_waiter, key2, hb2);
1493		return vpid;
1494	}
1495	return ret;
1496}
1497
1498/**
1499 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1500 * @uaddr1:	source futex user address
1501 * @flags:	futex flags (FLAGS_SHARED, etc.)
1502 * @uaddr2:	target futex user address
1503 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
1504 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
1505 * @cmpval:	@uaddr1 expected value (or %NULL)
1506 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
1507 *		pi futex (pi to pi requeue is not supported)
1508 *
1509 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1510 * uaddr2 atomically on behalf of the top waiter.
1511 *
1512 * Return:
1513 * >=0 - on success, the number of tasks requeued or woken;
1514 *  <0 - on error
1515 */
1516static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1517			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1518			 u32 *cmpval, int requeue_pi)
1519{
1520	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1521	int drop_count = 0, task_count = 0, ret;
1522	struct futex_pi_state *pi_state = NULL;
1523	struct futex_hash_bucket *hb1, *hb2;
1524	struct futex_q *this, *next;
1525
1526	if (requeue_pi) {
1527		/*
1528		 * Requeue PI only works on two distinct uaddrs. This
1529		 * check is only valid for private futexes. See below.
1530		 */
1531		if (uaddr1 == uaddr2)
1532			return -EINVAL;
1533
1534		/*
1535		 * requeue_pi requires a pi_state, try to allocate it now
1536		 * without any locks in case it fails.
1537		 */
1538		if (refill_pi_state_cache())
1539			return -ENOMEM;
1540		/*
1541		 * requeue_pi must wake as many tasks as it can, up to nr_wake
1542		 * + nr_requeue, since it acquires the rt_mutex prior to
1543		 * returning to userspace, so as to not leave the rt_mutex with
1544		 * waiters and no owner.  However, second and third wake-ups
1545		 * cannot be predicted as they involve race conditions with the
1546		 * first wake and a fault while looking up the pi_state.  Both
1547		 * pthread_cond_signal() and pthread_cond_broadcast() should
1548		 * use nr_wake=1.
1549		 */
1550		if (nr_wake != 1)
1551			return -EINVAL;
1552	}
1553
1554retry:
1555	if (pi_state != NULL) {
1556		/*
1557		 * We will have to look up the pi_state again, so free this one
1558		 * to keep the accounting correct.
1559		 */
1560		free_pi_state(pi_state);
1561		pi_state = NULL;
1562	}
1563
1564	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1565	if (unlikely(ret != 0))
1566		goto out;
1567	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1568			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1569	if (unlikely(ret != 0))
1570		goto out_put_key1;
1571
1572	/*
1573	 * The check above which compares uaddrs is not sufficient for
1574	 * shared futexes. We need to compare the keys:
1575	 */
1576	if (requeue_pi && match_futex(&key1, &key2)) {
1577		ret = -EINVAL;
1578		goto out_put_keys;
1579	}
1580
1581	hb1 = hash_futex(&key1);
1582	hb2 = hash_futex(&key2);
1583
1584retry_private:
1585	hb_waiters_inc(hb2);
1586	double_lock_hb(hb1, hb2);
1587
1588	if (likely(cmpval != NULL)) {
1589		u32 curval;
1590
1591		ret = get_futex_value_locked(&curval, uaddr1);
1592
1593		if (unlikely(ret)) {
1594			double_unlock_hb(hb1, hb2);
1595			hb_waiters_dec(hb2);
1596
1597			ret = get_user(curval, uaddr1);
1598			if (ret)
1599				goto out_put_keys;
1600
1601			if (!(flags & FLAGS_SHARED))
1602				goto retry_private;
1603
1604			put_futex_key(&key2);
1605			put_futex_key(&key1);
1606			goto retry;
1607		}
1608		if (curval != *cmpval) {
1609			ret = -EAGAIN;
1610			goto out_unlock;
1611		}
1612	}
1613
1614	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1615		/*
1616		 * Attempt to acquire uaddr2 and wake the top waiter. If we
1617		 * intend to requeue waiters, force setting the FUTEX_WAITERS
1618		 * bit.  We force this here where we are able to easily handle
1619		 * faults rather in the requeue loop below.
1620		 */
1621		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1622						 &key2, &pi_state, nr_requeue);
1623
1624		/*
1625		 * At this point the top_waiter has either taken uaddr2 or is
1626		 * waiting on it.  If the former, then the pi_state will not
1627		 * exist yet, look it up one more time to ensure we have a
1628		 * reference to it. If the lock was taken, ret contains the
1629		 * vpid of the top waiter task.
1630		 */
1631		if (ret > 0) {
1632			WARN_ON(pi_state);
1633			drop_count++;
1634			task_count++;
1635			/*
1636			 * If we acquired the lock, then the user
1637			 * space value of uaddr2 should be vpid. It
1638			 * cannot be changed by the top waiter as it
1639			 * is blocked on hb2 lock if it tries to do
1640			 * so. If something fiddled with it behind our
1641			 * back, the pi state lookup might unearth
1642			 * it. So we use the known value rather than
1643			 * rereading it and handing potential garbage
1644			 * to lookup_pi_state.
1645			 */
1646			ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1647		}
1648
1649		switch (ret) {
1650		case 0:
1651			break;
1652		case -EFAULT:
1653			double_unlock_hb(hb1, hb2);
1654			hb_waiters_dec(hb2);
1655			put_futex_key(&key2);
1656			put_futex_key(&key1);
1657			ret = fault_in_user_writeable(uaddr2);
1658			if (!ret)
1659				goto retry;
1660			goto out;
1661		case -EAGAIN:
1662			/* The owner was exiting, try again. */
1663			double_unlock_hb(hb1, hb2);
1664			hb_waiters_dec(hb2);
1665			put_futex_key(&key2);
1666			put_futex_key(&key1);
1667			cond_resched();
1668			goto retry;
1669		default:
1670			goto out_unlock;
1671		}
1672	}
1673
1674	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1675		if (task_count - nr_wake >= nr_requeue)
1676			break;
1677
1678		if (!match_futex(&this->key, &key1))
1679			continue;
1680
1681		/*
1682		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1683		 * be paired with each other and no other futex ops.
1684		 *
1685		 * We should never be requeueing a futex_q with a pi_state,
1686		 * which is awaiting a futex_unlock_pi().
1687		 */
1688		if ((requeue_pi && !this->rt_waiter) ||
1689		    (!requeue_pi && this->rt_waiter) ||
1690		    this->pi_state) {
1691			ret = -EINVAL;
1692			break;
1693		}
1694
1695		/*
1696		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
1697		 * lock, we already woke the top_waiter.  If not, it will be
1698		 * woken by futex_unlock_pi().
1699		 */
1700		if (++task_count <= nr_wake && !requeue_pi) {
1701			wake_futex(this);
1702			continue;
1703		}
1704
1705		/* Ensure we requeue to the expected futex for requeue_pi. */
1706		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1707			ret = -EINVAL;
1708			break;
1709		}
1710
1711		/*
1712		 * Requeue nr_requeue waiters and possibly one more in the case
1713		 * of requeue_pi if we couldn't acquire the lock atomically.
1714		 */
1715		if (requeue_pi) {
1716			/* Prepare the waiter to take the rt_mutex. */
1717			atomic_inc(&pi_state->refcount);
1718			this->pi_state = pi_state;
1719			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1720							this->rt_waiter,
1721							this->task, 1);
1722			if (ret == 1) {
1723				/* We got the lock. */
1724				requeue_pi_wake_futex(this, &key2, hb2);
1725				drop_count++;
1726				continue;
1727			} else if (ret) {
1728				/* -EDEADLK */
1729				this->pi_state = NULL;
1730				free_pi_state(pi_state);
1731				goto out_unlock;
1732			}
1733		}
1734		requeue_futex(this, hb1, hb2, &key2);
1735		drop_count++;
1736	}
1737
1738out_unlock:
1739	double_unlock_hb(hb1, hb2);
1740	hb_waiters_dec(hb2);
1741
1742	/*
1743	 * drop_futex_key_refs() must be called outside the spinlocks. During
1744	 * the requeue we moved futex_q's from the hash bucket at key1 to the
1745	 * one at key2 and updated their key pointer.  We no longer need to
1746	 * hold the references to key1.
1747	 */
1748	while (--drop_count >= 0)
1749		drop_futex_key_refs(&key1);
1750
1751out_put_keys:
1752	put_futex_key(&key2);
1753out_put_key1:
1754	put_futex_key(&key1);
1755out:
1756	if (pi_state != NULL)
1757		free_pi_state(pi_state);
1758	return ret ? ret : task_count;
1759}
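/*
 * Editor's illustration - the classic consumer of the plain (non-pi)
 * requeue above is a condition variable broadcast (sketch, not part of
 * this file; cond_seq/cond_val/mutex_word are hypothetical names): wake
 * one waiter and requeue the rest onto the mutex word, so they are woken
 * one at a time as the mutex is handed over instead of stampeding:
 *
 *	syscall(SYS_futex, &cond_seq, FUTEX_CMP_REQUEUE,
 *		1,		// nr_wake: wake one waiter
 *		INT_MAX,	// nr_requeue (in the timeout slot)
 *		&mutex_word,	// requeue target futex
 *		cond_val);	// cmpval: fail with -EAGAIN if changed
 */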
1760
1761/* The key must be already stored in q->key. */
1762static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1763	__acquires(&hb->lock)
1764{
1765	struct futex_hash_bucket *hb;
1766
1767	hb = hash_futex(&q->key);
1768
1769	/*
1770	 * Increment the counter before taking the lock so that
1771	 * a potential waker won't miss a task that is about to sleep
1772	 * and is waiting for the spinlock. This is safe as all queue_lock()
1773	 * users end up calling queue_me(). Similarly, for housekeeping,
1774	 * decrement the counter at queue_unlock() when some error has
1775	 * occurred and we don't end up adding the task to the list.
1776	 */
1777	hb_waiters_inc(hb);
1778
1779	q->lock_ptr = &hb->lock;
1780
1781	spin_lock(&hb->lock); /* implies MB (A) */
1782	return hb;
1783}
1784
1785static inline void
1786queue_unlock(struct futex_hash_bucket *hb)
1787	__releases(&hb->lock)
1788{
1789	spin_unlock(&hb->lock);
1790	hb_waiters_dec(hb);
1791}
1792
1793/**
1794 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1795 * @q:	The futex_q to enqueue
1796 * @hb:	The destination hash bucket
1797 *
1798 * The hb->lock must be held by the caller, and is released here. A call to
1799 * queue_me() is typically paired with exactly one call to unqueue_me().  The
1800 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1801 * or nothing if the unqueue is done as part of the wake process and the unqueue
1802 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1803 * an example).
1804 */
1805static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1806	__releases(&hb->lock)
1807{
1808	int prio;
1809
1810	/*
1811	 * The priority used to register this element is
1812	 * - either the real thread-priority for the real-time threads
1813	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1814	 * - or MAX_RT_PRIO for non-RT threads.
1815	 * Thus, all RT-threads are woken first in priority order, and
1816	 * the others are woken last, in FIFO order.
1817	 */
1818	prio = min(current->normal_prio, MAX_RT_PRIO);
1819
1820	plist_node_init(&q->list, prio);
1821	plist_add(&q->list, &hb->chain);
1822	q->task = current;
1823	spin_unlock(&hb->lock);
1824}
1825
1826/**
1827 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1828 * @q:	The futex_q to unqueue
1829 *
1830 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1831 * be paired with exactly one earlier call to queue_me().
1832 *
1833 * Return:
1834 *   1 - if the futex_q was still queued (and we removed it);
1835 *   0 - if the futex_q was already removed by the waking thread
1836 */
1837static int unqueue_me(struct futex_q *q)
1838{
1839	spinlock_t *lock_ptr;
1840	int ret = 0;
1841
1842	/* In the common case we don't take the spinlock, which is nice. */
1843retry:
1844	lock_ptr = q->lock_ptr;
1845	barrier();
1846	if (lock_ptr != NULL) {
1847		spin_lock(lock_ptr);
1848		/*
1849		 * q->lock_ptr can change between reading it and
1850		 * spin_lock(), causing us to take the wrong lock.  This
1851		 * corrects the race condition.
1852		 *
1853		 * Reasoning goes like this: if we have the wrong lock,
1854		 * q->lock_ptr must have changed (maybe several times)
1855		 * between reading it and the spin_lock().  It can
1856		 * change again after the spin_lock() but only if it was
1857		 * already changed before the spin_lock().  It cannot,
1858		 * however, change back to the original value.  Therefore
1859		 * we can detect whether we acquired the correct lock.
1860		 */
1861		if (unlikely(lock_ptr != q->lock_ptr)) {
1862			spin_unlock(lock_ptr);
1863			goto retry;
1864		}
1865		__unqueue_futex(q);
1866
1867		BUG_ON(q->pi_state);
1868
1869		spin_unlock(lock_ptr);
1870		ret = 1;
1871	}
1872
1873	drop_futex_key_refs(&q->key);
1874	return ret;
1875}
1876
1877/*
1878 * PI futexes cannot be requeued and must remove themselves from the
1879 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1880 * and dropped here.
1881 */
1882static void unqueue_me_pi(struct futex_q *q)
1883	__releases(q->lock_ptr)
1884{
1885	__unqueue_futex(q);
1886
1887	BUG_ON(!q->pi_state);
1888	free_pi_state(q->pi_state);
1889	q->pi_state = NULL;
1890
1891	spin_unlock(q->lock_ptr);
1892}
1893
1894/*
1895 * Fixup the pi_state owner with the new owner.
1896 *
1897 * Must be called with the hash bucket lock held and mm->sem held for
1898 * non-private futexes.
1899 */
1900static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1901				struct task_struct *newowner)
1902{
1903	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1904	struct futex_pi_state *pi_state = q->pi_state;
1905	struct task_struct *oldowner = pi_state->owner;
1906	u32 uval, uninitialized_var(curval), newval;
1907	int ret;
1908
1909	/* Owner died? */
1910	if (!pi_state->owner)
1911		newtid |= FUTEX_OWNER_DIED;
1912
1913	/*
1914	 * We are here either because we stole the rtmutex from the
1915	 * previous highest priority waiter or we are the highest priority
1916	 * waiter but failed to get the rtmutex the first time.
1917	 * We have to replace the newowner TID in the user space variable.
1918	 * This must be atomic as we have to preserve the owner died bit here.
1919	 *
1920	 * Note: We write the user space value _before_ changing the pi_state
1921	 * because we can fault here. Imagine swapped out pages or a fork
1922	 * that marked all the anonymous memory readonly for cow.
1923	 *
1924	 * Modifying pi_state _before_ the user space value would
1925	 * leave the pi_state in an inconsistent state when we fault
1926	 * here, because we need to drop the hash bucket lock to
1927	 * handle the fault. This might be observed in the PID check
1928	 * in lookup_pi_state.
1929	 */
1930retry:
1931	if (get_futex_value_locked(&uval, uaddr))
1932		goto handle_fault;
1933
1934	while (1) {
1935		newval = (uval & FUTEX_OWNER_DIED) | newtid;
1936
1937		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1938			goto handle_fault;
1939		if (curval == uval)
1940			break;
1941		uval = curval;
1942	}
1943
1944	/*
1945	 * We fixed up user space. Now we need to fix the pi_state
1946	 * itself.
1947	 */
1948	if (pi_state->owner != NULL) {
1949		raw_spin_lock_irq(&pi_state->owner->pi_lock);
1950		WARN_ON(list_empty(&pi_state->list));
1951		list_del_init(&pi_state->list);
1952		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1953	}
1954
1955	pi_state->owner = newowner;
1956
1957	raw_spin_lock_irq(&newowner->pi_lock);
1958	WARN_ON(!list_empty(&pi_state->list));
1959	list_add(&pi_state->list, &newowner->pi_state_list);
1960	raw_spin_unlock_irq(&newowner->pi_lock);
1961	return 0;
1962
1963	/*
1964	 * To handle the page fault we need to drop the hash bucket
1965	 * lock here. That gives the other task (either the highest priority
1966	 * waiter itself or the task which stole the rtmutex) the
1967	 * chance to try the fixup of the pi_state. So once we are
1968	 * back from handling the fault we need to check the pi_state
1969	 * after reacquiring the hash bucket lock and before trying to
1970	 * do another fixup. When the fixup has been done already we
1971	 * simply return.
1972	 */
1973handle_fault:
1974	spin_unlock(q->lock_ptr);
1975
1976	ret = fault_in_user_writeable(uaddr);
1977
1978	spin_lock(q->lock_ptr);
1979
1980	/*
1981	 * Check if someone else fixed it for us:
1982	 */
1983	if (pi_state->owner != oldowner)
1984		return 0;
1985
1986	if (ret)
1987		return ret;
1988
1989	goto retry;
1990}
1991
1992static long futex_wait_restart(struct restart_block *restart);
1993
1994/**
1995 * fixup_owner() - Post lock pi_state and corner case management
1996 * @uaddr:	user address of the futex
1997 * @q:		futex_q (contains pi_state and access to the rt_mutex)
1998 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
1999 *
2000 * After attempting to lock an rt_mutex, this function is called to cleanup
2001 * the pi_state owner as well as handle race conditions that may allow us to
2002 * acquire the lock. Must be called with the hb lock held.
2003 *
2004 * Return:
2005 *  1 - success, lock taken;
2006 *  0 - success, lock not taken;
2007 * <0 - on error (-EFAULT)
2008 */
2009static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2010{
2011	struct task_struct *owner;
2012	int ret = 0;
2013
2014	if (locked) {
2015		/*
2016		 * Got the lock. We might not be the anticipated owner if we
2017		 * did a lock-steal - fix up the PI-state in that case:
2018		 */
2019		if (q->pi_state->owner != current)
2020			ret = fixup_pi_state_owner(uaddr, q, current);
2021		goto out;
2022	}
2023
2024	/*
2025	 * Catch the rare case, where the lock was released when we were on the
2026	 * way back before we locked the hash bucket.
2027	 */
2028	if (q->pi_state->owner == current) {
2029		/*
2030		 * Try to get the rt_mutex now. This might fail as some other
2031		 * task acquired the rt_mutex after we removed ourself from the
2032		 * rt_mutex waiters list.
2033		 */
2034		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2035			locked = 1;
2036			goto out;
2037		}
2038
2039		/*
2040		 * pi_state is incorrect, some other task did a lock steal and
2041		 * we returned due to timeout or signal without taking the
2042		 * rt_mutex. Too late.
2043		 */
2044		raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
2045		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2046		if (!owner)
2047			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2048		raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
2049		ret = fixup_pi_state_owner(uaddr, q, owner);
2050		goto out;
2051	}
2052
2053	/*
2054	 * Paranoia check. If we did not take the lock, then we should not be
2055	 * the owner of the rt_mutex.
2056	 */
2057	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2058		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2059				"pi-state %p\n", ret,
2060				q->pi_state->pi_mutex.owner,
2061				q->pi_state->owner);
2062
2063out:
2064	return ret ? ret : locked;
2065}
2066
2067/**
2068 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2069 * @hb:		the futex hash bucket, must be locked by the caller
2070 * @q:		the futex_q to queue up on
2071 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
2072 */
2073static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2074				struct hrtimer_sleeper *timeout)
2075{
2076	/*
2077	 * The task state is guaranteed to be set before another task can
2078	 * wake it. set_current_state() is implemented using set_mb() and
2079	 * queue_me() calls spin_unlock() upon completion, both serializing
2080	 * access to the hash list and forcing another memory barrier.
2081	 */
2082	set_current_state(TASK_INTERRUPTIBLE);
2083	queue_me(q, hb);
2084
2085	/* Arm the timer */
2086	if (timeout) {
2087		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2088		if (!hrtimer_active(&timeout->timer))
2089			timeout->task = NULL;
2090	}
2091
2092	/*
2093	 * If we have been removed from the hash list, then another task
2094	 * has tried to wake us, and we can skip the call to schedule().
2095	 */
2096	if (likely(!plist_node_empty(&q->list))) {
2097		/*
2098		 * If the timer has already expired, current will already be
2099		 * flagged for rescheduling. Only call schedule if there
2100		 * is no timeout, or if it has yet to expire.
2101		 */
2102		if (!timeout || timeout->task)
2103			freezable_schedule();
2104	}
2105	__set_current_state(TASK_RUNNING);
2106}
2107
2108/**
2109 * futex_wait_setup() - Prepare to wait on a futex
2110 * @uaddr:	the futex userspace address
2111 * @val:	the expected value
2112 * @flags:	futex flags (FLAGS_SHARED, etc.)
2113 * @q:		the associated futex_q
2114 * @hb:		storage for hash_bucket pointer to be returned to caller
2115 *
2116 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
2117 * compare it with the expected value.  Handle atomic faults internally.
2118 * Return with the hb lock held and a q.key reference on success, and unlocked
2119 * with no q.key reference on failure.
2120 *
2121 * Return:
2122 *  0 - uaddr contains val and hb has been locked;
2123 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2124 */
2125static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2126			   struct futex_q *q, struct futex_hash_bucket **hb)
2127{
2128	u32 uval;
2129	int ret;
2130
2131	/*
2132	 * Access the page AFTER the hash-bucket is locked.
2133	 * Order is important:
2134	 *
2135	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2136	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
2137	 *
2138	 * The basic logical guarantee of a futex is that it blocks ONLY
2139	 * if cond(var) is known to be true at the time of blocking, for
2140	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
2141	 * would open a race condition where we could block indefinitely with
2142	 * cond(var) false, which would violate the guarantee.
2143	 *
2144	 * On the other hand, we insert q and release the hash-bucket only
2145	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
2146	 * absorb a wakeup if *uaddr does not match the desired value
2147	 * while the syscall executes.
2148	 */
2149retry:
2150	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2151	if (unlikely(ret != 0))
2152		return ret;
2153
2154retry_private:
2155	*hb = queue_lock(q);
2156
2157	ret = get_futex_value_locked(&uval, uaddr);
2158
2159	if (ret) {
2160		queue_unlock(*hb);
2161
2162		ret = get_user(uval, uaddr);
2163		if (ret)
2164			goto out;
2165
2166		if (!(flags & FLAGS_SHARED))
2167			goto retry_private;
2168
2169		put_futex_key(&q->key);
2170		goto retry;
2171	}
2172
2173	if (uval != val) {
2174		queue_unlock(*hb);
2175		ret = -EWOULDBLOCK;
2176	}
2177
2178out:
2179	if (ret)
2180		put_futex_key(&q->key);
2181	return ret;
2182}
2183
2184static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2185		      ktime_t *abs_time, u32 bitset)
2186{
2187	struct hrtimer_sleeper timeout, *to = NULL;
2188	struct restart_block *restart;
2189	struct futex_hash_bucket *hb;
2190	struct futex_q q = futex_q_init;
2191	int ret;
2192
2193	if (!bitset)
2194		return -EINVAL;
2195	q.bitset = bitset;
2196
2197	if (abs_time) {
2198		to = &timeout;
2199
2200		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2201				      CLOCK_REALTIME : CLOCK_MONOTONIC,
2202				      HRTIMER_MODE_ABS);
2203		hrtimer_init_sleeper(to, current);
2204		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2205					     current->timer_slack_ns);
2206	}
2207
2208retry:
2209	/*
2210	 * Prepare to wait on uaddr. On success, holds hb lock and increments
2211	 * q.key refs.
2212	 */
2213	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2214	if (ret)
2215		goto out;
2216
2217	/* queue_me and wait for wakeup, timeout, or a signal. */
2218	futex_wait_queue_me(hb, &q, to);
2219
2220	/* If we were woken (and unqueued), we succeeded, whatever. */
2221	ret = 0;
2222	/* unqueue_me() drops q.key ref */
2223	if (!unqueue_me(&q))
2224		goto out;
2225	ret = -ETIMEDOUT;
2226	if (to && !to->task)
2227		goto out;
2228
2229	/*
2230	 * We expect signal_pending(current), but we might be the
2231	 * victim of a spurious wakeup as well.
2232	 */
2233	if (!signal_pending(current))
2234		goto retry;
2235
2236	ret = -ERESTARTSYS;
2237	if (!abs_time)
2238		goto out;
2239
2240	restart = &current_thread_info()->restart_block;
2241	restart->fn = futex_wait_restart;
2242	restart->futex.uaddr = uaddr;
2243	restart->futex.val = val;
2244	restart->futex.time = abs_time->tv64;
2245	restart->futex.bitset = bitset;
2246	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2247
2248	ret = -ERESTART_RESTARTBLOCK;
2249
2250out:
2251	if (to) {
2252		hrtimer_cancel(&to->timer);
2253		destroy_hrtimer_on_stack(&to->timer);
2254	}
2255	return ret;
2256}
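
As a user-space counterpart to the function above, here is a minimal sketch of
the waiter/waker pairing that futex_wait() serves. It is illustrative only:
all names are hypothetical, error handling is omitted, and it assumes the raw
syscall(2) wrapper plus C11 atomics (glibc provides no futex() wrapper). Note
that the waiter re-tests the value in a loop, since futex_wait() can return
-EWOULDBLOCK or wake spuriously:

	#include <stdatomic.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	static atomic_int flag;	/* hypothetical futex word: 0 = not ready */

	static void wait_for_flag(void)
	{
		while (atomic_load(&flag) == 0) {
			/* The kernel re-checks *uaddr == 0 under the hb lock
			 * (futex_wait_setup) before sleeping. */
			syscall(SYS_futex, &flag, FUTEX_WAIT_PRIVATE, 0,
				NULL, NULL, 0);
		}
	}

	static void set_flag(void)
	{
		/* Publish the new value before waking; see the ordering
		 * comment in futex_wait_setup(). */
		atomic_store(&flag, 1);
		syscall(SYS_futex, &flag, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
	}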
2257
2258
2259static long futex_wait_restart(struct restart_block *restart)
2260{
2261	u32 __user *uaddr = restart->futex.uaddr;
2262	ktime_t t, *tp = NULL;
2263
2264	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2265		t.tv64 = restart->futex.time;
2266		tp = &t;
2267	}
2268	restart->fn = do_no_restart_syscall;
2269
2270	return (long)futex_wait(uaddr, restart->futex.flags,
2271				restart->futex.val, tp, restart->futex.bitset);
2272}
2273
2274
2275/*
2276 * Userspace tried a 0 -> TID atomic transition of the futex value
2277 * and failed. The kernel side here does the whole locking operation:
2278 * if there are waiters then it will block, it does PI, etc. (Due to
2279 * races the kernel might see a 0 value of the futex too.)
2280 */
2281static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
2282			 ktime_t *time, int trylock)
2283{
2284	struct hrtimer_sleeper timeout, *to = NULL;
2285	struct futex_hash_bucket *hb;
2286	struct futex_q q = futex_q_init;
2287	int res, ret;
2288
2289	if (refill_pi_state_cache())
2290		return -ENOMEM;
2291
2292	if (time) {
2293		to = &timeout;
2294		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2295				      HRTIMER_MODE_ABS);
2296		hrtimer_init_sleeper(to, current);
2297		hrtimer_set_expires(&to->timer, *time);
2298	}
2299
2300retry:
2301	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2302	if (unlikely(ret != 0))
2303		goto out;
2304
2305retry_private:
2306	hb = queue_lock(&q);
2307
2308	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2309	if (unlikely(ret)) {
2310		switch (ret) {
2311		case 1:
2312			/* We got the lock. */
2313			ret = 0;
2314			goto out_unlock_put_key;
2315		case -EFAULT:
2316			goto uaddr_faulted;
2317		case -EAGAIN:
2318			/*
2319			 * Task is exiting and we just wait for the
2320			 * exit to complete.
2321			 */
2322			queue_unlock(hb);
2323			put_futex_key(&q.key);
2324			cond_resched();
2325			goto retry;
2326		default:
2327			goto out_unlock_put_key;
2328		}
2329	}
2330
2331	/*
2332	 * Only actually queue now that the atomic ops are done:
2333	 */
2334	queue_me(&q, hb);
2335
2336	WARN_ON(!q.pi_state);
2337	/*
2338	 * Block on the PI mutex:
2339	 */
2340	if (!trylock)
2341		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
2342	else {
2343		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2344		/* Fixup the trylock return value: */
2345		ret = ret ? 0 : -EWOULDBLOCK;
2346	}
2347
2348	spin_lock(q.lock_ptr);
2349	/*
2350	 * Fixup the pi_state owner and possibly acquire the lock if we
2351	 * haven't already.
2352	 */
2353	res = fixup_owner(uaddr, &q, !ret);
2354	/*
2355	 * If fixup_owner() returned an error, propagate that.  If it acquired
2356	 * the lock, clear our -ETIMEDOUT or -EINTR.
2357	 */
2358	if (res)
2359		ret = (res < 0) ? res : 0;
2360
2361	/*
2362	 * If fixup_owner() faulted and was unable to handle the fault, unlock
2363	 * it and return the fault to userspace.
2364	 */
2365	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2366		rt_mutex_unlock(&q.pi_state->pi_mutex);
2367
2368	/* Unqueue and drop the lock */
2369	unqueue_me_pi(&q);
2370
2371	goto out_put_key;
2372
2373out_unlock_put_key:
2374	queue_unlock(hb);
2375
2376out_put_key:
2377	put_futex_key(&q.key);
2378out:
2379	if (to)
2380		destroy_hrtimer_on_stack(&to->timer);
2381	return ret != -EINTR ? ret : -ERESTARTNOINTR;
2382
2383uaddr_faulted:
2384	queue_unlock(hb);
2385
2386	ret = fault_in_user_writeable(uaddr);
2387	if (ret)
2388		goto out_put_key;
2389
2390	if (!(flags & FLAGS_SHARED))
2391		goto retry_private;
2392
2393	put_futex_key(&q.key);
2394	goto retry;
2395}
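
The fast path that the comment above futex_lock_pi() refers to lives entirely
in user space. A hedged sketch of that 0 -> TID transition (hypothetical
names, raw syscall, no error handling); only on contention does it funnel
into futex_lock_pi():

	#include <stdatomic.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	static _Atomic uint32_t pi_lock_word;	/* 0 = free, else owner TID */

	static void pi_lock(void)
	{
		uint32_t expected = 0;
		uint32_t tid = (uint32_t)syscall(SYS_gettid);

		/* Fast path: 0 -> TID, no kernel involvement at all. */
		if (atomic_compare_exchange_strong(&pi_lock_word, &expected, tid))
			return;

		/* Contended: the kernel sets FUTEX_WAITERS, attaches the
		 * pi_state and blocks on the rt_mutex with PI boosting. */
		syscall(SYS_futex, &pi_lock_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
	}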
2396
2397/*
2398 * Userspace attempted a TID -> 0 atomic transition, and failed.
2399 * This is the in-kernel slowpath: we look up the PI state (if any),
2400 * and do the rt-mutex unlock.
2401 */
2402static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2403{
2404	struct futex_hash_bucket *hb;
2405	struct futex_q *this, *next;
2406	union futex_key key = FUTEX_KEY_INIT;
2407	u32 uval, vpid = task_pid_vnr(current);
2408	int ret;
2409
2410retry:
2411	if (get_user(uval, uaddr))
2412		return -EFAULT;
2413	/*
2414	 * We release only a lock we actually own:
2415	 */
2416	if ((uval & FUTEX_TID_MASK) != vpid)
2417		return -EPERM;
2418
2419	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2420	if (unlikely(ret != 0))
2421		goto out;
2422
2423	hb = hash_futex(&key);
2424	spin_lock(&hb->lock);
2425
2426	/*
2427	 * To avoid races, try to do the TID -> 0 atomic transition
2428	 * again. If it succeeds then we can return without waking
2429	 * anyone else up. We only try this if neither the waiters nor
2430	 * the owner died bit is set.
2431	 */
2432	if (!(uval & ~FUTEX_TID_MASK) &&
2433	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
2434		goto pi_faulted;
2435	/*
2436	 * Rare case: we managed to release the lock atomically,
2437	 * no need to wake anyone else up:
2438	 */
2439	if (unlikely(uval == vpid))
2440		goto out_unlock;
2441
2442	/*
2443	 * Ok, other tasks may need to be woken up - check waiters
2444	 * and do the wakeup if necessary:
2445	 */
2446	plist_for_each_entry_safe(this, next, &hb->chain, list) {
2447		if (!match_futex(&this->key, &key))
2448			continue;
2449		ret = wake_futex_pi(uaddr, uval, this);
2450		/*
2451		 * The atomic access to the futex value
2452		 * generated a pagefault, so retry the
2453		 * user-access and the wakeup:
2454		 */
2455		if (ret == -EFAULT)
2456			goto pi_faulted;
2457		goto out_unlock;
2458	}
2459	/*
2460	 * No waiters - kernel unlocks the futex:
2461	 */
2462	ret = unlock_futex_pi(uaddr, uval);
2463	if (ret == -EFAULT)
2464		goto pi_faulted;
2465
2466out_unlock:
2467	spin_unlock(&hb->lock);
2468	put_futex_key(&key);
2469
2470out:
2471	return ret;
2472
2473pi_faulted:
2474	spin_unlock(&hb->lock);
2475	put_futex_key(&key);
2476
2477	ret = fault_in_user_writeable(uaddr);
2478	if (!ret)
2479		goto retry;
2480
2481	return ret;
2482}
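
The matching user-space release, continuing the hypothetical sketch shown
after futex_lock_pi() (it shares pi_lock_word and the same includes): only
when the TID -> 0 cmpxchg fails, i.e. FUTEX_WAITERS or FUTEX_OWNER_DIED is
set, does the slow path above run:

	static void pi_unlock(void)
	{
		uint32_t expected = (uint32_t)syscall(SYS_gettid);

		/* Fast path: TID -> 0 while no waiter/owner-died bits are set. */
		if (atomic_compare_exchange_strong(&pi_lock_word, &expected, 0))
			return;

		/* Slow path: futex_unlock_pi() wakes the top waiter and hands
		 * the rt_mutex over to it. */
		syscall(SYS_futex, &pi_lock_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
	}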
2483
2484/**
2485 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2486 * @hb:		the hash_bucket futex_q was originally enqueued on
2487 * @q:		the futex_q woken while waiting to be requeued
2488 * @key2:	the futex_key of the requeue target futex
2489 * @timeout:	the timeout associated with the wait (NULL if none)
2490 *
2491 * Detect if the task was woken on the initial futex as opposed to the requeue
2492 * target futex.  If so, determine if it was a timeout or a signal that caused
2493 * the wakeup and return the appropriate error code to the caller.  Must be
2494 * called with the hb lock held.
2495 *
2496 * Return:
2497 *  0 = no early wakeup detected;
2498 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
2499 */
2500static inline
2501int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2502				   struct futex_q *q, union futex_key *key2,
2503				   struct hrtimer_sleeper *timeout)
2504{
2505	int ret = 0;
2506
2507	/*
2508	 * With the hb lock held, we avoid races while we process the wakeup.
2509	 * We only need to hold hb (and not hb2) to ensure atomicity as the
2510	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2511	 * It can't be requeued from uaddr2 to something else since we don't
2512	 * support a PI aware source futex for requeue.
2513	 */
2514	if (!match_futex(&q->key, key2)) {
2515		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2516		/*
2517		 * We were woken prior to requeue by a timeout or a signal.
2518		 * Unqueue the futex_q and determine which it was.
2519		 */
2520		plist_del(&q->list, &hb->chain);
2521		hb_waiters_dec(hb);
2522
2523		/* Handle spurious wakeups gracefully */
2524		ret = -EWOULDBLOCK;
2525		if (timeout && !timeout->task)
2526			ret = -ETIMEDOUT;
2527		else if (signal_pending(current))
2528			ret = -ERESTARTNOINTR;
2529	}
2530	return ret;
2531}
2532
2533/**
2534 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2535 * @uaddr:	the futex we initially wait on (non-pi)
2536 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2537 * 		the same type, no requeueing from private to shared, etc.
2538 * @val:	the expected value of uaddr
2539 * @abs_time:	absolute timeout
2540 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
2541 * @uaddr2:	the pi futex we will take prior to returning to user-space
2542 *
2543 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2544 * uaddr2 which must be PI aware and distinct from uaddr.  Normal wakeup will wake
2545 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2546 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
2547 * without one, the pi logic would not know which task to boost/deboost, if
2548 * there was a need to.
2549 *
2550 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2551 * via the following--
2552 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2553 * 2) wakeup on uaddr2 after a requeue
2554 * 3) signal
2555 * 4) timeout
2556 *
2557 * If 3, cleanup and return -ERESTARTNOINTR.
2558 *
2559 * If 2, we may then block on trying to take the rt_mutex and return via:
2560 * 5) successful lock
2561 * 6) signal
2562 * 7) timeout
2563 * 8) other lock acquisition failure
2564 *
2565 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2566 *
2567 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2568 *
2569 * Return:
2570 *  0 - On success;
2571 * <0 - On error
2572 */
2573static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2574				 u32 val, ktime_t *abs_time, u32 bitset,
2575				 u32 __user *uaddr2)
2576{
2577	struct hrtimer_sleeper timeout, *to = NULL;
2578	struct rt_mutex_waiter rt_waiter;
2579	struct rt_mutex *pi_mutex = NULL;
2580	struct futex_hash_bucket *hb;
2581	union futex_key key2 = FUTEX_KEY_INIT;
2582	struct futex_q q = futex_q_init;
2583	int res, ret;
2584
2585	if (uaddr == uaddr2)
2586		return -EINVAL;
2587
2588	if (!bitset)
2589		return -EINVAL;
2590
2591	if (abs_time) {
2592		to = &timeout;
2593		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2594				      CLOCK_REALTIME : CLOCK_MONOTONIC,
2595				      HRTIMER_MODE_ABS);
2596		hrtimer_init_sleeper(to, current);
2597		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2598					     current->timer_slack_ns);
2599	}
2600
2601	/*
2602	 * The waiter is allocated on our stack, manipulated by the requeue
2603	 * code while we sleep on uaddr.
2604	 */
2605	debug_rt_mutex_init_waiter(&rt_waiter);
2606	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2607	RB_CLEAR_NODE(&rt_waiter.tree_entry);
2608	rt_waiter.task = NULL;
2609
2610	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2611	if (unlikely(ret != 0))
2612		goto out;
2613
2614	q.bitset = bitset;
2615	q.rt_waiter = &rt_waiter;
2616	q.requeue_pi_key = &key2;
2617
2618	/*
2619	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2620	 * count.
2621	 */
2622	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2623	if (ret)
2624		goto out_key2;
2625
2626	/*
2627	 * The check above which compares uaddrs is not sufficient for
2628	 * shared futexes. We need to compare the keys:
2629	 */
2630	if (match_futex(&q.key, &key2)) {
2631		ret = -EINVAL;
2632		goto out_put_keys;
2633	}
2634
2635	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
2636	futex_wait_queue_me(hb, &q, to);
2637
2638	spin_lock(&hb->lock);
2639	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2640	spin_unlock(&hb->lock);
2641	if (ret)
2642		goto out_put_keys;
2643
2644	/*
2645	 * In order for us to be here, we know our q.key == key2, and since
2646	 * we took the hb->lock above, we also know that futex_requeue() has
2647	 * completed and we no longer have to concern ourselves with a wakeup
2648	 * race with the atomic proxy lock acquisition by the requeue code. The
2649	 * futex_requeue dropped our key1 reference and incremented our key2
2650	 * reference count.
2651	 */
2652
2653	/* Check if the requeue code acquired the second futex for us. */
2654	if (!q.rt_waiter) {
2655		/*
2656		 * Got the lock. We might not be the anticipated owner if we
2657		 * did a lock-steal - fix up the PI-state in that case.
2658		 */
2659		if (q.pi_state && (q.pi_state->owner != current)) {
2660			spin_lock(q.lock_ptr);
2661			ret = fixup_pi_state_owner(uaddr2, &q, current);
2662			spin_unlock(q.lock_ptr);
2663		}
2664	} else {
2665		/*
2666		 * We have been woken up by futex_unlock_pi(), a timeout, or a
2667		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2668		 * the pi_state.
2669		 */
2670		WARN_ON(!q.pi_state);
2671		pi_mutex = &q.pi_state->pi_mutex;
2672		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2673		debug_rt_mutex_free_waiter(&rt_waiter);
2674
2675		spin_lock(q.lock_ptr);
2676		/*
2677		 * Fixup the pi_state owner and possibly acquire the lock if we
2678		 * haven't already.
2679		 */
2680		res = fixup_owner(uaddr2, &q, !ret);
2681		/*
2682		 * If fixup_owner() returned an error, propagate that.  If it
2683		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2684		 */
2685		if (res)
2686			ret = (res < 0) ? res : 0;
2687
2688		/* Unqueue and drop the lock. */
2689		unqueue_me_pi(&q);
2690	}
2691
2692	/*
2693	 * If fixup_pi_state_owner() faulted and was unable to handle the
2694	 * fault, unlock the rt_mutex and return the fault to userspace.
2695	 */
2696	if (ret == -EFAULT) {
2697		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2698			rt_mutex_unlock(pi_mutex);
2699	} else if (ret == -EINTR) {
2700		/*
2701		 * We've already been requeued, but cannot restart by calling
2702		 * futex_lock_pi() directly. We could restart this syscall, but
2703		 * it would detect that the user space "val" changed and return
2704		 * -EWOULDBLOCK.  Save the overhead of the restart and return
2705		 * -EWOULDBLOCK directly.
2706		 */
2707		ret = -EWOULDBLOCK;
2708	}
2709
2710out_put_keys:
2711	put_futex_key(&q.key);
2712out_key2:
2713	put_futex_key(&key2);
2714
2715out:
2716	if (to) {
2717		hrtimer_cancel(&to->timer);
2718		destroy_hrtimer_on_stack(&to->timer);
2719	}
2720	return ret;
2721}
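
This pair of operations is what makes PI-aware condition variables practical
(glibc uses them for pthread_cond_wait() on a PI mutex). A heavily simplified
sketch of the two calls, with hypothetical names throughout; a real
implementation must manage the condvar sequence word and the mutex state far
more carefully:

	#include <limits.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	/* Waiter: drops the PI mutex, sleeps on cond_word, and returns
	 * already owning pi_mutex_word (requeued by the broadcaster). */
	static int cond_wait_pi(uint32_t *cond_word, uint32_t *pi_mutex_word)
	{
		uint32_t seq = *cond_word;

		pi_unlock_word(pi_mutex_word);	/* hypothetical unlock helper */
		return syscall(SYS_futex, cond_word, FUTEX_WAIT_REQUEUE_PI,
			       seq, NULL, pi_mutex_word, 0);
	}

	/* Broadcast: wake exactly one waiter (nr_wake must be 1 for the
	 * requeue_pi case) and requeue the rest onto the PI mutex.  The
	 * nr_requeue value (val2) travels in the timeout argument slot. */
	static int cond_broadcast_pi(uint32_t *cond_word, uint32_t *pi_mutex_word)
	{
		return syscall(SYS_futex, cond_word, FUTEX_CMP_REQUEUE_PI, 1,
			       (void *)(unsigned long)INT_MAX, pi_mutex_word,
			       *cond_word);
	}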
2722
2723/*
2724 * Support for robust futexes: the kernel cleans up held futexes at
2725 * thread exit time.
2726 *
2727 * Implementation: user-space maintains a per-thread list of locks it
2728 * is holding. Upon do_exit(), the kernel carefully walks this list,
2729 * and marks all locks that are owned by this thread with the
2730 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2731 * always manipulated with the lock held, so the list is private and
2732 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2733 * field, to allow the kernel to clean up if the thread dies after
2734 * acquiring the lock, but just before it could have added itself to
2735 * the list. There can only be one such pending lock.
2736 */
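
Roughly, the user-space half of this protocol looks as follows. This is a
minimal sketch with a hypothetical lock layout; the ABI only fixes the
robust_list linkage, the futex_offset, and the list_op_pending field:

	#include <stddef.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	struct robust_lock {			/* hypothetical lock layout */
		struct robust_list list;	/* linkage walked by the kernel */
		uint32_t futex;			/* owner TID, 0 when free */
	};

	static struct robust_list_head head = {
		.list		 = { .next = &head.list },	/* empty list */
		.futex_offset	 = offsetof(struct robust_lock, futex),
		.list_op_pending = NULL,
	};

	static void register_robust_list(void)
	{
		/* One list per thread; walked by exit_robust_list() at exit. */
		syscall(SYS_set_robust_list, &head, sizeof(head));
	}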
2737
2738/**
2739 * sys_set_robust_list() - Set the robust-futex list head of a task
2740 * @head:	pointer to the list-head
2741 * @len:	length of the list-head, as userspace expects
2742 */
2743SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2744		size_t, len)
2745{
2746	if (!futex_cmpxchg_enabled)
2747		return -ENOSYS;
2748	/*
2749	 * The kernel knows only one size for now:
2750	 */
2751	if (unlikely(len != sizeof(*head)))
2752		return -EINVAL;
2753
2754	current->robust_list = head;
2755
2756	return 0;
2757}
2758
2759/**
2760 * sys_get_robust_list() - Get the robust-futex list head of a task
2761 * @pid:	pid of the process [zero for current task]
2762 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
2763 * @len_ptr:	pointer to a length field, the kernel fills in the header size
2764 */
2765SYSCALL_DEFINE3(get_robust_list, int, pid,
2766		struct robust_list_head __user * __user *, head_ptr,
2767		size_t __user *, len_ptr)
2768{
2769	struct robust_list_head __user *head;
2770	unsigned long ret;
2771	struct task_struct *p;
2772
2773	if (!futex_cmpxchg_enabled)
2774		return -ENOSYS;
2775
2776	rcu_read_lock();
2777
2778	ret = -ESRCH;
2779	if (!pid)
2780		p = current;
2781	else {
2782		p = find_task_by_vpid(pid);
2783		if (!p)
2784			goto err_unlock;
2785	}
2786
2787	ret = -EPERM;
2788	if (!ptrace_may_access(p, PTRACE_MODE_READ))
2789		goto err_unlock;
2790
2791	head = p->robust_list;
2792	rcu_read_unlock();
2793
2794	if (put_user(sizeof(*head), len_ptr))
2795		return -EFAULT;
2796	return put_user(head, head_ptr);
2797
2798err_unlock:
2799	rcu_read_unlock();
2800
2801	return ret;
2802}
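
A sketch of the inverse query from user space, reusing the includes from the
registration sketch above (pid 0 means the calling thread; illustrative only):

	#include <stdio.h>

	static void show_robust_list(void)
	{
		struct robust_list_head *hp;
		size_t len;

		if (syscall(SYS_get_robust_list, 0, &hp, &len) == 0)
			printf("robust list head %p, len %zu\n", (void *)hp, len);
	}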
2803
2804/*
2805 * Process a futex-list entry, check whether it's owned by the
2806 * dying task, and do notification if so:
2807 */
2808int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2809{
2810	u32 uval, uninitialized_var(nval), mval;
2811
2812retry:
2813	if (get_user(uval, uaddr))
2814		return -1;
2815
2816	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2817		/*
2818		 * Ok, this dying thread is truly holding a futex
2819		 * of interest. Set the OWNER_DIED bit atomically
2820		 * via cmpxchg, and if the value had FUTEX_WAITERS
2821		 * set, wake up a waiter (if any). (We have to do a
2822		 * futex_wake() even if OWNER_DIED is already set -
2823		 * to handle the rare but possible case of recursive
2824		 * thread-death.) The rest of the cleanup is done in
2825		 * userspace.
2826		 */
2827		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2828		/*
2829		 * We are not holding a lock here, but we want to have
2830		 * the pagefault_disable/enable() protection because
2831		 * we want to handle the fault gracefully. If the
2832		 * access fails we try to fault in the futex with R/W
2833		 * verification via get_user_pages. get_user() above
2834		 * does not guarantee R/W access. If that fails we
2835		 * give up and leave the futex locked.
2836		 */
2837		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
2838			if (fault_in_user_writeable(uaddr))
2839				return -1;
2840			goto retry;
2841		}
2842		if (nval != uval)
2843			goto retry;
2844
2845		/*
2846		 * Wake robust non-PI futexes here. The wakeup of
2847		 * PI futexes happens in exit_pi_state():
2848		 */
2849		if (!pi && (uval & FUTEX_WAITERS))
2850			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2851	}
2852	return 0;
2853}
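
The FUTEX_OWNER_DIED bit written here is what the next user-space locker
observes. A hedged sketch of the take-over check (simplified: real robust
mutexes, e.g. glibc's, serialize recovery and fall back to the kernel on
contention; includes as in the earlier sketches plus errno.h):

	#include <errno.h>

	static int robust_trylock(_Atomic uint32_t *word, uint32_t tid)
	{
		uint32_t old = 0;

		if (atomic_compare_exchange_strong(word, &old, tid))
			return 0;		/* clean acquisition */

		/* Previous owner died holding the lock: take it over, keep
		 * the waiter bit, and tell the caller the protected data may
		 * be inconsistent (cf. pthread's EOWNERDEAD). */
		if ((old & FUTEX_OWNER_DIED) &&
		    atomic_compare_exchange_strong(word, &old,
						   tid | (old & FUTEX_WAITERS)))
			return EOWNERDEAD;

		return EBUSY;			/* genuinely contended */
	}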
2854
2855/*
2856 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2857 */
2858static inline int fetch_robust_entry(struct robust_list __user **entry,
2859				     struct robust_list __user * __user *head,
2860				     unsigned int *pi)
2861{
2862	unsigned long uentry;
2863
2864	if (get_user(uentry, (unsigned long __user *)head))
2865		return -EFAULT;
2866
2867	*entry = (void __user *)(uentry & ~1UL);
2868	*pi = uentry & 1;
2869
2870	return 0;
2871}
2872
2873/*
2874 * Walk curr->robust_list (very carefully, it's a userspace list!)
2875 * and mark any locks found there dead, and notify any waiters.
2876 *
2877 * We silently return on any sign of list-walking problem.
2878 */
2879void exit_robust_list(struct task_struct *curr)
2880{
2881	struct robust_list_head __user *head = curr->robust_list;
2882	struct robust_list __user *entry, *next_entry, *pending;
2883	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2884	unsigned int uninitialized_var(next_pi);
2885	unsigned long futex_offset;
2886	int rc;
2887
2888	if (!futex_cmpxchg_enabled)
2889		return;
2890
2891	/*
2892	 * Fetch the list head (which was registered earlier, via
2893	 * sys_set_robust_list()):
2894	 */
2895	if (fetch_robust_entry(&entry, &head->list.next, &pi))
2896		return;
2897	/*
2898	 * Fetch the relative futex offset:
2899	 */
2900	if (get_user(futex_offset, &head->futex_offset))
2901		return;
2902	/*
2903	 * Fetch any possibly pending lock-add first, and handle it
2904	 * if it exists:
2905	 */
2906	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2907		return;
2908
2909	next_entry = NULL;	/* avoid warning with gcc */
2910	while (entry != &head->list) {
2911		/*
2912		 * Fetch the next entry in the list before calling
2913		 * handle_futex_death:
2914		 */
2915		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2916		/*
2917		 * A pending lock might already be on the list, so
2918		 * don't process it twice:
2919		 */
2920		if (entry != pending)
2921			if (handle_futex_death((void __user *)entry + futex_offset,
2922						curr, pi))
2923				return;
2924		if (rc)
2925			return;
2926		entry = next_entry;
2927		pi = next_pi;
2928		/*
2929		 * Avoid excessively long or circular lists:
2930		 */
2931		if (!--limit)
2932			break;
2933
2934		cond_resched();
2935	}
2936
2937	if (pending)
2938		handle_futex_death((void __user *)pending + futex_offset,
2939				   curr, pip);
2940}
2941
2942long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2943		u32 __user *uaddr2, u32 val2, u32 val3)
2944{
2945	int cmd = op & FUTEX_CMD_MASK;
2946	unsigned int flags = 0;
2947
2948	if (!(op & FUTEX_PRIVATE_FLAG))
2949		flags |= FLAGS_SHARED;
2950
2951	if (op & FUTEX_CLOCK_REALTIME) {
2952		flags |= FLAGS_CLOCKRT;
2953		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2954			return -ENOSYS;
2955	}
2956
2957	switch (cmd) {
2958	case FUTEX_LOCK_PI:
2959	case FUTEX_UNLOCK_PI:
2960	case FUTEX_TRYLOCK_PI:
2961	case FUTEX_WAIT_REQUEUE_PI:
2962	case FUTEX_CMP_REQUEUE_PI:
2963		if (!futex_cmpxchg_enabled)
2964			return -ENOSYS;
2965	}
2966
2967	switch (cmd) {
2968	case FUTEX_WAIT:
2969		val3 = FUTEX_BITSET_MATCH_ANY;
2970	case FUTEX_WAIT_BITSET:
2971		return futex_wait(uaddr, flags, val, timeout, val3);
2972	case FUTEX_WAKE:
2973		val3 = FUTEX_BITSET_MATCH_ANY;
2974	case FUTEX_WAKE_BITSET:
2975		return futex_wake(uaddr, flags, val, val3);
2976	case FUTEX_REQUEUE:
2977		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2978	case FUTEX_CMP_REQUEUE:
2979		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2980	case FUTEX_WAKE_OP:
2981		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2982	case FUTEX_LOCK_PI:
2983		return futex_lock_pi(uaddr, flags, val, timeout, 0);
2984	case FUTEX_UNLOCK_PI:
2985		return futex_unlock_pi(uaddr, flags);
2986	case FUTEX_TRYLOCK_PI:
2987		return futex_lock_pi(uaddr, flags, 0, timeout, 1);
2988	case FUTEX_WAIT_REQUEUE_PI:
2989		val3 = FUTEX_BITSET_MATCH_ANY;
2990		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2991					     uaddr2);
2992	case FUTEX_CMP_REQUEUE_PI:
2993		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
2994	}
2995	return -ENOSYS;
2996}
2997
2998
2999SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3000		struct timespec __user *, utime, u32 __user *, uaddr2,
3001		u32, val3)
3002{
3003	struct timespec ts;
3004	ktime_t t, *tp = NULL;
3005	u32 val2 = 0;
3006	int cmd = op & FUTEX_CMD_MASK;
3007
3008	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3009		      cmd == FUTEX_WAIT_BITSET ||
3010		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
3011		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
3012			return -EFAULT;
3013		if (!timespec_valid(&ts))
3014			return -EINVAL;
3015
3016		t = timespec_to_ktime(ts);
3017		if (cmd == FUTEX_WAIT)
3018			t = ktime_add_safe(ktime_get(), t);
3019		tp = &t;
3020	}
3021	/*
3022	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3023	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3024	 */
3025	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3026	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3027		val2 = (u32) (unsigned long) utime;
3028
3029	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3030}
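
One subtlety worth a sketch: as the conversion above shows, FUTEX_WAIT
interprets utime as a relative timeout (made absolute via ktime_add_safe()),
while FUTEX_WAIT_BITSET takes it as already absolute. Hypothetical helpers,
error handling omitted, includes as in the earlier sketches plus time.h:

	#include <time.h>

	/* Relative timeout: FUTEX_WAIT. */
	static int wait_rel(uint32_t *uaddr, uint32_t val, time_t sec)
	{
		struct timespec rel = { .tv_sec = sec };

		return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, &rel, NULL, 0);
	}

	/* Absolute timeout on CLOCK_MONOTONIC: FUTEX_WAIT_BITSET (add
	 * FUTEX_CLOCK_REALTIME to the op to use CLOCK_REALTIME instead). */
	static int wait_abs(uint32_t *uaddr, uint32_t val, time_t sec)
	{
		struct timespec abs;

		clock_gettime(CLOCK_MONOTONIC, &abs);
		abs.tv_sec += sec;
		return syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET, val, &abs,
			       NULL, FUTEX_BITSET_MATCH_ANY);
	}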
3031
3032static void __init futex_detect_cmpxchg(void)
3033{
3034#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3035	u32 curval;
3036
3037	/*
3038	 * This will fail and we want it. Some arch implementations do
3039	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3040	 * functionality. We want to know that before we call in any
3041	 * of the complex code paths. Also we want to prevent
3042	 * registration of robust lists in that case. NULL is
3043	 * guaranteed to fault and we get -EFAULT on functional
3044	 * implementation, the non-functional ones will return
3045	 * -ENOSYS.
3046	 */
3047	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3048		futex_cmpxchg_enabled = 1;
3049#endif
3050}
3051
3052static int __init futex_init(void)
3053{
3054	unsigned int futex_shift;
3055	unsigned long i;
3056
3057#if CONFIG_BASE_SMALL
3058	futex_hashsize = 16;
3059#else
3060	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3061#endif
3062
3063	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3064					       futex_hashsize, 0,
3065					       futex_hashsize < 256 ? HASH_SMALL : 0,
3066					       &futex_shift, NULL,
3067					       futex_hashsize, futex_hashsize);
3068	futex_hashsize = 1UL << futex_shift;
3069
3070	futex_detect_cmpxchg();
3071
3072	for (i = 0; i < futex_hashsize; i++) {
3073		atomic_set(&futex_queues[i].waiters, 0);
3074		plist_head_init(&futex_queues[i].chain);
3075		spin_lock_init(&futex_queues[i].lock);
3076	}
3077
3078	return 0;
3079}
3080__initcall(futex_init);
v4.6
  47#include <linux/slab.h>
  48#include <linux/poll.h>
  49#include <linux/fs.h>
  50#include <linux/file.h>
  51#include <linux/jhash.h>
  52#include <linux/init.h>
  53#include <linux/futex.h>
  54#include <linux/mount.h>
  55#include <linux/pagemap.h>
  56#include <linux/syscalls.h>
  57#include <linux/signal.h>
  58#include <linux/export.h>
  59#include <linux/magic.h>
  60#include <linux/pid.h>
  61#include <linux/nsproxy.h>
  62#include <linux/ptrace.h>
  63#include <linux/sched/rt.h>
  64#include <linux/hugetlb.h>
  65#include <linux/freezer.h>
  66#include <linux/bootmem.h>
  67#include <linux/fault-inject.h>
  68
  69#include <asm/futex.h>
  70
  71#include "locking/rtmutex_common.h"
  72
  73/*
  74 * READ this before attempting to hack on futexes!
  75 *
  76 * Basic futex operation and ordering guarantees
  77 * =============================================
  78 *
  79 * The waiter reads the futex value in user space and calls
  80 * futex_wait(). This function computes the hash bucket and acquires
  81 * the hash bucket lock. After that it reads the futex user space value
  82 * again and verifies that the data has not changed. If it has not changed
  83 * it enqueues itself into the hash bucket, releases the hash bucket lock
  84 * and schedules.
  85 *
  86 * The waker side modifies the user space value of the futex and calls
  87 * futex_wake(). This function computes the hash bucket and acquires the
  88 * hash bucket lock. Then it looks for waiters on that futex in the hash
  89 * bucket and wakes them.
  90 *
  91 * In futex wake up scenarios where no tasks are blocked on a futex, taking
  92 * the hb spinlock can be avoided and the syscall can simply return. In order for this
  93 * optimization to work, ordering guarantees must exist so that the waiter
  94 * being added to the list is acknowledged when the list is concurrently being
  95 * checked by the waker, avoiding scenarios like the following:
  96 *
  97 * CPU 0                               CPU 1
  98 * val = *futex;
  99 * sys_futex(WAIT, futex, val);
 100 *   futex_wait(futex, val);
 101 *   uval = *futex;
 102 *                                     *futex = newval;
 103 *                                     sys_futex(WAKE, futex);
 104 *                                       futex_wake(futex);
 105 *                                       if (queue_empty())
 106 *                                         return;
 107 *   if (uval == val)
 108 *      lock(hash_bucket(futex));
 109 *      queue();
 110 *     unlock(hash_bucket(futex));
 111 *     schedule();
 112 *
 113 * This would cause the waiter on CPU 0 to wait forever because it
 114 * missed the transition of the user space value from val to newval
 115 * and the waker did not find the waiter in the hash bucket queue.
 116 *
 117 * The correct serialization ensures that a waiter either observes
 118 * the changed user space value before blocking or is woken by a
 119 * concurrent waker:
 120 *
 121 * CPU 0                                 CPU 1
 122 * val = *futex;
 123 * sys_futex(WAIT, futex, val);
 124 *   futex_wait(futex, val);
 125 *
 126 *   waiters++; (a)
 127 *   smp_mb(); (A) <-- paired with -.
 128 *                                  |
 129 *   lock(hash_bucket(futex));      |
 130 *                                  |
 131 *   uval = *futex;                 |
 132 *                                  |        *futex = newval;
 133 *                                  |        sys_futex(WAKE, futex);
 134 *                                  |          futex_wake(futex);
 135 *                                  |
 136 *                                  `--------> smp_mb(); (B)
 137 *   if (uval == val)
 138 *     queue();
 139 *     unlock(hash_bucket(futex));
 140 *     schedule();                         if (waiters)
 141 *                                           lock(hash_bucket(futex));
 142 *   else                                    wake_waiters(futex);
 143 *     waiters--; (b)                        unlock(hash_bucket(futex));
 144 *
 145 * Where (A) orders the waiters increment and the futex value read through
 146 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 147 * to futex and the waiters read -- this is done by the barriers for both
 148 * shared and private futexes in get_futex_key_refs().
 149 *
 150 * This yields the following case (where X:=waiters, Y:=futex):
 151 *
 152 *	X = Y = 0
 153 *
 154 *	w[X]=1		w[Y]=1
 155 *	MB		MB
 156 *	r[Y]=y		r[X]=x
 157 *
 158 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 159 * the guarantee that we cannot both miss the futex variable change and the
 160 * enqueue.
 161 *
  162 * Note that a new waiter is accounted for in (a) even when the wait call may
  163 * later return an error, in which case we backtrack from it in (b).
 164 * Refer to the comment in queue_lock().
 165 *
  166 * Similarly, in order to account for waiters being requeued to another
  167 * address we always increment the waiters for the destination bucket before
  168 * acquiring the lock. The count is then decremented again after releasing it -
  169 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
  170 * will do the additional required waiter count housekeeping. This is done for
  171 * double_lock_hb() and double_unlock_hb(), respectively.
 172 */
 173
 174#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 175int __read_mostly futex_cmpxchg_enabled;
 176#endif
 177
 178/*
 179 * Futex flags used to encode options to functions and preserve them across
 180 * restarts.
 181 */
 182#define FLAGS_SHARED		0x01
 183#define FLAGS_CLOCKRT		0x02
 184#define FLAGS_HAS_TIMEOUT	0x04
 185
 186/*
 187 * Priority Inheritance state:
 188 */
 189struct futex_pi_state {
 190	/*
 191	 * list of 'owned' pi_state instances - these have to be
 192	 * cleaned up in do_exit() if the task exits prematurely:
 193	 */
 194	struct list_head list;
 195
 196	/*
 197	 * The PI object:
 198	 */
 199	struct rt_mutex pi_mutex;
 200
 201	struct task_struct *owner;
 202	atomic_t refcount;
 203
 204	union futex_key key;
 205};
 206
 207/**
 208 * struct futex_q - The hashed futex queue entry, one per waiting task
 209 * @list:		priority-sorted list of tasks waiting on this futex
 210 * @task:		the task waiting on the futex
 211 * @lock_ptr:		the hash bucket lock
 212 * @key:		the key the futex is hashed on
 213 * @pi_state:		optional priority inheritance state
 214 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 215 * @requeue_pi_key:	the requeue_pi target futex key
 216 * @bitset:		bitset for the optional bitmasked wakeup
 217 *
 218 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 219 * we can wake only the relevant ones (hashed queues may be shared).
 220 *
 221 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 222 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 223 * The order of wakeup is always to make the first condition true, then
 224 * the second.
 225 *
 226 * PI futexes are typically woken before they are removed from the hash list via
 227 * the rt_mutex code. See unqueue_me_pi().
 228 */
 229struct futex_q {
 230	struct plist_node list;
 231
 232	struct task_struct *task;
 233	spinlock_t *lock_ptr;
 234	union futex_key key;
 235	struct futex_pi_state *pi_state;
 236	struct rt_mutex_waiter *rt_waiter;
 237	union futex_key *requeue_pi_key;
 238	u32 bitset;
 239};
 240
 241static const struct futex_q futex_q_init = {
  242	/* list gets initialized in queue_me() */
 243	.key = FUTEX_KEY_INIT,
 244	.bitset = FUTEX_BITSET_MATCH_ANY
 245};
 246
 247/*
 248 * Hash buckets are shared by all the futex_keys that hash to the same
 249 * location.  Each key may have multiple futex_q structures, one for each task
 250 * waiting on a futex.
 251 */
 252struct futex_hash_bucket {
 253	atomic_t waiters;
 254	spinlock_t lock;
 255	struct plist_head chain;
 256} ____cacheline_aligned_in_smp;
 257
 258/*
 259 * The base of the bucket array and its size are always used together
 260 * (after initialization only in hash_futex()), so ensure that they
 261 * reside in the same cacheline.
 262 */
 263static struct {
 264	struct futex_hash_bucket *queues;
 265	unsigned long            hashsize;
 266} __futex_data __read_mostly __aligned(2*sizeof(long));
 267#define futex_queues   (__futex_data.queues)
 268#define futex_hashsize (__futex_data.hashsize)
 269
 270
 271/*
 272 * Fault injections for futexes.
 273 */
 274#ifdef CONFIG_FAIL_FUTEX
 275
 276static struct {
 277	struct fault_attr attr;
 278
 279	bool ignore_private;
 280} fail_futex = {
 281	.attr = FAULT_ATTR_INITIALIZER,
 282	.ignore_private = false,
 283};
 284
 285static int __init setup_fail_futex(char *str)
 286{
 287	return setup_fault_attr(&fail_futex.attr, str);
 288}
 289__setup("fail_futex=", setup_fail_futex);
 290
 291static bool should_fail_futex(bool fshared)
 292{
 293	if (fail_futex.ignore_private && !fshared)
 294		return false;
 295
 296	return should_fail(&fail_futex.attr, 1);
 297}
 298
 299#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 300
 301static int __init fail_futex_debugfs(void)
 302{
 303	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 304	struct dentry *dir;
 305
 306	dir = fault_create_debugfs_attr("fail_futex", NULL,
 307					&fail_futex.attr);
 308	if (IS_ERR(dir))
 309		return PTR_ERR(dir);
 310
 311	if (!debugfs_create_bool("ignore-private", mode, dir,
 312				 &fail_futex.ignore_private)) {
 313		debugfs_remove_recursive(dir);
 314		return -ENOMEM;
 315	}
 316
 317	return 0;
 318}
 319
 320late_initcall(fail_futex_debugfs);
 321
 322#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
 323
 324#else
 325static inline bool should_fail_futex(bool fshared)
 326{
 327	return false;
 328}
 329#endif /* CONFIG_FAIL_FUTEX */
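
Assuming the common fault-injection boot syntax
<interval>,<probability>,<space>,<times> (see Documentation/fault-injection/
for the authoritative format), a command line such as

	fail_futex=1,10,0,-1

would make roughly one in ten eligible futex user-space accesses fail, with
no limit on the number of failures; the "ignore-private" debugfs knob above
restricts the failures to shared futexes.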
 330
 331static inline void futex_get_mm(union futex_key *key)
 332{
 333	atomic_inc(&key->private.mm->mm_count);
 334	/*
 335	 * Ensure futex_get_mm() implies a full barrier such that
 336	 * get_futex_key() implies a full barrier. This is relied upon
 337	 * as smp_mb(); (B), see the ordering comment above.
 338	 */
 339	smp_mb__after_atomic();
 340}
 341
 342/*
 343 * Reflects a new waiter being added to the waitqueue.
 344 */
 345static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 346{
 347#ifdef CONFIG_SMP
 348	atomic_inc(&hb->waiters);
 349	/*
 350	 * Full barrier (A), see the ordering comment above.
 351	 */
 352	smp_mb__after_atomic();
 353#endif
 354}
 355
 356/*
 357 * Reflects a waiter being removed from the waitqueue by wakeup
 358 * paths.
 359 */
 360static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
 361{
 362#ifdef CONFIG_SMP
 363	atomic_dec(&hb->waiters);
 364#endif
 365}
 366
 367static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
 368{
 369#ifdef CONFIG_SMP
 370	return atomic_read(&hb->waiters);
 371#else
 372	return 1;
 373#endif
 374}
 375
 376/*
 377 * We hash on the keys returned from get_futex_key (see below).
 378 */
 379static struct futex_hash_bucket *hash_futex(union futex_key *key)
 380{
 381	u32 hash = jhash2((u32*)&key->both.word,
 382			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
 383			  key->both.offset);
 384	return &futex_queues[hash & (futex_hashsize - 1)];
 385}
 386
 387/*
 388 * Return 1 if two futex_keys are equal, 0 otherwise.
 389 */
 390static inline int match_futex(union futex_key *key1, union futex_key *key2)
 391{
 392	return (key1 && key2
 393		&& key1->both.word == key2->both.word
 394		&& key1->both.ptr == key2->both.ptr
 395		&& key1->both.offset == key2->both.offset);
 396}
 397
 398/*
 399 * Take a reference to the resource addressed by a key.
 400 * Can be called while holding spinlocks.
 401 *
 402 */
 403static void get_futex_key_refs(union futex_key *key)
 404{
 405	if (!key->both.ptr)
 406		return;
 407
 408	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 409	case FUT_OFF_INODE:
 410		ihold(key->shared.inode); /* implies smp_mb(); (B) */
 411		break;
 412	case FUT_OFF_MMSHARED:
 413		futex_get_mm(key); /* implies smp_mb(); (B) */
 414		break;
 415	default:
 416		/*
 417		 * Private futexes do not hold reference on an inode or
 418		 * mm, therefore the only purpose of calling get_futex_key_refs
 419		 * is because we need the barrier for the lockless waiter check.
 420		 */
 421		smp_mb(); /* explicit smp_mb(); (B) */
 422	}
 423}
 424
 425/*
 426 * Drop a reference to the resource addressed by a key.
 427 * The hash bucket spinlock must not be held. This is
 428 * a no-op for private futexes, see comment in the get
 429 * counterpart.
 430 */
 431static void drop_futex_key_refs(union futex_key *key)
 432{
 433	if (!key->both.ptr) {
 434		/* If we're here then we tried to put a key we failed to get */
 435		WARN_ON_ONCE(1);
 436		return;
 437	}
 438
 439	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 440	case FUT_OFF_INODE:
 441		iput(key->shared.inode);
 442		break;
 443	case FUT_OFF_MMSHARED:
 444		mmdrop(key->private.mm);
 445		break;
 446	}
 447}
 448
 449/**
 450 * get_futex_key() - Get parameters which are the keys for a futex
 451 * @uaddr:	virtual address of the futex
 452 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 453 * @key:	address where result is stored.
 454 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 455 *              VERIFY_WRITE)
 456 *
 457 * Return: a negative error code or 0
 458 *
 459 * The key words are stored in *key on success.
 460 *
 461 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 462 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 463 * We can usually work out the index without swapping in the page.
 464 *
 465 * lock_page() might sleep, the caller should not hold a spinlock.
 466 */
 467static int
 468get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 469{
 470	unsigned long address = (unsigned long)uaddr;
 471	struct mm_struct *mm = current->mm;
 472	struct page *page;
 473	struct address_space *mapping;
 474	int err, ro = 0;
 475
 476	/*
 477	 * The futex address must be "naturally" aligned.
 478	 */
 479	key->both.offset = address % PAGE_SIZE;
 480	if (unlikely((address % sizeof(u32)) != 0))
 481		return -EINVAL;
 482	address -= key->both.offset;
 483
 484	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
 485		return -EFAULT;
 486
 487	if (unlikely(should_fail_futex(fshared)))
 488		return -EFAULT;
 489
 490	/*
 491	 * PROCESS_PRIVATE futexes are fast.
  492 * As the mm cannot disappear under us and the 'key' only needs the
  493 * virtual address, we don't even have to find the underlying vma.
  494 * Note: We do have to check 'uaddr' is a valid user address,
  495 *       but access_ok() should be faster than find_vma().
 496	 */
 497	if (!fshared) {
 498		key->private.mm = mm;
 499		key->private.address = address;
 500		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
 501		return 0;
 502	}
 503
 504again:
 505	/* Ignore any VERIFY_READ mapping (futex common case) */
 506	if (unlikely(should_fail_futex(fshared)))
 507		return -EFAULT;
 508
 509	err = get_user_pages_fast(address, 1, 1, &page);
 510	/*
 511	 * If write access is not required (eg. FUTEX_WAIT), try
 512	 * and get read-only access.
 513	 */
 514	if (err == -EFAULT && rw == VERIFY_READ) {
 515		err = get_user_pages_fast(address, 1, 0, &page);
 516		ro = 1;
 517	}
 518	if (err < 0)
 519		return err;
 520	else
 521		err = 0;
 522
 523	/*
 524	 * The treatment of mapping from this point on is critical. The page
 525	 * lock protects many things but in this context the page lock
 526	 * stabilizes mapping, prevents inode freeing in the shared
 527	 * file-backed region case and guards against movement to swap cache.
 528	 *
 529	 * Strictly speaking the page lock is not needed in all cases being
 530	 * considered here and the page lock forces unnecessary serialization.
 531	 * From this point on, mapping will be re-verified if necessary and
 532	 * the page lock will be acquired only if it is unavoidable.
 533	 */
 534	page = compound_head(page);
 535	mapping = READ_ONCE(page->mapping);
 536
 537	/*
 538	 * If page->mapping is NULL, then it cannot be a PageAnon
 539	 * page; but it might be the ZERO_PAGE or in the gate area or
 540	 * in a special mapping (all cases which we are happy to fail);
 541	 * or it may have been a good file page when get_user_pages_fast
 542	 * found it, but truncated or holepunched or subjected to
 543	 * invalidate_complete_page2 before we got the page lock (also
 544	 * cases which we are happy to fail).  And we hold a reference,
 545	 * so refcount care in invalidate_complete_page's remove_mapping
 546	 * prevents drop_caches from setting mapping to NULL beneath us.
 547	 *
 548	 * The case we do have to guard against is when memory pressure made
 549	 * shmem_writepage move it from filecache to swapcache beneath us:
 550	 * an unlikely race, but we do need to retry for page->mapping.
 551	 */
 552	if (unlikely(!mapping)) {
 553		int shmem_swizzled;
 554
 555		/*
 556		 * Page lock is required to identify which special case above
 557		 * applies. If this is really a shmem page then the page lock
 558		 * will prevent unexpected transitions.
 559		 */
 560		lock_page(page);
 561		shmem_swizzled = PageSwapCache(page) || page->mapping;
 562		unlock_page(page);
 563		put_page(page);
 564
 565		if (shmem_swizzled)
 566			goto again;
 567
 568		return -EFAULT;
 569	}
 570
 571	/*
 572	 * Private mappings are handled in a simple way.
 573	 *
 574	 * If the futex key is stored on an anonymous page, then the associated
 575	 * object is the mm which is implicitly pinned by the calling process.
 576	 *
 577	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
 578	 * it's a read-only handle, it's expected that futexes attach to
 579	 * the object not the particular process.
 580	 */
 581	if (PageAnon(page)) {
 582		/*
 583		 * A RO anonymous page will never change and thus doesn't make
 584		 * sense for futex operations.
 585		 */
 586		if (unlikely(should_fail_futex(fshared)) || ro) {
 587			err = -EFAULT;
 588			goto out;
 589		}
 590
 591		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
 592		key->private.mm = mm;
 593		key->private.address = address;
 594
 595		get_futex_key_refs(key); /* implies smp_mb(); (B) */
 596
 597	} else {
 598		struct inode *inode;
 599
 600		/*
 601		 * The associated futex object in this case is the inode and
 602		 * the page->mapping must be traversed. Ordinarily this should
 603		 * be stabilised under page lock but it's not strictly
 604		 * necessary in this case as we just want to pin the inode, not
 605		 * update the radix tree or anything like that.
 606		 *
 607		 * The RCU read lock is taken as the inode is finally freed
 608		 * under RCU. If the mapping still matches expectations then the
 609		 * mapping->host can be safely accessed as being a valid inode.
 610		 */
 611		rcu_read_lock();
 612
 613		if (READ_ONCE(page->mapping) != mapping) {
 614			rcu_read_unlock();
 615			put_page(page);
 616
 617			goto again;
 618		}
 619
 620		inode = READ_ONCE(mapping->host);
 621		if (!inode) {
 622			rcu_read_unlock();
 623			put_page(page);
 624
 625			goto again;
 626		}
 627
 628		/*
 629		 * Take a reference unless it is about to be freed. Previously
 630		 * this reference was taken by ihold under the page lock
 631		 * pinning the inode in place so i_lock was unnecessary. The
 632		 * only way for this check to fail is if the inode was
 633		 * truncated in parallel so warn for now if this happens.
 634		 *
 635		 * We are not calling into get_futex_key_refs() in file-backed
 636		 * cases, therefore a successful atomic_inc return below will
 637		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
 638		 */
 639		if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
 640			rcu_read_unlock();
 641			put_page(page);
 642
 643			goto again;
 644		}
 645
 646		/* Should be impossible but let's be paranoid for now */
 647		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
 648			err = -EFAULT;
 649			rcu_read_unlock();
 650			iput(inode);
 651
 652			goto out;
 653		}
 654
 655		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 656		key->shared.inode = inode;
 657		key->shared.pgoff = basepage_index(page);
 658		rcu_read_unlock();
 659	}
 660
 661out:
 662	put_page(page);
 663	return err;
 664}
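/*
 * Editorial sketch (not part of the original source; 'uaddr' and
 * 'flags' stand in for a caller's locals): a successful
 * get_futex_key() takes a reference which the caller must drop again
 * with put_futex_key(), defined below. The usual shape, mirroring
 * futex_wake() and friends:
 *
 *	union futex_key key = FUTEX_KEY_INIT;
 *	int ret;
 *
 *	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
 *	if (ret)
 *		return ret;
 *	...	operate on hash_futex(&key) ...
 *	put_futex_key(&key);
 */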
 665
 666static inline void put_futex_key(union futex_key *key)
 667{
 668	drop_futex_key_refs(key);
 669}
 670
 671/**
 672 * fault_in_user_writeable() - Fault in user address and verify RW access
 673 * @uaddr:	pointer to faulting user space address
 674 *
 675 * Slow path to fixup the fault we just took in the atomic write
 676 * access to @uaddr.
 677 *
 678 * We have no generic implementation of a non-destructive write to the
 679 * user address. We know that we faulted in the atomic pagefault
 680 * disabled section so we can as well avoid the #PF overhead by
 681 * calling get_user_pages() right away.
 682 */
 683static int fault_in_user_writeable(u32 __user *uaddr)
 684{
 685	struct mm_struct *mm = current->mm;
 686	int ret;
 687
 688	down_read(&mm->mmap_sem);
 689	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 690			       FAULT_FLAG_WRITE, NULL);
 691	up_read(&mm->mmap_sem);
 692
 693	return ret < 0 ? ret : 0;
 694}
 695
 696/**
 697 * futex_top_waiter() - Return the highest priority waiter on a futex
 698 * @hb:		the hash bucket the futex_q's reside in
 699 * @key:	the futex key (to distinguish it from other futex futex_q's)
 700 *
 701 * Must be called with the hb lock held.
 702 */
 703static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
 704					union futex_key *key)
 705{
 706	struct futex_q *this;
 707
 708	plist_for_each_entry(this, &hb->chain, list) {
 709		if (match_futex(&this->key, key))
 710			return this;
 711	}
 712	return NULL;
 713}
 714
 715static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
 716				      u32 uval, u32 newval)
 717{
 718	int ret;
 719
 720	pagefault_disable();
 721	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
 722	pagefault_enable();
 723
 724	return ret;
 725}
 726
 727static int get_futex_value_locked(u32 *dest, u32 __user *from)
 728{
 729	int ret;
 730
 731	pagefault_disable();
 732	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
 733	pagefault_enable();
 734
 735	return ret ? -EFAULT : 0;
 736}
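/*
 * Editorial example (userspace side, illustrative only): the two
 * helpers above are the in-kernel half of the userspace protocol. A
 * deliberately naive userspace mutex over the same u32 looks roughly
 * like this; a real one tracks contention so it can skip the
 * FUTEX_WAKE syscall on the uncontended path, cf. Drepper's
 * "Futexes Are Tricky":
 *
 *	#include <linux/futex.h>
 *	#include <stdatomic.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void futex_mutex_lock(atomic_uint *f)
 *	{
 *		unsigned int zero = 0;
 *
 *		while (!atomic_compare_exchange_weak(f, &zero, 1)) {
 *			syscall(SYS_futex, f, FUTEX_WAIT, 1, NULL, NULL, 0);
 *			zero = 0;
 *		}
 *	}
 *
 *	static void futex_mutex_unlock(atomic_uint *f)
 *	{
 *		atomic_store(f, 0);
 *		syscall(SYS_futex, f, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 */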
 737
 738
 739/*
 740 * PI code:
 741 */
 742static int refill_pi_state_cache(void)
 743{
 744	struct futex_pi_state *pi_state;
 745
 746	if (likely(current->pi_state_cache))
 747		return 0;
 748
 749	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
 750
 751	if (!pi_state)
 752		return -ENOMEM;
 753
 754	INIT_LIST_HEAD(&pi_state->list);
 755	/* pi_mutex gets initialized later */
 756	pi_state->owner = NULL;
 757	atomic_set(&pi_state->refcount, 1);
 758	pi_state->key = FUTEX_KEY_INIT;
 759
 760	current->pi_state_cache = pi_state;
 761
 762	return 0;
 763}
 764
 765static struct futex_pi_state *alloc_pi_state(void)
 766{
 767	struct futex_pi_state *pi_state = current->pi_state_cache;
 768
 769	WARN_ON(!pi_state);
 770	current->pi_state_cache = NULL;
 771
 772	return pi_state;
 773}
 774
 775/*
 776 * Drops a reference to the pi_state object and frees or caches it
 777 * when the last reference is gone.
 778 *
 779 * Must be called with the hb lock held.
 780 */
 781static void put_pi_state(struct futex_pi_state *pi_state)
 782{
 783	if (!pi_state)
 784		return;
 785
 786	if (!atomic_dec_and_test(&pi_state->refcount))
 787		return;
 788
 789	/*
 790	 * If pi_state->owner is NULL, the owner is most probably dying
 791	 * and has cleaned up the pi_state already
 792	 */
 793	if (pi_state->owner) {
 794		raw_spin_lock_irq(&pi_state->owner->pi_lock);
 795		list_del_init(&pi_state->list);
 796		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 797
 798		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
 799	}
 800
 801	if (current->pi_state_cache)
 802		kfree(pi_state);
 803	else {
 804		/*
 805		 * pi_state->list is already empty.
 806		 * clear pi_state->owner.
 807		 * refcount is at 0 - put it back to 1.
 808		 */
 809		pi_state->owner = NULL;
 810		atomic_set(&pi_state->refcount, 1);
 811		current->pi_state_cache = pi_state;
 812	}
 813}
 814
 815/*
 816 * Look up the task based on what TID userspace gave us.
 817 * We don't trust it.
 818 */
 819static struct task_struct *futex_find_get_task(pid_t pid)
 820{
 821	struct task_struct *p;
 822
 823	rcu_read_lock();
 824	p = find_task_by_vpid(pid);
 825	if (p)
 826		get_task_struct(p);
 827
 828	rcu_read_unlock();
 829
 830	return p;
 831}
 832
 833/*
 834 * This task is holding PI mutexes at exit time => bad.
 835 * Kernel cleans up PI-state, but userspace is likely hosed.
 836 * (Robust-futex cleanup is separate and might save the day for userspace.)
 837 */
 838void exit_pi_state_list(struct task_struct *curr)
 839{
 840	struct list_head *next, *head = &curr->pi_state_list;
 841	struct futex_pi_state *pi_state;
 842	struct futex_hash_bucket *hb;
 843	union futex_key key = FUTEX_KEY_INIT;
 844
 845	if (!futex_cmpxchg_enabled)
 846		return;
 847	/*
 848	 * We are a ZOMBIE and nobody can enqueue itself on
 849	 * pi_state_list anymore, but we have to be careful
 850	 * versus waiters unqueueing themselves:
 851	 */
 852	raw_spin_lock_irq(&curr->pi_lock);
 853	while (!list_empty(head)) {
 854
 855		next = head->next;
 856		pi_state = list_entry(next, struct futex_pi_state, list);
 857		key = pi_state->key;
 858		hb = hash_futex(&key);
 859		raw_spin_unlock_irq(&curr->pi_lock);
 860
 861		spin_lock(&hb->lock);
 862
 863		raw_spin_lock_irq(&curr->pi_lock);
 864		/*
 865		 * We dropped the pi-lock, so re-check whether this
 866		 * task still owns the PI-state:
 867		 */
 868		if (head->next != next) {
 869			spin_unlock(&hb->lock);
 870			continue;
 871		}
 872
 873		WARN_ON(pi_state->owner != curr);
 874		WARN_ON(list_empty(&pi_state->list));
 875		list_del_init(&pi_state->list);
 876		pi_state->owner = NULL;
 877		raw_spin_unlock_irq(&curr->pi_lock);
 878
 879		rt_mutex_unlock(&pi_state->pi_mutex);
 880
 881		spin_unlock(&hb->lock);
 882
 883		raw_spin_lock_irq(&curr->pi_lock);
 884	}
 885	raw_spin_unlock_irq(&curr->pi_lock);
 886}
 887
 888/*
 889 * We need to check the following states:
 890 *
 891 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 892 *
 893 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 894 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 895 *
 896 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 897 *
 898 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 899 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 900 *
 901 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 902 *
 903 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 904 *
 905 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 906 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 907 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 908 *
 909 * [1]	Indicates that the kernel can acquire the futex atomically. We
 910 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 911 *
 912 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 913 *      thread is found then it indicates that the owner TID has died.
 914 *
 915 * [3]	Invalid. The waiter is queued on a non-PI futex.
 916 *
 917 * [4]	Valid state after exit_robust_list(), which sets the user space
 918 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 919 *
 920 * [5]	The user space value got manipulated between exit_robust_list()
 921 *	and exit_pi_state_list()
 922 *
 923 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 924 *	the pi_state but cannot access the user space value.
 925 *
 926 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 927 *
 928 * [8]	Owner and user space value match
 929 *
 930 * [9]	There is no transient state which sets the user space TID to 0
 931 *	except exit_robust_list(), but this is indicated by the
 932 *	FUTEX_OWNER_DIED bit. See [4]
 933 *
 934 * [10] There is no transient state which leaves owner and user space
 935 *	TID out of sync.
 936 */
 937
 938/*
 939 * Validate that the existing waiter has a pi_state and sanity check
 940 * the pi_state against the user space value. If correct, attach to
 941 * it.
 942 */
 943static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
 944			      struct futex_pi_state **ps)
 945{
 946	pid_t pid = uval & FUTEX_TID_MASK;
 947
 948	/*
 949	 * Userspace might have messed up non-PI and PI futexes [3]
 950	 */
 951	if (unlikely(!pi_state))
 952		return -EINVAL;
 953
 954	WARN_ON(!atomic_read(&pi_state->refcount));
 955
 956	/*
 957	 * Handle the owner died case:
 958	 */
 959	if (uval & FUTEX_OWNER_DIED) {
 960		/*
 961		 * exit_pi_state_list sets owner to NULL and wakes the
 962		 * topmost waiter. The task which acquires the
 963		 * pi_state->rt_mutex will fixup owner.
 964		 */
 965		if (!pi_state->owner) {
 966			/*
 967			 * No pi state owner, but the user space TID
 968			 * is not 0. Inconsistent state. [5]
 969			 */
 970			if (pid)
 971				return -EINVAL;
 972			/*
 973			 * Take a ref on the state and return success. [4]
 974			 */
 975			goto out_state;
 976		}
 977
 978		/*
 979		 * If TID is 0, then either the dying owner has not
 980		 * yet executed exit_pi_state_list() or some waiter
 981		 * acquired the rtmutex in the pi state, but did not
 982		 * yet fixup the TID in user space.
 983		 *
 984		 * Take a ref on the state and return success. [6]
 985		 */
 986		if (!pid)
 987			goto out_state;
 988	} else {
 989		/*
 990		 * If the owner died bit is not set, then the pi_state
 991		 * must have an owner. [7]
 992		 */
 993		if (!pi_state->owner)
 994			return -EINVAL;
 995	}
 996
 997	/*
 998	 * Bail out if user space manipulated the futex value. If pi
 999	 * state exists then the owner TID must be the same as the
1000	 * user space TID. [9/10]
1001	 */
1002	if (pid != task_pid_vnr(pi_state->owner))
1003		return -EINVAL;
1004out_state:
1005	atomic_inc(&pi_state->refcount);
1006	*ps = pi_state;
1007	return 0;
1008}
1009
1010/*
1011 * Lookup the task for the TID provided from user space and attach to
1012 * it after doing proper sanity checks.
1013 */
1014static int attach_to_pi_owner(u32 uval, union futex_key *key,
1015			      struct futex_pi_state **ps)
1016{
1017	pid_t pid = uval & FUTEX_TID_MASK;
1018	struct futex_pi_state *pi_state;
1019	struct task_struct *p;
1020
1021	/*
1022	 * We are the first waiter - try to look up the real owner and attach
1023	 * the new pi_state to it, but bail out when TID = 0 [1]
1024	 */
1025	if (!pid)
1026		return -ESRCH;
1027	p = futex_find_get_task(pid);
1028	if (!p)
1029		return -ESRCH;
1030
1031	if (unlikely(p->flags & PF_KTHREAD)) {
1032		put_task_struct(p);
1033		return -EPERM;
1034	}
1035
1036	/*
1037	 * We need to look at the task state flags to figure out
1038	 * whether the task is exiting. To protect against the do_exit
1039	 * change of the task flags, we do this protected by
1040	 * p->pi_lock:
1041	 */
1042	raw_spin_lock_irq(&p->pi_lock);
1043	if (unlikely(p->flags & PF_EXITING)) {
1044		/*
1045		 * The task is on the way out. When PF_EXITPIDONE is
1046		 * set, we know that the task has finished the
1047		 * cleanup:
1048		 */
1049		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
1050
1051		raw_spin_unlock_irq(&p->pi_lock);
1052		put_task_struct(p);
1053		return ret;
1054	}
1055
1056	/*
1057	 * No existing pi state. First waiter. [2]
1058	 */
1059	pi_state = alloc_pi_state();
1060
1061	/*
1062	 * Initialize the pi_mutex in locked state and make @p
1063	 * the owner of it:
1064	 */
1065	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1066
1067	/* Store the key for possible exit cleanups: */
1068	pi_state->key = *key;
1069
1070	WARN_ON(!list_empty(&pi_state->list));
1071	list_add(&pi_state->list, &p->pi_state_list);
1072	pi_state->owner = p;
1073	raw_spin_unlock_irq(&p->pi_lock);
1074
1075	put_task_struct(p);
1076
1077	*ps = pi_state;
1078
1079	return 0;
1080}
1081
1082static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
1083			   union futex_key *key, struct futex_pi_state **ps)
1084{
1085	struct futex_q *match = futex_top_waiter(hb, key);
1086
1087	/*
1088	 * If there is a waiter on that futex, validate it and
1089	 * attach to the pi_state when the validation succeeds.
1090	 */
1091	if (match)
1092		return attach_to_pi_state(uval, match->pi_state, ps);
1093
1094	/*
1095	 * We are the first waiter - try to look up the owner based on
1096	 * @uval and attach to it.
1097	 */
1098	return attach_to_pi_owner(uval, key, ps);
1099}
1100
1101static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1102{
1103	u32 uninitialized_var(curval);
1104
1105	if (unlikely(should_fail_futex(true)))
1106		return -EFAULT;
1107
1108	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1109		return -EFAULT;
1110
1111	/* If the user space value changed, let the caller retry */
1112	return curval != uval ? -EAGAIN : 0;
1113}
1114
1115/**
1116 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1117 * @uaddr:		the pi futex user address
1118 * @hb:			the pi futex hash bucket
1119 * @key:		the futex key associated with uaddr and hb
1120 * @ps:			the pi_state pointer where we store the result of the
1121 *			lookup
1122 * @task:		the task to perform the atomic lock work for.  This will
1123 *			be "current" except in the case of requeue pi.
1124 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
1125 *
1126 * Return:
1127 *  0 - ready to wait;
1128 *  1 - acquired the lock;
1129 * <0 - error
1130 *
1131 * The hb->lock and futex_key refs shall be held by the caller.
1132 */
1133static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1134				union futex_key *key,
1135				struct futex_pi_state **ps,
1136				struct task_struct *task, int set_waiters)
1137{
1138	u32 uval, newval, vpid = task_pid_vnr(task);
1139	struct futex_q *match;
1140	int ret;
1141
1142	/*
1143	 * Read the user space value first so we can validate a few
1144	 * things before proceeding further.
1145	 */
1146	if (get_futex_value_locked(&uval, uaddr))
1147		return -EFAULT;
1148
1149	if (unlikely(should_fail_futex(true)))
1150		return -EFAULT;
1151
1152	/*
1153	 * Detect deadlocks.
1154	 */
1155	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
1156		return -EDEADLK;
1157
1158	if ((unlikely(should_fail_futex(true))))
1159		return -EDEADLK;
1160
1161	/*
1162	 * Lookup existing state first. If it exists, try to attach to
1163	 * its pi_state.
1164	 */
1165	match = futex_top_waiter(hb, key);
1166	if (match)
1167		return attach_to_pi_state(uval, match->pi_state, ps);
1168
1169	/*
1170	 * No waiter and the user TID is 0. We are here because the
1171	 * waiters bit or the owner died bit is set, or we were called
1172	 * from requeue_cmp_pi, or for whatever other reason something
1173	 * took the syscall.
1174	 */
1175	if (!(uval & FUTEX_TID_MASK)) {
1176		/*
1177		 * We take over the futex. No other waiters and the user space
1178		 * TID is 0. We preserve the owner died bit.
1179		 */
1180		newval = uval & FUTEX_OWNER_DIED;
1181		newval |= vpid;
1182
1183		/* The futex requeue_pi code can enforce the waiters bit */
1184		if (set_waiters)
1185			newval |= FUTEX_WAITERS;
1186
1187		ret = lock_pi_update_atomic(uaddr, uval, newval);
1188		/* If the take over worked, return 1 */
1189		return ret < 0 ? ret : 1;
1190	}
1191
1192	/*
1193	 * First waiter. Set the waiters bit before attaching ourself to
1194	 * the owner. If owner tries to unlock, it will be forced into
1195	 * the kernel and blocked on hb->lock.
1196	 */
1197	newval = uval | FUTEX_WAITERS;
1198	ret = lock_pi_update_atomic(uaddr, uval, newval);
1199	if (ret)
1200		return ret;
1201	/*
1202	 * If the update of the user space value succeeded, we try to
1203	 * attach to the owner. If that fails, no harm done, we only
1204	 * set the FUTEX_WAITERS bit in the user space variable.
1205	 */
1206	return attach_to_pi_owner(uval, key, ps);
1207}
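/*
 * Editorial sketch (userspace side, illustrative only; 'f' is a
 * hypothetical atomic_uint futex word and gettid() stands in for the
 * caller's thread id): the atomic protocol above mirrors the fast
 * path of a PI-aware userspace lock - cmpxchg 0 -> TID in userspace,
 * and only enter the kernel on contention:
 *
 *	unsigned int expected = 0;
 *
 *	if (!atomic_compare_exchange_strong(f, &expected, gettid()))
 *		syscall(SYS_futex, f, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 * The kernel side then performs the FUTEX_WAITERS and owner handling
 * shown in futex_lock_pi_atomic() on the waiter's behalf.
 */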
1208
1209/**
1210 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1211 * @q:	The futex_q to unqueue
1212 *
1213 * The q->lock_ptr must not be NULL and must be held by the caller.
1214 */
1215static void __unqueue_futex(struct futex_q *q)
1216{
1217	struct futex_hash_bucket *hb;
1218
1219	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
1220	    || WARN_ON(plist_node_empty(&q->list)))
1221		return;
1222
1223	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1224	plist_del(&q->list, &hb->chain);
1225	hb_waiters_dec(hb);
1226}
1227
1228/*
1229 * The hash bucket lock must be held when this is called.
1230 * Afterwards, the futex_q must not be accessed. Callers
1231 * must ensure to later call wake_up_q() for the actual
1232 * wakeups to occur.
1233 */
1234static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1235{
1236	struct task_struct *p = q->task;
1237
1238	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1239		return;
1240
1241	/*
1242	 * Queue the task for later wakeup, after we've released
1243	 * the hb->lock. wake_q_add() grabs a reference to p.
1244	 */
1245	wake_q_add(wake_q, p);
1246	__unqueue_futex(q);
1247	/*
1248	 * The waiting task can free the futex_q as soon as
1249	 * q->lock_ptr = NULL is written, without taking any locks. A
1250	 * memory barrier is required here to prevent the following
1251	 * store to lock_ptr from getting ahead of the plist_del.
1252	 */
1253	smp_wmb();
1254	q->lock_ptr = NULL;
1255}
1256
1257static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1258			 struct futex_hash_bucket *hb)
1259{
1260	struct task_struct *new_owner;
1261	struct futex_pi_state *pi_state = this->pi_state;
1262	u32 uninitialized_var(curval), newval;
1263	WAKE_Q(wake_q);
1264	bool deboost;
1265	int ret = 0;
1266
1267	if (!pi_state)
1268		return -EINVAL;
1269
1270	/*
1271	 * If current does not own the pi_state then the futex is
1272	 * inconsistent and user space fiddled with the futex value.
1273	 */
1274	if (pi_state->owner != current)
1275		return -EINVAL;
1276
1277	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1278	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1279
1280	/*
1281	 * It is possible that the next waiter (the one that brought
1282	 * this owner to the kernel) timed out and is no longer
1283	 * waiting on the lock.
1284	 */
1285	if (!new_owner)
1286		new_owner = this->task;
1287
1288	/*
1289	 * We pass it to the next owner. The WAITERS bit is always
1290	 * kept enabled while there is PI state around. We cleanup the
1291	 * owner died bit, because we are the owner.
1292	 */
1293	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1294
1295	if (unlikely(should_fail_futex(true)))
1296		ret = -EFAULT;
1297
1298	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
1299		ret = -EFAULT;
1300	} else if (curval != uval) {
1301		/*
1302		 * If an unconditional UNLOCK_PI operation (user space did not
1303		 * try the TID->0 transition) raced with a waiter setting the
1304		 * FUTEX_WAITERS flag between get_user() and locking the hash
1305		 * bucket lock, retry the operation.
1306		 */
1307		if ((FUTEX_TID_MASK & curval) == uval)
1308			ret = -EAGAIN;
1309		else
1310			ret = -EINVAL;
1311	}
1312	if (ret) {
1313		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1314		return ret;
1315	}
1316
1317	raw_spin_lock(&pi_state->owner->pi_lock);
1318	WARN_ON(list_empty(&pi_state->list));
1319	list_del_init(&pi_state->list);
1320	raw_spin_unlock(&pi_state->owner->pi_lock);
1321
1322	raw_spin_lock(&new_owner->pi_lock);
1323	WARN_ON(!list_empty(&pi_state->list));
1324	list_add(&pi_state->list, &new_owner->pi_state_list);
1325	pi_state->owner = new_owner;
1326	raw_spin_unlock(&new_owner->pi_lock);
1327
1328	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1329
1330	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1331
1332	/*
1333	 * First unlock HB so the waiter does not spin on it once he got woken
1334	 * up. Second wake up the waiter before the priority is adjusted. If we
1335	 * deboost first (and lose our higher priority), then the task might get
1336	 * scheduled away before the wake up can take place.
1337	 */
1338	spin_unlock(&hb->lock);
1339	wake_up_q(&wake_q);
1340	if (deboost)
1341		rt_mutex_adjust_prio(current);
1342
1343	return 0;
1344}
1345
1346/*
1347 * Express the locking dependencies for lockdep:
1348 */
1349static inline void
1350double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1351{
1352	if (hb1 <= hb2) {
1353		spin_lock(&hb1->lock);
1354		if (hb1 < hb2)
1355			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1356	} else { /* hb1 > hb2 */
1357		spin_lock(&hb2->lock);
1358		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1359	}
1360}
1361
1362static inline void
1363double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1364{
1365	spin_unlock(&hb1->lock);
1366	if (hb1 != hb2)
1367		spin_unlock(&hb2->lock);
1368}
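/*
 * Editorial note (not part of the original source): taking the two
 * bucket locks in address order above is the standard ABBA-deadlock
 * avoidance pattern. Two CPUs requeueing in opposite directions
 * (hb1 -> hb2 and hb2 -> hb1) would otherwise each hold one lock
 * while spinning on the other; ordering by pointer value makes both
 * sides acquire the lower-addressed lock first, so one of them always
 * wins outright.
 */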
1369
1370/*
1371 * Wake up waiters matching bitset queued on this futex (uaddr).
1372 */
1373static int
1374futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1375{
1376	struct futex_hash_bucket *hb;
1377	struct futex_q *this, *next;
1378	union futex_key key = FUTEX_KEY_INIT;
1379	int ret;
1380	WAKE_Q(wake_q);
1381
1382	if (!bitset)
1383		return -EINVAL;
1384
1385	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
1386	if (unlikely(ret != 0))
1387		goto out;
1388
1389	hb = hash_futex(&key);
1390
1391	/* Make sure we really have tasks to wake up */
1392	if (!hb_waiters_pending(hb))
1393		goto out_put_key;
1394
1395	spin_lock(&hb->lock);
1396
1397	plist_for_each_entry_safe(this, next, &hb->chain, list) {
1398		if (match_futex(&this->key, &key)) {
1399			if (this->pi_state || this->rt_waiter) {
1400				ret = -EINVAL;
1401				break;
1402			}
1403
1404			/* Check if one of the bits is set in both bitsets */
1405			if (!(this->bitset & bitset))
1406				continue;
1407
1408			mark_wake_futex(&wake_q, this);
1409			if (++ret >= nr_wake)
1410				break;
1411		}
1412	}
1413
1414	spin_unlock(&hb->lock);
1415	wake_up_q(&wake_q);
1416out_put_key:
1417	put_futex_key(&key);
1418out:
1419	return ret;
1420}
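/*
 * Editorial example (userspace side, illustrative only): this is the
 * path behind a plain wake request such as
 *
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
 *
 * nr_wake = INT_MAX wakes every waiter; for FUTEX_WAKE the bitset
 * arrives as FUTEX_BITSET_MATCH_ANY (all bits set), so it intersects
 * every queued waiter's bitset.
 */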
1421
1422/*
1423 * Wake up all waiters hashed on the physical page that is mapped
1424 * to this virtual address:
1425 */
1426static int
1427futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1428	      int nr_wake, int nr_wake2, int op)
1429{
1430	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1431	struct futex_hash_bucket *hb1, *hb2;
1432	struct futex_q *this, *next;
1433	int ret, op_ret;
1434	WAKE_Q(wake_q);
1435
1436retry:
1437	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1438	if (unlikely(ret != 0))
1439		goto out;
1440	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1441	if (unlikely(ret != 0))
1442		goto out_put_key1;
1443
1444	hb1 = hash_futex(&key1);
1445	hb2 = hash_futex(&key2);
1446
1447retry_private:
1448	double_lock_hb(hb1, hb2);
1449	op_ret = futex_atomic_op_inuser(op, uaddr2);
1450	if (unlikely(op_ret < 0)) {
1451
1452		double_unlock_hb(hb1, hb2);
1453
1454#ifndef CONFIG_MMU
1455		/*
1456		 * we don't get EFAULT from MMU faults if we don't have an MMU,
1457		 * but we might get them from range checking
1458		 */
1459		ret = op_ret;
1460		goto out_put_keys;
1461#endif
1462
1463		if (unlikely(op_ret != -EFAULT)) {
1464			ret = op_ret;
1465			goto out_put_keys;
1466		}
1467
1468		ret = fault_in_user_writeable(uaddr2);
1469		if (ret)
1470			goto out_put_keys;
1471
1472		if (!(flags & FLAGS_SHARED))
1473			goto retry_private;
1474
1475		put_futex_key(&key2);
1476		put_futex_key(&key1);
1477		goto retry;
1478	}
1479
1480	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1481		if (match_futex(&this->key, &key1)) {
1482			if (this->pi_state || this->rt_waiter) {
1483				ret = -EINVAL;
1484				goto out_unlock;
1485			}
1486			mark_wake_futex(&wake_q, this);
1487			if (++ret >= nr_wake)
1488				break;
1489		}
1490	}
1491
1492	if (op_ret > 0) {
1493		op_ret = 0;
1494		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1495			if (match_futex(&this->key, &key2)) {
1496				if (this->pi_state || this->rt_waiter) {
1497					ret = -EINVAL;
1498					goto out_unlock;
1499				}
1500				mark_wake_futex(&wake_q, this);
1501				if (++op_ret >= nr_wake2)
1502					break;
1503			}
1504		}
1505		ret += op_ret;
1506	}
1507
1508out_unlock:
1509	double_unlock_hb(hb1, hb2);
1510	wake_up_q(&wake_q);
1511out_put_keys:
1512	put_futex_key(&key2);
1513out_put_key1:
1514	put_futex_key(&key1);
1515out:
1516	return ret;
1517}
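/*
 * Editorial example (userspace side, illustrative only; 'uaddr1',
 * 'uaddr2' and 'nr2' are hypothetical caller values): FUTEX_WAKE_OP
 * packs an atomic operation on *uaddr2 plus a wake condition into an
 * 'op' word built with the FUTEX_OP() macro from <linux/futex.h>:
 *
 *	int op = FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1);
 *
 *	syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1,
 *		(void *)(unsigned long)nr2, uaddr2, op);
 *
 * This atomically stores 0 to *uaddr2, wakes up to one waiter on
 * uaddr1, and additionally wakes up to nr2 waiters on uaddr2 if the
 * old value of *uaddr2 was greater than 1. Glibc's old condvar code
 * used a variant of this to wake a waiter and release an internal
 * lock with a single syscall.
 */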
1518
1519/**
1520 * requeue_futex() - Requeue a futex_q from one hb to another
1521 * @q:		the futex_q to requeue
1522 * @hb1:	the source hash_bucket
1523 * @hb2:	the target hash_bucket
1524 * @key2:	the new key for the requeued futex_q
1525 */
1526static inline
1527void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1528		   struct futex_hash_bucket *hb2, union futex_key *key2)
1529{
1530
1531	/*
1532	 * If key1 and key2 hash to the same bucket, no need to
1533	 * requeue.
1534	 */
1535	if (likely(&hb1->chain != &hb2->chain)) {
1536		plist_del(&q->list, &hb1->chain);
1537		hb_waiters_dec(hb1);
1538		hb_waiters_inc(hb2);
1539		plist_add(&q->list, &hb2->chain);
1540		q->lock_ptr = &hb2->lock;
1541	}
1542	get_futex_key_refs(key2);
1543	q->key = *key2;
1544}
1545
1546/**
1547 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1548 * @q:		the futex_q
1549 * @key:	the key of the requeue target futex
1550 * @hb:		the hash_bucket of the requeue target futex
1551 *
1552 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1553 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1554 * to the requeue target futex so the waiter can detect the wakeup on the right
1555 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1556 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
1557 * to protect access to the pi_state to fixup the owner later.  Must be called
1558 * with both q->lock_ptr and hb->lock held.
1559 */
1560static inline
1561void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1562			   struct futex_hash_bucket *hb)
1563{
1564	get_futex_key_refs(key);
1565	q->key = *key;
1566
1567	__unqueue_futex(q);
1568
1569	WARN_ON(!q->rt_waiter);
1570	q->rt_waiter = NULL;
1571
1572	q->lock_ptr = &hb->lock;
1573
1574	wake_up_state(q->task, TASK_NORMAL);
1575}
1576
1577/**
1578 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1579 * @pifutex:		the user address of the target (pi) futex
1580 * @hb1:		the source futex hash bucket, must be locked by the caller
1581 * @hb2:		the target futex hash bucket, must be locked by the caller
1582 * @key1:		the source futex key
1583 * @key2:		the target futex key
1584 * @ps:			address to store the pi_state pointer
1585 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
1586 *
1587 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1588 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1589 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1590 * hb1 and hb2 must be held by the caller.
1591 *
1592 * Return:
1593 *  0 - failed to acquire the lock atomically;
1594 * >0 - acquired the lock, return value is vpid of the top_waiter
1595 * <0 - error
1596 */
1597static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1598				 struct futex_hash_bucket *hb1,
1599				 struct futex_hash_bucket *hb2,
1600				 union futex_key *key1, union futex_key *key2,
1601				 struct futex_pi_state **ps, int set_waiters)
1602{
1603	struct futex_q *top_waiter = NULL;
1604	u32 curval;
1605	int ret, vpid;
1606
1607	if (get_futex_value_locked(&curval, pifutex))
1608		return -EFAULT;
1609
1610	if (unlikely(should_fail_futex(true)))
1611		return -EFAULT;
1612
1613	/*
1614	 * Find the top_waiter and determine if there are additional waiters.
1615	 * If the caller intends to requeue more than 1 waiter to pifutex,
1616	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1617	 * as we have means to handle the possible fault.  If not, don't set
1618	 * the bit unnecessarily as it will force the subsequent unlock to enter
1619	 * the kernel.
1620	 */
1621	top_waiter = futex_top_waiter(hb1, key1);
1622
1623	/* There are no waiters, nothing for us to do. */
1624	if (!top_waiter)
1625		return 0;
1626
1627	/* Ensure we requeue to the expected futex. */
1628	if (!match_futex(top_waiter->requeue_pi_key, key2))
1629		return -EINVAL;
1630
1631	/*
1632	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1633	 * the contended case or if set_waiters is 1.  The pi_state is returned
1634	 * in ps in contended cases.
1635	 */
1636	vpid = task_pid_vnr(top_waiter->task);
1637	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1638				   set_waiters);
1639	if (ret == 1) {
1640		requeue_pi_wake_futex(top_waiter, key2, hb2);
1641		return vpid;
1642	}
1643	return ret;
1644}
1645
1646/**
1647 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1648 * @uaddr1:	source futex user address
1649 * @flags:	futex flags (FLAGS_SHARED, etc.)
1650 * @uaddr2:	target futex user address
1651 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
1652 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
1653 * @cmpval:	@uaddr1 expected value (or %NULL)
1654 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
1655 *		pi futex (pi to pi requeue is not supported)
1656 *
1657 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1658 * uaddr2 atomically on behalf of the top waiter.
1659 *
1660 * Return:
1661 * >=0 - on success, the number of tasks requeued or woken;
1662 *  <0 - on error
1663 */
1664static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1665			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1666			 u32 *cmpval, int requeue_pi)
1667{
1668	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1669	int drop_count = 0, task_count = 0, ret;
1670	struct futex_pi_state *pi_state = NULL;
1671	struct futex_hash_bucket *hb1, *hb2;
1672	struct futex_q *this, *next;
1673	WAKE_Q(wake_q);
1674
1675	if (requeue_pi) {
1676		/*
1677		 * Requeue PI only works on two distinct uaddrs. This
1678		 * check is only valid for private futexes. See below.
1679		 */
1680		if (uaddr1 == uaddr2)
1681			return -EINVAL;
1682
1683		/*
1684		 * requeue_pi requires a pi_state, try to allocate it now
1685		 * without any locks in case it fails.
1686		 */
1687		if (refill_pi_state_cache())
1688			return -ENOMEM;
1689		/*
1690		 * requeue_pi must wake as many tasks as it can, up to nr_wake
1691		 * + nr_requeue, since it acquires the rt_mutex prior to
1692		 * returning to userspace, so as to not leave the rt_mutex with
1693		 * waiters and no owner.  However, second and third wake-ups
1694		 * cannot be predicted as they involve race conditions with the
1695		 * first wake and a fault while looking up the pi_state.  Both
1696		 * pthread_cond_signal() and pthread_cond_broadcast() should
1697		 * use nr_wake=1.
1698		 */
1699		if (nr_wake != 1)
1700			return -EINVAL;
1701	}
1702
1703retry:
1704	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1705	if (unlikely(ret != 0))
1706		goto out;
1707	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1708			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1709	if (unlikely(ret != 0))
1710		goto out_put_key1;
1711
1712	/*
1713	 * The check above which compares uaddrs is not sufficient for
1714	 * shared futexes. We need to compare the keys:
1715	 */
1716	if (requeue_pi && match_futex(&key1, &key2)) {
1717		ret = -EINVAL;
1718		goto out_put_keys;
1719	}
1720
1721	hb1 = hash_futex(&key1);
1722	hb2 = hash_futex(&key2);
1723
1724retry_private:
1725	hb_waiters_inc(hb2);
1726	double_lock_hb(hb1, hb2);
1727
1728	if (likely(cmpval != NULL)) {
1729		u32 curval;
1730
1731		ret = get_futex_value_locked(&curval, uaddr1);
1732
1733		if (unlikely(ret)) {
1734			double_unlock_hb(hb1, hb2);
1735			hb_waiters_dec(hb2);
1736
1737			ret = get_user(curval, uaddr1);
1738			if (ret)
1739				goto out_put_keys;
1740
1741			if (!(flags & FLAGS_SHARED))
1742				goto retry_private;
1743
1744			put_futex_key(&key2);
1745			put_futex_key(&key1);
1746			goto retry;
1747		}
1748		if (curval != *cmpval) {
1749			ret = -EAGAIN;
1750			goto out_unlock;
1751		}
1752	}
1753
1754	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1755		/*
1756		 * Attempt to acquire uaddr2 and wake the top waiter. If we
1757		 * intend to requeue waiters, force setting the FUTEX_WAITERS
1758		 * bit.  We force this here where we are able to easily handle
1759		 * faults rather in the requeue loop below.
1760		 */
1761		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1762						 &key2, &pi_state, nr_requeue);
1763
1764		/*
1765		 * At this point the top_waiter has either taken uaddr2 or is
1766		 * waiting on it.  If the former, then the pi_state will not
1767		 * exist yet, look it up one more time to ensure we have a
1768		 * reference to it. If the lock was taken, ret contains the
1769		 * vpid of the top waiter task.
1770		 * If the lock was not taken, we have pi_state and an initial
1771		 * refcount on it. In case of an error we have nothing.
1772		 */
1773		if (ret > 0) {
1774			WARN_ON(pi_state);
1775			drop_count++;
1776			task_count++;
1777			/*
1778			 * If we acquired the lock, then the user space value
1779			 * of uaddr2 should be vpid. It cannot be changed by
1780			 * the top waiter as it is blocked on hb2 lock if it
1781			 * tries to do so. If something fiddled with it behind
1782			 * our back the pi state lookup might unearth it. So
1783			 * we rather use the known value than rereading and
1784			 * handing potential crap to lookup_pi_state.
1785			 *
1786			 * If that call succeeds then we have pi_state and an
1787			 * initial refcount on it.
1788			 */
1789			ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1790		}
1791
1792		switch (ret) {
1793		case 0:
1794			/* We hold a reference on the pi state. */
1795			break;
1796
1797			/* If the above failed, then pi_state is NULL */
1798		case -EFAULT:
1799			double_unlock_hb(hb1, hb2);
1800			hb_waiters_dec(hb2);
1801			put_futex_key(&key2);
1802			put_futex_key(&key1);
1803			ret = fault_in_user_writeable(uaddr2);
1804			if (!ret)
1805				goto retry;
1806			goto out;
1807		case -EAGAIN:
1808			/*
1809			 * Two reasons for this:
1810			 * - Owner is exiting and we just wait for the
1811			 *   exit to complete.
1812			 * - The user space value changed.
1813			 */
1814			double_unlock_hb(hb1, hb2);
1815			hb_waiters_dec(hb2);
1816			put_futex_key(&key2);
1817			put_futex_key(&key1);
1818			cond_resched();
1819			goto retry;
1820		default:
1821			goto out_unlock;
1822		}
1823	}
1824
1825	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1826		if (task_count - nr_wake >= nr_requeue)
1827			break;
1828
1829		if (!match_futex(&this->key, &key1))
1830			continue;
1831
1832		/*
1833		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1834		 * be paired with each other and no other futex ops.
1835		 *
1836		 * We should never be requeueing a futex_q with a pi_state,
1837		 * which is awaiting a futex_unlock_pi().
1838		 */
1839		if ((requeue_pi && !this->rt_waiter) ||
1840		    (!requeue_pi && this->rt_waiter) ||
1841		    this->pi_state) {
1842			ret = -EINVAL;
1843			break;
1844		}
1845
1846		/*
1847		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
1848		 * lock, we already woke the top_waiter.  If not, it will be
1849		 * woken by futex_unlock_pi().
1850		 */
1851		if (++task_count <= nr_wake && !requeue_pi) {
1852			mark_wake_futex(&wake_q, this);
1853			continue;
1854		}
1855
1856		/* Ensure we requeue to the expected futex for requeue_pi. */
1857		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1858			ret = -EINVAL;
1859			break;
1860		}
1861
1862		/*
1863		 * Requeue nr_requeue waiters and possibly one more in the case
1864		 * of requeue_pi if we couldn't acquire the lock atomically.
1865		 */
1866		if (requeue_pi) {
1867			/*
1868			 * Prepare the waiter to take the rt_mutex. Take a
1869			 * refcount on the pi_state and store the pointer in
1870			 * the futex_q object of the waiter.
1871			 */
1872			atomic_inc(&pi_state->refcount);
1873			this->pi_state = pi_state;
1874			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1875							this->rt_waiter,
1876							this->task);
1877			if (ret == 1) {
1878				/*
1879				 * We got the lock. We do neither drop the
1880				 * refcount on pi_state nor clear
1881				 * this->pi_state because the waiter needs the
1882				 * pi_state for cleaning up the user space
1883				 * value. It will drop the refcount after
1884				 * doing so.
1885				 */
1886				requeue_pi_wake_futex(this, &key2, hb2);
1887				drop_count++;
1888				continue;
1889			} else if (ret) {
1890				/*
1891				 * rt_mutex_start_proxy_lock() detected a
1892				 * potential deadlock when we tried to queue
1893				 * that waiter. Drop the pi_state reference
1894				 * which we took above and remove the pointer
1895				 * to the state from the waiters futex_q
1896				 * object.
1897				 */
1898				this->pi_state = NULL;
1899				put_pi_state(pi_state);
1900				/*
1901				 * We stop queueing more waiters and let user
1902				 * space deal with the mess.
1903				 */
1904				break;
1905			}
1906		}
1907		requeue_futex(this, hb1, hb2, &key2);
1908		drop_count++;
1909	}
1910
1911	/*
1912	 * We took an extra initial reference to the pi_state either
1913	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
1914	 * need to drop it here again.
1915	 */
1916	put_pi_state(pi_state);
1917
1918out_unlock:
1919	double_unlock_hb(hb1, hb2);
1920	wake_up_q(&wake_q);
1921	hb_waiters_dec(hb2);
1922
1923	/*
1924	 * drop_futex_key_refs() must be called outside the spinlocks. During
1925	 * the requeue we moved futex_q's from the hash bucket at key1 to the
1926	 * one at key2 and updated their key pointer.  We no longer need to
1927	 * hold the references to key1.
1928	 */
1929	while (--drop_count >= 0)
1930		drop_futex_key_refs(&key1);
1931
1932out_put_keys:
1933	put_futex_key(&key2);
1934out_put_key1:
1935	put_futex_key(&key1);
1936out:
1937	return ret ? ret : task_count;
1938}
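/*
 * Editorial example (userspace side, illustrative only; 'cond_word',
 * 'mutex_word' and 'cond_val' are hypothetical names): the classic
 * consumer of this path is a pthread_cond_broadcast() style wakeup,
 * which wakes one waiter and requeues the rest onto the mutex word so
 * they don't all stampede for the mutex at once:
 *
 *	syscall(SYS_futex, &cond_word, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(unsigned long)INT_MAX, &mutex_word, cond_val);
 *
 * i.e. nr_wake = 1, nr_requeue = INT_MAX, cmpval = cond_val; the call
 * fails with -EAGAIN if cond_word no longer contains cond_val.
 */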
1939
1940/* The key must already be stored in q->key. */
1941static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1942	__acquires(&hb->lock)
1943{
1944	struct futex_hash_bucket *hb;
1945
1946	hb = hash_futex(&q->key);
1947
1948	/*
1949	 * Increment the counter before taking the lock so that
1950	 * a potential waker won't miss a task that is about to sleep
1951	 * while waiting for the spinlock. This is safe as all queue_lock()
1952	 * users end up calling queue_me(). Similarly, for housekeeping,
1953	 * decrement the counter at queue_unlock() when some error has
1954	 * occurred and we don't end up adding the task to the list.
1955	 */
1956	hb_waiters_inc(hb);
1957
1958	q->lock_ptr = &hb->lock;
1959
1960	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
1961	return hb;
1962}
1963
1964static inline void
1965queue_unlock(struct futex_hash_bucket *hb)
1966	__releases(&hb->lock)
1967{
1968	spin_unlock(&hb->lock);
1969	hb_waiters_dec(hb);
1970}
1971
1972/**
1973 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1974 * @q:	The futex_q to enqueue
1975 * @hb:	The destination hash bucket
1976 *
1977 * The hb->lock must be held by the caller, and is released here. A call to
1978 * queue_me() is typically paired with exactly one call to unqueue_me().  The
1979 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1980 * or nothing if the unqueue is done as part of the wake process and the unqueue
1981 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
1982 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1983 */
1984static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1985	__releases(&hb->lock)
1986{
1987	int prio;
1988
1989	/*
1990	 * The priority used to register this element is
1991	 * - either the real thread-priority for the real-time threads
1992	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1993	 * - or MAX_RT_PRIO for non-RT threads.
1994	 * Thus, all RT-threads are woken first in priority order, and
1995	 * the others are woken last, in FIFO order.
1996	 */
1997	prio = min(current->normal_prio, MAX_RT_PRIO);
1998
1999	plist_node_init(&q->list, prio);
2000	plist_add(&q->list, &hb->chain);
2001	q->task = current;
2002	spin_unlock(&hb->lock);
2003}
2004
2005/**
2006 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2007 * @q:	The futex_q to unqueue
2008 *
2009 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2010 * be paired with exactly one earlier call to queue_me().
2011 *
2012 * Return:
2013 *   1 - if the futex_q was still queued (and we unqueued it);
2014 *   0 - if the futex_q was already removed by the waking thread
2015 */
2016static int unqueue_me(struct futex_q *q)
2017{
2018	spinlock_t *lock_ptr;
2019	int ret = 0;
2020
2021	/* In the common case we don't take the spinlock, which is nice. */
2022retry:
2023	/*
2024	 * q->lock_ptr can change between this read and the following spin_lock.
2025	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2026	 * optimizing lock_ptr out of the logic below.
2027	 */
2028	lock_ptr = READ_ONCE(q->lock_ptr);
2029	if (lock_ptr != NULL) {
2030		spin_lock(lock_ptr);
2031		/*
2032		 * q->lock_ptr can change between reading it and
2033		 * spin_lock(), causing us to take the wrong lock.  This
2034		 * corrects the race condition.
2035		 *
2036		 * Reasoning goes like this: if we have the wrong lock,
2037		 * q->lock_ptr must have changed (maybe several times)
2038		 * between reading it and the spin_lock().  It can
2039		 * change again after the spin_lock() but only if it was
2040		 * already changed before the spin_lock().  It cannot,
2041		 * however, change back to the original value.  Therefore
2042		 * we can detect whether we acquired the correct lock.
2043		 */
2044		if (unlikely(lock_ptr != q->lock_ptr)) {
2045			spin_unlock(lock_ptr);
2046			goto retry;
2047		}
2048		__unqueue_futex(q);
2049
2050		BUG_ON(q->pi_state);
2051
2052		spin_unlock(lock_ptr);
2053		ret = 1;
2054	}
2055
2056	drop_futex_key_refs(&q->key);
2057	return ret;
2058}
2059
2060/*
2061 * PI futexes cannot be requeued and must remove themselves from the
2062 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2063 * and dropped here.
2064 */
2065static void unqueue_me_pi(struct futex_q *q)
2066	__releases(q->lock_ptr)
2067{
2068	__unqueue_futex(q);
2069
2070	BUG_ON(!q->pi_state);
2071	put_pi_state(q->pi_state);
2072	q->pi_state = NULL;
2073
2074	spin_unlock(q->lock_ptr);
2075}
2076
2077/*
2078 * Fixup the pi_state owner with the new owner.
2079 *
2080 * Must be called with hash bucket lock held and mm->sem held for non
2081 * private futexes.
2082 */
2083static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2084				struct task_struct *newowner)
2085{
2086	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2087	struct futex_pi_state *pi_state = q->pi_state;
2088	struct task_struct *oldowner = pi_state->owner;
2089	u32 uval, uninitialized_var(curval), newval;
2090	int ret;
2091
2092	/* Owner died? */
2093	if (!pi_state->owner)
2094		newtid |= FUTEX_OWNER_DIED;
2095
2096	/*
2097	 * We are here either because we stole the rtmutex from the
2098	 * previous highest priority waiter or we are the highest priority
2099	 * waiter but failed to get the rtmutex the first time.
2100	 * We have to replace the newowner TID in the user space variable.
2101	 * This must be atomic as we have to preserve the owner died bit here.
2102	 *
2103	 * Note: We write the user space value _before_ changing the pi_state
2104	 * because we can fault here. Imagine swapped out pages or a fork
2105	 * that marked all the anonymous memory readonly for cow.
2106	 *
2107	 * Modifying pi_state _before_ the user space value would
2108	 * leave the pi_state in an inconsistent state when we fault
2109	 * here, because we need to drop the hash bucket lock to
2110	 * handle the fault. This might be observed in the PID check
2111	 * in lookup_pi_state.
2112	 */
2113retry:
2114	if (get_futex_value_locked(&uval, uaddr))
2115		goto handle_fault;
2116
2117	while (1) {
2118		newval = (uval & FUTEX_OWNER_DIED) | newtid;
2119
2120		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
2121			goto handle_fault;
2122		if (curval == uval)
2123			break;
2124		uval = curval;
2125	}
2126
2127	/*
2128	 * We fixed up user space. Now we need to fix the pi_state
2129	 * itself.
2130	 */
2131	if (pi_state->owner != NULL) {
2132		raw_spin_lock_irq(&pi_state->owner->pi_lock);
2133		WARN_ON(list_empty(&pi_state->list));
2134		list_del_init(&pi_state->list);
2135		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
2136	}
2137
2138	pi_state->owner = newowner;
2139
2140	raw_spin_lock_irq(&newowner->pi_lock);
2141	WARN_ON(!list_empty(&pi_state->list));
2142	list_add(&pi_state->list, &newowner->pi_state_list);
2143	raw_spin_unlock_irq(&newowner->pi_lock);
2144	return 0;
2145
2146	/*
2147	 * To handle the page fault we need to drop the hash bucket
2148	 * lock here. That gives the other task (either the highest priority
2149	 * waiter itself or the task which stole the rtmutex) the
2150	 * chance to try the fixup of the pi_state. So once we are
2151	 * back from handling the fault we need to check the pi_state
2152	 * after reacquiring the hash bucket lock and before trying to
2153	 * do another fixup. When the fixup has been done already we
2154	 * simply return.
2155	 */
2156handle_fault:
2157	spin_unlock(q->lock_ptr);
2158
2159	ret = fault_in_user_writeable(uaddr);
2160
2161	spin_lock(q->lock_ptr);
2162
2163	/*
2164	 * Check if someone else fixed it for us:
2165	 */
2166	if (pi_state->owner != oldowner)
2167		return 0;
2168
2169	if (ret)
2170		return ret;
2171
2172	goto retry;
2173}
2174
2175static long futex_wait_restart(struct restart_block *restart);
2176
2177/**
2178 * fixup_owner() - Post lock pi_state and corner case management
2179 * @uaddr:	user address of the futex
2180 * @q:		futex_q (contains pi_state and access to the rt_mutex)
2181 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
2182 *
2183 * After attempting to lock an rt_mutex, this function is called to cleanup
2184 * the pi_state owner as well as handle race conditions that may allow us to
2185 * acquire the lock. Must be called with the hb lock held.
2186 *
2187 * Return:
2188 *  1 - success, lock taken;
2189 *  0 - success, lock not taken;
2190 * <0 - on error (-EFAULT)
2191 */
2192static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2193{
2194	struct task_struct *owner;
2195	int ret = 0;
2196
2197	if (locked) {
2198		/*
2199		 * Got the lock. We might not be the anticipated owner if we
2200		 * did a lock-steal - fix up the PI-state in that case:
2201		 */
2202		if (q->pi_state->owner != current)
2203			ret = fixup_pi_state_owner(uaddr, q, current);
2204		goto out;
2205	}
2206
2207	/*
2208	 * Catch the rare case, where the lock was released when we were on the
2209	 * way back before we locked the hash bucket.
2210	 */
2211	if (q->pi_state->owner == current) {
2212		/*
2213		 * Try to get the rt_mutex now. This might fail as some other
2214		 * task acquired the rt_mutex after we removed ourself from the
2215		 * rt_mutex waiters list.
2216		 */
2217		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2218			locked = 1;
2219			goto out;
2220		}
2221
2222		/*
2223		 * pi_state is incorrect, some other task did a lock steal and
2224		 * we returned due to timeout or signal without taking the
2225		 * rt_mutex. Too late.
2226		 */
2227		raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
2228		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2229		if (!owner)
2230			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2231		raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
2232		ret = fixup_pi_state_owner(uaddr, q, owner);
2233		goto out;
2234	}
2235
2236	/*
2237	 * Paranoia check. If we did not take the lock, then we should not be
2238	 * the owner of the rt_mutex.
2239	 */
2240	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2241		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2242				"pi-state %p\n", ret,
2243				q->pi_state->pi_mutex.owner,
2244				q->pi_state->owner);
2245
2246out:
2247	return ret ? ret : locked;
2248}
2249
2250/**
2251 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2252 * @hb:		the futex hash bucket, must be locked by the caller
2253 * @q:		the futex_q to queue up on
2254 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
2255 */
2256static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2257				struct hrtimer_sleeper *timeout)
2258{
2259	/*
2260	 * The task state is guaranteed to be set before another task can
2261	 * wake it. set_current_state() is implemented using smp_store_mb() and
2262	 * queue_me() calls spin_unlock() upon completion, both serializing
2263	 * access to the hash list and forcing another memory barrier.
2264	 */
2265	set_current_state(TASK_INTERRUPTIBLE);
2266	queue_me(q, hb);
2267
2268	/* Arm the timer */
2269	if (timeout)
2270		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2271
2272	/*
2273	 * If we have been removed from the hash list, then another task
2274	 * has tried to wake us, and we can skip the call to schedule().
2275	 */
2276	if (likely(!plist_node_empty(&q->list))) {
2277		/*
2278		 * If the timer has already expired, current will already be
2279		 * flagged for rescheduling. Only call schedule if there
2280		 * is no timeout, or if it has yet to expire.
2281		 */
2282		if (!timeout || timeout->task)
2283			freezable_schedule();
2284	}
2285	__set_current_state(TASK_RUNNING);
2286}
2287
2288/**
2289 * futex_wait_setup() - Prepare to wait on a futex
2290 * @uaddr:	the futex userspace address
2291 * @val:	the expected value
2292 * @flags:	futex flags (FLAGS_SHARED, etc.)
2293 * @q:		the associated futex_q
2294 * @hb:		storage for hash_bucket pointer to be returned to caller
2295 *
2296 * Setup the futex_q and locate the hash_bucket.  Get the futex value and
2297 * compare it with the expected value.  Handle atomic faults internally.
2298 * Return with the hb lock held and a q.key reference on success, and unlocked
2299 * with no q.key reference on failure.
2300 *
2301 * Return:
2302 *  0 - uaddr contains val and hb has been locked;
2303 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2304 */
2305static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2306			   struct futex_q *q, struct futex_hash_bucket **hb)
2307{
2308	u32 uval;
2309	int ret;
2310
2311	/*
2312	 * Access the page AFTER the hash-bucket is locked.
2313	 * Order is important:
2314	 *
2315	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2316	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
2317	 *
2318	 * The basic logical guarantee of a futex is that it blocks ONLY
2319	 * if cond(var) is known to be true at the time of blocking, for
2320	 * any cond.  If we locked the hash-bucket after testing *uaddr, that
2321	 * would open a race condition where we could block indefinitely with
2322	 * cond(var) false, which would violate the guarantee.
2323	 *
2324	 * On the other hand, we insert q and release the hash-bucket only
2325	 * after testing *uaddr.  This guarantees that futex_wait() will NOT
2326 * absorb a wakeup if *uaddr does not match the desired value
2327	 * while the syscall executes.
2328	 */
2329retry:
2330	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2331	if (unlikely(ret != 0))
2332		return ret;
2333
2334retry_private:
2335	*hb = queue_lock(q);
2336
2337	ret = get_futex_value_locked(&uval, uaddr);
2338
2339	if (ret) {
2340		queue_unlock(*hb);
2341
2342		ret = get_user(uval, uaddr);
2343		if (ret)
2344			goto out;
2345
2346		if (!(flags & FLAGS_SHARED))
2347			goto retry_private;
2348
2349		put_futex_key(&q->key);
2350		goto retry;
2351	}
2352
2353	if (uval != val) {
2354		queue_unlock(*hb);
2355		ret = -EWOULDBLOCK;
2356	}
2357
2358out:
2359	if (ret)
2360		put_futex_key(&q->key);
2361	return ret;
2362}
2363
2364static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2365		      ktime_t *abs_time, u32 bitset)
2366{
2367	struct hrtimer_sleeper timeout, *to = NULL;
2368	struct restart_block *restart;
2369	struct futex_hash_bucket *hb;
2370	struct futex_q q = futex_q_init;
2371	int ret;
2372
2373	if (!bitset)
2374		return -EINVAL;
2375	q.bitset = bitset;
2376
2377	if (abs_time) {
2378		to = &timeout;
2379
2380		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2381				      CLOCK_REALTIME : CLOCK_MONOTONIC,
2382				      HRTIMER_MODE_ABS);
2383		hrtimer_init_sleeper(to, current);
2384		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2385					     current->timer_slack_ns);
2386	}
2387
2388retry:
2389	/*
2390	 * Prepare to wait on uaddr. On success, holds hb lock and increments
2391	 * q.key refs.
2392	 */
2393	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2394	if (ret)
2395		goto out;
2396
2397	/* queue_me and wait for wakeup, timeout, or a signal. */
2398	futex_wait_queue_me(hb, &q, to);
2399
2400	/* If we were woken (and unqueued), we succeeded, whatever. */
2401	ret = 0;
2402	/* unqueue_me() drops q.key ref */
2403	if (!unqueue_me(&q))
2404		goto out;
2405	ret = -ETIMEDOUT;
2406	if (to && !to->task)
2407		goto out;
2408
2409	/*
2410	 * We expect signal_pending(current), but we might be the
2411	 * victim of a spurious wakeup as well.
2412	 */
2413	if (!signal_pending(current))
2414		goto retry;
2415
2416	ret = -ERESTARTSYS;
2417	if (!abs_time)
2418		goto out;
2419
2420	restart = &current->restart_block;
2421	restart->fn = futex_wait_restart;
2422	restart->futex.uaddr = uaddr;
2423	restart->futex.val = val;
2424	restart->futex.time = abs_time->tv64;
2425	restart->futex.bitset = bitset;
2426	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2427
2428	ret = -ERESTART_RESTARTBLOCK;
2429
2430out:
2431	if (to) {
2432		hrtimer_cancel(&to->timer);
2433		destroy_hrtimer_on_stack(&to->timer);
2434	}
2435	return ret;
2436}
2437
2438
2439static long futex_wait_restart(struct restart_block *restart)
2440{
2441	u32 __user *uaddr = restart->futex.uaddr;
2442	ktime_t t, *tp = NULL;
2443
2444	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2445		t.tv64 = restart->futex.time;
2446		tp = &t;
2447	}
2448	restart->fn = do_no_restart_syscall;
2449
2450	return (long)futex_wait(uaddr, restart->futex.flags,
2451				restart->futex.val, tp, restart->futex.bitset);
2452}
2453
2454
2455/*
2456 * Userspace tried a 0 -> TID atomic transition of the futex value
2457 * and failed. The kernel side here does the whole locking operation:
2458 * if there are waiters then it will block as a consequence of relying
2459 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2460 * a 0 value of the futex too.)
2461 *
2462 * Also serves as the FUTEX_TRYLOCK_PI operation, with trylock semantics.
2463 */
2464static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2465			 ktime_t *time, int trylock)
2466{
2467	struct hrtimer_sleeper timeout, *to = NULL;
2468	struct futex_hash_bucket *hb;
2469	struct futex_q q = futex_q_init;
2470	int res, ret;
2471
2472	if (refill_pi_state_cache())
2473		return -ENOMEM;
2474
2475	if (time) {
2476		to = &timeout;
2477		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2478				      HRTIMER_MODE_ABS);
2479		hrtimer_init_sleeper(to, current);
2480		hrtimer_set_expires(&to->timer, *time);
2481	}
2482
2483retry:
2484	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2485	if (unlikely(ret != 0))
2486		goto out;
2487
2488retry_private:
2489	hb = queue_lock(&q);
2490
2491	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2492	if (unlikely(ret)) {
2493		/*
2494		 * Atomic work succeeded and we got the lock,
2495		 * or failed. Either way, we do _not_ block.
2496		 */
2497		switch (ret) {
2498		case 1:
2499			/* We got the lock. */
2500			ret = 0;
2501			goto out_unlock_put_key;
2502		case -EFAULT:
2503			goto uaddr_faulted;
2504		case -EAGAIN:
2505			/*
2506			 * Two reasons for this:
2507			 * - Task is exiting and we just wait for the
2508			 *   exit to complete.
2509			 * - The user space value changed.
2510			 */
2511			queue_unlock(hb);
2512			put_futex_key(&q.key);
2513			cond_resched();
2514			goto retry;
2515		default:
2516			goto out_unlock_put_key;
2517		}
2518	}
2519
2520	/*
2521	 * Only actually queue now that the atomic ops are done:
2522	 */
2523	queue_me(&q, hb);
2524
2525	WARN_ON(!q.pi_state);
2526	/*
2527	 * Block on the PI mutex:
2528	 */
2529	if (!trylock) {
2530		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
2531	} else {
2532		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2533		/* Fixup the trylock return value: */
2534		ret = ret ? 0 : -EWOULDBLOCK;
2535	}
2536
2537	spin_lock(q.lock_ptr);
2538	/*
2539	 * Fixup the pi_state owner and possibly acquire the lock if we
2540	 * haven't already.
2541	 */
2542	res = fixup_owner(uaddr, &q, !ret);
2543	/*
2544	 * If fixup_owner() returned an error, propagate that.  If it acquired
2545	 * the lock, clear our -ETIMEDOUT or -EINTR.
2546	 */
2547	if (res)
2548		ret = (res < 0) ? res : 0;
2549
2550	/*
2551	 * If fixup_owner() faulted and was unable to handle the fault, unlock
2552	 * the rt_mutex and return the fault to userspace.
2553	 */
2554	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2555		rt_mutex_unlock(&q.pi_state->pi_mutex);
2556
2557	/* Unqueue and drop the lock */
2558	unqueue_me_pi(&q);
2559
2560	goto out_put_key;
2561
2562out_unlock_put_key:
2563	queue_unlock(hb);
2564
2565out_put_key:
2566	put_futex_key(&q.key);
2567out:
2568	if (to)
2569		destroy_hrtimer_on_stack(&to->timer);
2570	return ret != -EINTR ? ret : -ERESTARTNOINTR;
2571
2572uaddr_faulted:
2573	queue_unlock(hb);
2574
2575	ret = fault_in_user_writeable(uaddr);
2576	if (ret)
2577		goto out_put_key;
2578
2579	if (!(flags & FLAGS_SHARED))
2580		goto retry_private;
2581
2582	put_futex_key(&q.key);
2583	goto retry;
2584}
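
/*
 * Illustrative userspace counterpart (a sketch under assumed names, not
 * part of this file): the fast path that futex_lock_pi() above backs up.
 * The lock word is 0 when free and holds the owner's TID when held, so
 * the kernel is entered only on contention. Assumes <linux/futex.h>,
 * <sys/syscall.h>, <unistd.h> and <stdint.h>; 'pi_lock' is hypothetical.
 *
 *	static void pi_lock(uint32_t *futex, uint32_t tid)
 *	{
 *		// uncontended: atomically do the 0 -> TID transition
 *		if (__sync_bool_compare_and_swap(futex, 0, tid))
 *			return;
 *		// contended: block on the rt_mutex with PI boosting; on
 *		// return the kernel has installed us as the owner
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 */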
2585
2586/*
2587 * Userspace attempted a TID -> 0 atomic transition, and failed.
2588 * This is the in-kernel slowpath: we look up the PI state (if any),
2589 * and do the rt-mutex unlock.
2590 */
2591static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2592{
2593	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2594	union futex_key key = FUTEX_KEY_INIT;
2595	struct futex_hash_bucket *hb;
2596	struct futex_q *match;
2597	int ret;
2598
2599retry:
2600	if (get_user(uval, uaddr))
2601		return -EFAULT;
2602	/*
2603	 * We release only a lock we actually own:
2604	 */
2605	if ((uval & FUTEX_TID_MASK) != vpid)
2606		return -EPERM;
2607
2608	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2609	if (ret)
2610		return ret;
2611
2612	hb = hash_futex(&key);
2613	spin_lock(&hb->lock);
2614
2615	/*
2616	 * Check waiters first. We do not trust user space values at
2617	 * all and we at least want to know if user space fiddled
2618	 * with the futex value instead of blindly unlocking.
2619	 */
2620	match = futex_top_waiter(hb, &key);
2621	if (match) {
2622		ret = wake_futex_pi(uaddr, uval, match, hb);
2623		/*
2624		 * In case of success wake_futex_pi dropped the hash
2625		 * bucket lock.
2626		 */
2627		if (!ret)
2628			goto out_putkey;
2629		/*
2630		 * The atomic access to the futex value generated a
2631		 * pagefault, so retry the user-access and the wakeup:
2632		 */
2633		if (ret == -EFAULT)
2634			goto pi_faulted;
2635		/*
2636		 * An unconditional UNLOCK_PI op raced against a waiter
2637		 * setting the FUTEX_WAITERS bit. Try again.
2638		 */
2639		if (ret == -EAGAIN) {
2640			spin_unlock(&hb->lock);
2641			put_futex_key(&key);
2642			goto retry;
2643		}
2644		/*
2645		 * wake_futex_pi has detected invalid state. Tell user
2646		 * space.
2647		 */
2648		goto out_unlock;
2649	}
2650
2651	/*
2652	 * We have no kernel internal state, i.e. no waiters in the
2653	 * kernel. Waiters which are about to queue themselves are stuck
2654	 * on hb->lock. So we can safely ignore them. We preserve neither
2655	 * the WAITERS bit nor the OWNER_DIED one. We are the
2656	 * owner.
2657	 */
2658	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
2659		goto pi_faulted;
2660
2661	/*
2662	 * If uval has changed, let user space handle it.
2663	 */
2664	ret = (curval == uval) ? 0 : -EAGAIN;
2665
2666out_unlock:
2667	spin_unlock(&hb->lock);
2668out_putkey:
2669	put_futex_key(&key);
2670	return ret;
2671
2672pi_faulted:
2673	spin_unlock(&hb->lock);
2674	put_futex_key(&key);
2675
2676	ret = fault_in_user_writeable(uaddr);
2677	if (!ret)
2678		goto retry;
2679
2680	return ret;
2681}
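
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * the TID -> 0 fast path whose failure lands in futex_unlock_pi() above.
 * With no FUTEX_WAITERS bit set the owner can release the lock without
 * entering the kernel. 'pi_unlock' is a hypothetical name.
 *
 *	static void pi_unlock(uint32_t *futex, uint32_t tid)
 *	{
 *		// no waiters: atomically do the TID -> 0 transition
 *		if (__sync_bool_compare_and_swap(futex, tid, 0))
 *			return;
 *		// waiters exist: let the kernel hand the rt_mutex to the
 *		// top waiter and fix up the futex word
 *		syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */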
2682
2683/**
2684 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2685 * @hb:		the hash_bucket futex_q was originally enqueued on
2686 * @q:		the futex_q woken while waiting to be requeued
2687 * @key2:	the futex_key of the requeue target futex
2688 * @timeout:	the timeout associated with the wait (NULL if none)
2689 *
2690 * Detect if the task was woken on the initial futex as opposed to the requeue
2691 * target futex.  If so, determine if it was a timeout or a signal that caused
2692 * the wakeup and return the appropriate error code to the caller.  Must be
2693 * called with the hb lock held.
2694 *
2695 * Return:
2696 *  0 = no early wakeup detected;
2697 * <0 = -ETIMEDOUT, -ERESTARTNOINTR or -EWOULDBLOCK (spurious wakeup)
2698 */
2699static inline
2700int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2701				   struct futex_q *q, union futex_key *key2,
2702				   struct hrtimer_sleeper *timeout)
2703{
2704	int ret = 0;
2705
2706	/*
2707	 * With the hb lock held, we avoid races while we process the wakeup.
2708	 * We only need to hold hb (and not hb2) to ensure atomicity as the
2709	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2710	 * It can't be requeued from uaddr2 to something else since we don't
2711	 * support a PI aware source futex for requeue.
2712	 */
2713	if (!match_futex(&q->key, key2)) {
2714		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2715		/*
2716		 * We were woken prior to requeue by a timeout or a signal.
2717		 * Unqueue the futex_q and determine which it was.
2718		 */
2719		plist_del(&q->list, &hb->chain);
2720		hb_waiters_dec(hb);
2721
2722		/* Handle spurious wakeups gracefully */
2723		ret = -EWOULDBLOCK;
2724		if (timeout && !timeout->task)
2725			ret = -ETIMEDOUT;
2726		else if (signal_pending(current))
2727			ret = -ERESTARTNOINTR;
2728	}
2729	return ret;
2730}
2731
2732/**
2733 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2734 * @uaddr:	the futex we initially wait on (non-pi)
2735 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2736 *		the same type, no requeueing from private to shared, etc.
2737 * @val:	the expected value of uaddr
2738 * @abs_time:	absolute timeout
2739 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
2740 * @uaddr2:	the pi futex we will take prior to returning to user-space
2741 *
2742 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2743 * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
2744 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2745 * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
2746 * without one, the pi logic would not know which task to boost/deboost
2747 * should the need arise.
2748 *
2749 * We call schedule() in futex_wait_queue_me() when we enqueue, and return
2750 * there via the following paths:
2751 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2752 * 2) wakeup on uaddr2 after a requeue
2753 * 3) signal
2754 * 4) timeout
2755 *
2756 * If 3, we clean up and return -ERESTARTNOINTR.
2757 *
2758 * If 2, we may then block on trying to take the rt_mutex and return via:
2759 * 5) successful lock
2760 * 6) signal
2761 * 7) timeout
2762 * 8) other lock acquisition failure
2763 *
2764 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2765 *
2766 * If 4 or 7, we clean up and return -ETIMEDOUT.
2767 *
2768 * Return:
2769 *  0 - On success;
2770 * <0 - On error
2771 */
2772static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2773				 u32 val, ktime_t *abs_time, u32 bitset,
2774				 u32 __user *uaddr2)
2775{
2776	struct hrtimer_sleeper timeout, *to = NULL;
2777	struct rt_mutex_waiter rt_waiter;
2778	struct rt_mutex *pi_mutex = NULL;
2779	struct futex_hash_bucket *hb;
2780	union futex_key key2 = FUTEX_KEY_INIT;
2781	struct futex_q q = futex_q_init;
2782	int res, ret;
2783
2784	if (uaddr == uaddr2)
2785		return -EINVAL;
2786
2787	if (!bitset)
2788		return -EINVAL;
2789
2790	if (abs_time) {
2791		to = &timeout;
2792		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2793				      CLOCK_REALTIME : CLOCK_MONOTONIC,
2794				      HRTIMER_MODE_ABS);
2795		hrtimer_init_sleeper(to, current);
2796		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2797					     current->timer_slack_ns);
2798	}
2799
2800	/*
2801	 * The waiter is allocated on our stack, manipulated by the requeue
2802	 * code while we sleep on uaddr.
2803	 */
2804	debug_rt_mutex_init_waiter(&rt_waiter);
2805	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2806	RB_CLEAR_NODE(&rt_waiter.tree_entry);
2807	rt_waiter.task = NULL;
2808
2809	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2810	if (unlikely(ret != 0))
2811		goto out;
2812
2813	q.bitset = bitset;
2814	q.rt_waiter = &rt_waiter;
2815	q.requeue_pi_key = &key2;
2816
2817	/*
2818	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2819	 * count.
2820	 */
2821	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2822	if (ret)
2823		goto out_key2;
2824
2825	/*
2826	 * The check above which compares uaddrs is not sufficient for
2827	 * shared futexes. We need to compare the keys:
2828	 */
2829	if (match_futex(&q.key, &key2)) {
2830		queue_unlock(hb);
2831		ret = -EINVAL;
2832		goto out_put_keys;
2833	}
2834
2835	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
2836	futex_wait_queue_me(hb, &q, to);
2837
2838	spin_lock(&hb->lock);
2839	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2840	spin_unlock(&hb->lock);
2841	if (ret)
2842		goto out_put_keys;
2843
2844	/*
2845	 * In order for us to be here, we know our q.key == key2, and since
2846	 * we took the hb->lock above, we also know that futex_requeue() has
2847	 * completed and we no longer have to concern ourselves with a wakeup
2848	 * race with the atomic proxy lock acquisition by the requeue code. The
2849	 * futex_requeue dropped our key1 reference and incremented our key2
2850	 * reference count.
2851	 */
2852
2853	/* Check if the requeue code acquired the second futex for us. */
2854	if (!q.rt_waiter) {
2855		/*
2856		 * Got the lock. We might not be the anticipated owner if we
2857		 * did a lock-steal - fix up the PI-state in that case.
2858		 */
2859		if (q.pi_state && (q.pi_state->owner != current)) {
2860			spin_lock(q.lock_ptr);
2861			ret = fixup_pi_state_owner(uaddr2, &q, current);
2862			/*
2863			 * Drop the reference to the pi state which
2864			 * the requeue_pi() code acquired for us.
2865			 */
2866			put_pi_state(q.pi_state);
2867			spin_unlock(q.lock_ptr);
2868		}
2869	} else {
2870		/*
2871		 * We have been woken up by futex_unlock_pi(), a timeout, or a
2872		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2873		 * the pi_state.
2874		 */
2875		WARN_ON(!q.pi_state);
2876		pi_mutex = &q.pi_state->pi_mutex;
2877		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
2878		debug_rt_mutex_free_waiter(&rt_waiter);
2879
2880		spin_lock(q.lock_ptr);
2881		/*
2882		 * Fixup the pi_state owner and possibly acquire the lock if we
2883		 * haven't already.
2884		 */
2885		res = fixup_owner(uaddr2, &q, !ret);
2886		/*
2887		 * If fixup_owner() returned an error, propagate that.  If it
2888		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2889		 */
2890		if (res)
2891			ret = (res < 0) ? res : 0;
2892
2893		/* Unqueue and drop the lock. */
2894		unqueue_me_pi(&q);
2895	}
2896
2897	/*
2898	 * If fixup_pi_state_owner() faulted and was unable to handle the
2899	 * fault, unlock the rt_mutex and return the fault to userspace.
2900	 */
2901	if (ret == -EFAULT) {
2902		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2903			rt_mutex_unlock(pi_mutex);
2904	} else if (ret == -EINTR) {
2905		/*
2906		 * We've already been requeued, but cannot restart by calling
2907		 * futex_lock_pi() directly. We could restart this syscall, but
2908		 * it would detect that the user space "val" changed and return
2909		 * -EWOULDBLOCK.  Save the overhead of the restart and return
2910		 * -EWOULDBLOCK directly.
2911		 */
2912		ret = -EWOULDBLOCK;
2913	}
2914
2915out_put_keys:
2916	put_futex_key(&q.key);
2917out_key2:
2918	put_futex_key(&key2);
2919
2920out:
2921	if (to) {
2922		hrtimer_cancel(&to->timer);
2923		destroy_hrtimer_on_stack(&to->timer);
2924	}
2925	return ret;
2926}
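
/*
 * Illustrative pairing (a sketch, not part of this file): the condvar
 * style protocol futex_wait_requeue_pi() enables, roughly what a PI
 * aware pthread_cond_wait()/pthread_cond_broadcast() would issue. The
 * 'cond' and 'mutex' structures and the pi_lock()/pi_unlock() helpers
 * are hypothetical; error handling and retry loops are omitted.
 *
 *	// waiter, called with the PI mutex held:
 *	unsigned int seq = cond->seq;
 *	pi_unlock(&mutex->lock, tid);
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI, seq,
 *		NULL, &mutex->lock, 0);
 *	// on success the kernel returns with mutex->lock acquired
 *
 *	// broadcaster: wake one waiter with the lock, requeue the rest
 *	// directly onto the PI futex backing the mutex:
 *	cond->seq++;
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)(unsigned long)INT_MAX, &mutex->lock, cond->seq);
 */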
2927
2928/*
2929 * Support for robust futexes: the kernel cleans up held futexes at
2930 * thread exit time.
2931 *
2932 * Implementation: user-space maintains a per-thread list of locks it
2933 * is holding. Upon do_exit(), the kernel carefully walks this list,
2934 * and marks all locks that are owned by this thread with the
2935 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2936 * always manipulated with the lock held, so the list is private and
2937 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2938 * field, to allow the kernel to clean up if the thread dies after
2939 * acquiring the lock, but just before it could have added itself to
2940 * the list. There can only be one such pending lock.
2941 */
2942
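/*
 * Illustrative registration (a sketch, not part of this file): what a
 * thread library does so that exit_robust_list() below can find its
 * locks. 'my_mutex', 'rlink' and 'lock' are hypothetical names; the
 * futex word stores the owner TID so handle_futex_death() can match it.
 *
 *	struct my_mutex {
 *		struct robust_list rlink;	// links held locks together
 *		uint32_t lock;			// futex word, holds owner TID
 *	};
 *
 *	static __thread struct robust_list_head rlist;
 *
 *	// at thread start:
 *	rlist.list.next = &rlist.list;		// empty, circular
 *	rlist.futex_offset = offsetof(struct my_mutex, lock) -
 *			     offsetof(struct my_mutex, rlink);
 *	rlist.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &rlist, sizeof(rlist));
 *	// around each acquire/release, userspace points list_op_pending
 *	// at the entry being (un)linked, updates the list, clears it.
 */
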
2943/**
2944 * sys_set_robust_list() - Set the robust-futex list head of a task
2945 * @head:	pointer to the list-head
2946 * @len:	length of the list-head, as userspace expects
2947 */
2948SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2949		size_t, len)
2950{
2951	if (!futex_cmpxchg_enabled)
2952		return -ENOSYS;
2953	/*
2954	 * The kernel knows only one size for now:
2955	 */
2956	if (unlikely(len != sizeof(*head)))
2957		return -EINVAL;
2958
2959	current->robust_list = head;
2960
2961	return 0;
2962}
2963
2964/**
2965 * sys_get_robust_list() - Get the robust-futex list head of a task
2966 * @pid:	pid of the process [zero for current task]
2967 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
2968 * @len_ptr:	pointer to a length field, the kernel fills in the header size
2969 */
2970SYSCALL_DEFINE3(get_robust_list, int, pid,
2971		struct robust_list_head __user * __user *, head_ptr,
2972		size_t __user *, len_ptr)
2973{
2974	struct robust_list_head __user *head;
2975	unsigned long ret;
2976	struct task_struct *p;
2977
2978	if (!futex_cmpxchg_enabled)
2979		return -ENOSYS;
2980
2981	rcu_read_lock();
2982
2983	ret = -ESRCH;
2984	if (!pid)
2985		p = current;
2986	else {
2987		p = find_task_by_vpid(pid);
2988		if (!p)
2989			goto err_unlock;
2990	}
2991
2992	ret = -EPERM;
2993	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
2994		goto err_unlock;
2995
2996	head = p->robust_list;
2997	rcu_read_unlock();
2998
2999	if (put_user(sizeof(*head), len_ptr))
3000		return -EFAULT;
3001	return put_user(head, head_ptr);
3002
3003err_unlock:
3004	rcu_read_unlock();
3005
3006	return ret;
3007}
3008
3009/*
3010 * Process a futex-list entry, check whether it's owned by the
3011 * dying task, and do notification if so:
3012 */
3013int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3014{
3015	u32 uval, uninitialized_var(nval), mval;
3016
3017retry:
3018	if (get_user(uval, uaddr))
3019		return -1;
3020
3021	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
3022		/*
3023		 * Ok, this dying thread is truly holding a futex
3024		 * of interest. Set the OWNER_DIED bit atomically
3025		 * via cmpxchg, and if the value had FUTEX_WAITERS
3026		 * set, wake up a waiter (if any). (We have to do a
3027		 * futex_wake() even if OWNER_DIED is already set -
3028		 * to handle the rare but possible case of recursive
3029		 * thread-death.) The rest of the cleanup is done in
3030		 * userspace.
3031		 */
3032		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3033		/*
3034		 * We are not holding a lock here, but we want to have
3035		 * the pagefault_disable/enable() protection because
3036		 * we want to handle the fault gracefully. If the
3037		 * access fails we try to fault in the futex with R/W
3038		 * verification via get_user_pages. get_user() above
3039		 * does not guarantee R/W access. If that fails we
3040		 * give up and leave the futex locked.
3041		 */
3042		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
3043			if (fault_in_user_writeable(uaddr))
3044				return -1;
3045			goto retry;
3046		}
3047		if (nval != uval)
3048			goto retry;
3049
3050		/*
3051		 * Wake robust non-PI futexes here. The wakeup of
3052		 * PI futexes happens in exit_pi_state_list():
3053		 */
3054		if (!pi && (uval & FUTEX_WAITERS))
3055			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3056	}
3057	return 0;
3058}
3059
3060/*
3061 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3062 */
3063static inline int fetch_robust_entry(struct robust_list __user **entry,
3064				     struct robust_list __user * __user *head,
3065				     unsigned int *pi)
3066{
3067	unsigned long uentry;
3068
3069	if (get_user(uentry, (unsigned long __user *)head))
3070		return -EFAULT;
3071
3072	*entry = (void __user *)(uentry & ~1UL);
3073	*pi = uentry & 1;
3074
3075	return 0;
3076}
3077
3078/*
3079 * Walk curr->robust_list (very carefully, it's a userspace list!)
3080 * and mark any locks found there dead, and notify any waiters.
3081 *
3082 * We silently return on any sign of a list-walking problem.
3083 */
3084void exit_robust_list(struct task_struct *curr)
3085{
3086	struct robust_list_head __user *head = curr->robust_list;
3087	struct robust_list __user *entry, *next_entry, *pending;
3088	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3089	unsigned int uninitialized_var(next_pi);
3090	unsigned long futex_offset;
3091	int rc;
3092
3093	if (!futex_cmpxchg_enabled)
3094		return;
3095
3096	/*
3097	 * Fetch the list head (which was registered earlier, via
3098	 * sys_set_robust_list()):
3099	 */
3100	if (fetch_robust_entry(&entry, &head->list.next, &pi))
3101		return;
3102	/*
3103	 * Fetch the relative futex offset:
3104	 */
3105	if (get_user(futex_offset, &head->futex_offset))
3106		return;
3107	/*
3108	 * Fetch any possibly pending lock-add first, and handle it
3109	 * if it exists:
3110	 */
3111	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3112		return;
3113
3114	next_entry = NULL;	/* avoid warning with gcc */
3115	while (entry != &head->list) {
3116		/*
3117		 * Fetch the next entry in the list before calling
3118		 * handle_futex_death:
3119		 */
3120		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3121		/*
3122		 * A pending lock might already be on the list, so
3123		 * don't process it twice:
3124		 */
3125		if (entry != pending)
3126			if (handle_futex_death((void __user *)entry + futex_offset,
3127						curr, pi))
3128				return;
3129		if (rc)
3130			return;
3131		entry = next_entry;
3132		pi = next_pi;
3133		/*
3134		 * Avoid excessively long or circular lists:
3135		 */
3136		if (!--limit)
3137			break;
3138
3139		cond_resched();
3140	}
3141
3142	if (pending)
3143		handle_futex_death((void __user *)pending + futex_offset,
3144				   curr, pip);
3145}
3146
3147long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3148		u32 __user *uaddr2, u32 val2, u32 val3)
3149{
3150	int cmd = op & FUTEX_CMD_MASK;
3151	unsigned int flags = 0;
3152
3153	if (!(op & FUTEX_PRIVATE_FLAG))
3154		flags |= FLAGS_SHARED;
3155
3156	if (op & FUTEX_CLOCK_REALTIME) {
3157		flags |= FLAGS_CLOCKRT;
3158		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3159		    cmd != FUTEX_WAIT_REQUEUE_PI)
3160			return -ENOSYS;
3161	}
3162
3163	switch (cmd) {
3164	case FUTEX_LOCK_PI:
3165	case FUTEX_UNLOCK_PI:
3166	case FUTEX_TRYLOCK_PI:
3167	case FUTEX_WAIT_REQUEUE_PI:
3168	case FUTEX_CMP_REQUEUE_PI:
3169		if (!futex_cmpxchg_enabled)
3170			return -ENOSYS;
3171	}
3172
3173	switch (cmd) {
3174	case FUTEX_WAIT:
3175		val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
3176	case FUTEX_WAIT_BITSET:
3177		return futex_wait(uaddr, flags, val, timeout, val3);
3178	case FUTEX_WAKE:
3179		val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
3180	case FUTEX_WAKE_BITSET:
3181		return futex_wake(uaddr, flags, val, val3);
3182	case FUTEX_REQUEUE:
3183		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3184	case FUTEX_CMP_REQUEUE:
3185		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3186	case FUTEX_WAKE_OP:
3187		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3188	case FUTEX_LOCK_PI:
3189		return futex_lock_pi(uaddr, flags, timeout, 0);
3190	case FUTEX_UNLOCK_PI:
3191		return futex_unlock_pi(uaddr, flags);
3192	case FUTEX_TRYLOCK_PI:
3193		return futex_lock_pi(uaddr, flags, NULL, 1);
3194	case FUTEX_WAIT_REQUEUE_PI:
3195		val3 = FUTEX_BITSET_MATCH_ANY;
3196		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3197					     uaddr2);
3198	case FUTEX_CMP_REQUEUE_PI:
3199		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3200	}
3201	return -ENOSYS;
3202}
3203
3204
3205SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3206		struct timespec __user *, utime, u32 __user *, uaddr2,
3207		u32, val3)
3208{
3209	struct timespec ts;
3210	ktime_t t, *tp = NULL;
3211	u32 val2 = 0;
3212	int cmd = op & FUTEX_CMD_MASK;
3213
3214	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3215		      cmd == FUTEX_WAIT_BITSET ||
3216		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
3217		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3218			return -EFAULT;
3219		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
3220			return -EFAULT;
3221		if (!timespec_valid(&ts))
3222			return -EINVAL;
3223
3224		t = timespec_to_ktime(ts);
3225		if (cmd == FUTEX_WAIT)
3226			t = ktime_add_safe(ktime_get(), t);
3227		tp = &t;
3228	}
3229	/*
3230	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3231	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3232	 */
3233	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3234	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3235		val2 = (u32) (unsigned long) utime;
3236
3237	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3238}
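
/*
 * Illustrative use (a sketch, not part of this file): the canonical
 * waiter/waker pairing behind the FUTEX_WAIT/FUTEX_WAKE ops multiplexed
 * above. Note that FUTEX_WAIT passes a relative timeout, which the
 * wrapper above turns into an absolute expiry. 'addr' and 'val' are
 * hypothetical.
 *
 *	// waiter: sleep only while *addr still equals val
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	syscall(SYS_futex, addr, FUTEX_WAIT, val, &ts, NULL, 0);
 *
 *	// waker: publish the state change, then wake all sleepers
 *	__sync_fetch_and_add(addr, 1);
 *	syscall(SYS_futex, addr, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
 */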
3239
3240static void __init futex_detect_cmpxchg(void)
3241{
3242#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3243	u32 curval;
3244
3245	/*
3246	 * This will fail and we want it. Some arch implementations do
3247	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3248	 * functionality. We want to know that before we call in any
3249	 * of the complex code paths. Also we want to prevent
3250	 * registration of robust lists in that case. NULL is
3251	 * guaranteed to fault and we get -EFAULT on functional
3252	 * implementation, the non-functional ones will return
3253	 * -ENOSYS.
3254	 */
3255	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3256		futex_cmpxchg_enabled = 1;
3257#endif
3258}
3259
3260static int __init futex_init(void)
3261{
3262	unsigned int futex_shift;
3263	unsigned long i;
3264
3265#if CONFIG_BASE_SMALL
3266	futex_hashsize = 16;
3267#else
3268	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3269#endif
3270
3271	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3272					       futex_hashsize, 0,
3273					       futex_hashsize < 256 ? HASH_SMALL : 0,
3274					       &futex_shift, NULL,
3275					       futex_hashsize, futex_hashsize);
3276	futex_hashsize = 1UL << futex_shift;
3277
3278	futex_detect_cmpxchg();
3279
3280	for (i = 0; i < futex_hashsize; i++) {
3281		atomic_set(&futex_queues[i].waiters, 0);
3282		plist_head_init(&futex_queues[i].chain);
3283		spin_lock_init(&futex_queues[i].lock);
3284	}
3285
3286	return 0;
3287}
3288__initcall(futex_init);