/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/bootmem.h>
#include <linux/fault-inject.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, the
 * waker can avoid taking the hb spinlock and simply return. In order for
 * this optimization to work, ordering guarantees must exist so that a
 * waiter being added to the list is visible to a waker concurrently
 * checking the list, avoiding scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even though the wait call
 * may ultimately return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued to another
 * address, we always increment the waiter count of the destination bucket
 * before acquiring the lock and decrement it again after releasing it;
 * this is done by double_lock_hb() and double_unlock_hb(), respectively.
 * The code that actually moves the futexes between hash buckets
 * (requeue_futex) does the additional required waiter count housekeeping.
 */
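
/*
 * For orientation, a user space counterpart (an illustrative sketch,
 * not kernel code; the sem_* helper names are made up). A minimal
 * binary semaphore built on the wait/wake protocol above could look
 * like:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int fword;	// 0: free, 1: taken
 *
 *	static void sem_wait_sketch(void)
 *	{
 *		while (__sync_lock_test_and_set(&fword, 1)) {
 *			// Saw 1: sleep until a waker changes it. The
 *			// kernel rechecks fword == 1 under the hash
 *			// bucket lock, so a concurrent wake is not lost.
 *			syscall(SYS_futex, &fword, FUTEX_WAIT,
 *				1, NULL, NULL, 0);
 *		}
 *	}
 *
 *	static void sem_post_sketch(void)
 *	{
 *		__sync_lock_release(&fword);	// fword = 0
 *		syscall(SYS_futex, &fword, FUTEX_WAKE,
 *			1, NULL, NULL, 0);
 *	}
 */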

#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me() */
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};
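
/*
 * Illustrative waiter-side sketch of the woken protocol documented
 * above (heavily abridged; see unqueue_me() further down in this file
 * for the real thing):
 *
 *	// After returning from schedule():
 *	if (plist_node_empty(&q->list))
 *		...	// condition 1: mark_wake_futex() dequeued us
 *
 *	// unqueue_me() then re-reads q->lock_ptr, which the waker
 *	// NULLs (condition 2) only after the plist_del(), with an
 *	// smp_wmb() in between keeping the two stores ordered.
 */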

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues	(__futex_data.queues)
#define futex_hashsize	(__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-private", mode, dir,
				 &fail_futex.ignore_private)) {
		debugfs_remove_recursive(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */
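
/*
 * Example usage (a sketch, assuming the standard fault-injection boot
 * syntax "<interval>,<probability>,<space>,<times>" documented in
 * Documentation/fault-injection/): booting with
 *
 *	fail_futex=10,100,0,-1
 *
 * arms the attribute so that eligible futex operations fail (typically
 * with -EFAULT) at most once every 10 calls, with no expiry. The
 * debugfs file fail_futex/ignore-private, created above when
 * CONFIG_FAULT_INJECTION_DEBUG_FS is set, restricts the failures to
 * shared futexes.
 */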

static inline void futex_get_mm(union futex_key *key)
{
	atomic_inc(&key->private.mm->mm_count);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}
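
/*
 * Worked example with made-up numbers: if futex_hashsize is 256, the
 * mask above is 0xff and a jhash2() result of 0x12345678 selects
 * bucket 0x78. futex_hashsize is always a power of two (it is rounded
 * up with roundup_pow_of_two() at init time), which is what makes the
 * "hash & (futex_hashsize - 1)" masking equivalent to
 * "hash % futex_hashsize".
 */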

/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU-less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU-less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold a reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *		VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping are looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel so warn for now if this happens.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but let's be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}
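
/*
 * Concrete examples of the two key forms (a sketch; the addresses are
 * made up). A private futex at user address 0x7f0040001008 yields:
 *
 *	key->private.mm      = current->mm;
 *	key->private.address = 0x7f0040001000;	// page-aligned
 *	key->both.offset     = 0x8;		// no FUT_OFF_* bit
 *
 * while the same word in a MAP_SHARED file mapping yields:
 *
 *	key->shared.inode    = inode;		// reference held
 *	key->shared.pgoff    = basepage_index(tail);
 *	key->both.offset     = 0x8 | FUT_OFF_INODE;
 *
 * Since a futex must be u32-aligned, the low two bits of the page
 * offset are free to carry FUT_OFF_INODE/FUT_OFF_MMSHARED, letting
 * hash_futex() and match_futex() treat all variants uniformly via
 * key->both.
 */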

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 *
 * Must be called with the hb lock held.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *	thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non-PI futex.
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list().
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match.
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4].
 *
 * [10]	There is no transient state which leaves owner and user space
 *	TID out of sync.
 */
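
/*
 * For reference, the user space value the table above reasons about
 * has this layout (constants from include/uapi/linux/futex.h):
 *
 *	FUTEX_WAITERS	 0x80000000  - there are kernel-side waiters
 *	FUTEX_OWNER_DIED 0x40000000  - set by robust/PI exit cleanup
 *	FUTEX_TID_MASK	 0x3fffffff  - TID of the current owner
 *
 * As a worked example, uval == (FUTEX_WAITERS | 1234) means "owned by
 * TID 1234 with at least one waiter queued in the kernel"; whether
 * that is row [8] or [10] then depends on whether pi_state->owner's
 * TID is 1234.
 */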

/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	WARN_ON(!atomic_read(&pi_state->refcount));

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				return -EINVAL;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_state;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_state;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			return -EINVAL;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		return -EINVAL;
out_state:
	atomic_inc(&pi_state->refcount);
	*ps = pi_state;
	return 0;
}

/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out whether
	 * the task is exiting. To protect against the do_exit change of
	 * the task flags, we do this protected by p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *match = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for. This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *match;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if (unlikely((uval & FUTEX_TID_MASK) == vpid))
		return -EDEADLK;

	if (unlikely(should_fail_futex(true)))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	match = futex_top_waiter(hb, key);
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * No waiter and the user space TID is 0. We are here because the
	 * waiters bit or the owner died bit is set, or because we were
	 * called via FUTEX_CMP_REQUEUE_PI, or because something else
	 * entered the syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourselves to
	 * the owner. If the owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uval, key, ps);
}
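
/*
 * The user space fast path that pairs with this function, as a sketch
 * following Documentation/pi-futex.txt (the helper name is made up):
 *
 *	int pi_lock_sketch(u32 *uaddr)
 *	{
 *		u32 zero = 0;
 *
 *		// Uncontended case: 0 -> TID in user space, no syscall.
 *		if (__atomic_compare_exchange_n(uaddr, &zero, gettid(),
 *				0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *			return 0;
 *		// Contended: futex_lock_pi_atomic() sets FUTEX_WAITERS
 *		// and we block on the rt_mutex until we own the futex.
 *		return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI,
 *			       0, NULL, NULL, 0);
 *	}
 *
 * Unlock is the mirror image: cmpxchg(TID -> 0) in user space, with a
 * FUTEX_UNLOCK_PI syscall when FUTEX_WAITERS was set.
 */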

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock. wake_q_add() grabs reference to p.
	 */
	wake_q_add(wake_q, p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
			 struct futex_hash_bucket *hb)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;
	DEFINE_WAKE_Q(wake_q);
	bool deboost;
	int ret = 0;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. The WAITERS bit is always
	 * kept enabled while there is PI state around. We cleanup the
	 * owner died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
		ret = -EFAULT;
	} else if (curval != uval) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}
	if (ret) {
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		return ret;
	}

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

	/*
	 * First unlock HB so the waiter does not spin on it once it is
	 * woken up. Second, wake up the waiter before the priority is
	 * adjusted. If we deboost first (and lose our higher priority),
	 * the task might get scheduled away before the wake up can take
	 * place.
	 */
	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
	if (deboost)
		rt_mutex_adjust_prio(current);

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wake up */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	DEFINE_WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex(&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex(&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}
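
/*
 * Example op encoding, for illustration: the predefined
 *
 *	FUTEX_OP_CLEAR_WAKE_IF_GT_ONE
 *		== FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1)
 *
 * from include/uapi/linux/futex.h makes futex_atomic_op_inuser()
 * atomically store 0 to *uaddr2 and compare the old value against 1
 * with ">", i.e. "unlock uaddr2 and, if it was contended, also wake
 * up to nr_wake2 of its waiters".
 */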

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{
	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal. Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later. Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  0 - failed to acquire the lock atomically;
 * >0 - acquired the lock, return value is vpid of the top_waiter
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault. If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1. The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 * >=0 - on success, the number of tasks requeued or woken;
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner. However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state. Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}
1791
1792 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1793 /*
1794 * Attempt to acquire uaddr2 and wake the top waiter. If we
1795 * intend to requeue waiters, force setting the FUTEX_WAITERS
1796 * bit. We force this here where we are able to easily handle
1797 * faults rather in the requeue loop below.
1798 */
1799 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1800 &key2, &pi_state, nr_requeue);
1801
1802 /*
1803 * At this point the top_waiter has either taken uaddr2 or is
1804 * waiting on it. If the former, then the pi_state will not
1805 * exist yet, look it up one more time to ensure we have a
1806 * reference to it. If the lock was taken, ret contains the
1807 * vpid of the top waiter task.
1808 * If the lock was not taken, we have pi_state and an initial
1809 * refcount on it. In case of an error we have nothing.
1810 */
1811 if (ret > 0) {
1812 WARN_ON(pi_state);
1813 drop_count++;
1814 task_count++;
1815 /*
1816 * If we acquired the lock, then the user space value
1817 * of uaddr2 should be vpid. It cannot be changed by
1818 * the top waiter as it is blocked on hb2 lock if it
1819 * tries to do so. If something fiddled with it behind
1820 * our back the pi state lookup might unearth it. So
1821 * we rather use the known value than rereading and
1822 * handing potential crap to lookup_pi_state.
1823 *
1824 * If that call succeeds then we have pi_state and an
1825 * initial refcount on it.
1826 */
1827 ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1828 }
1829
1830 switch (ret) {
1831 case 0:
1832 /* We hold a reference on the pi state. */
1833 break;
1834
1835 /* If the above failed, then pi_state is NULL */
1836 case -EFAULT:
1837 double_unlock_hb(hb1, hb2);
1838 hb_waiters_dec(hb2);
1839 put_futex_key(&key2);
1840 put_futex_key(&key1);
1841 ret = fault_in_user_writeable(uaddr2);
1842 if (!ret)
1843 goto retry;
1844 goto out;
1845 case -EAGAIN:
1846 /*
1847 * Two reasons for this:
1848 * - Owner is exiting and we just wait for the
1849 * exit to complete.
1850 * - The user space value changed.
1851 */
1852 double_unlock_hb(hb1, hb2);
1853 hb_waiters_dec(hb2);
1854 put_futex_key(&key2);
1855 put_futex_key(&key1);
1856 cond_resched();
1857 goto retry;
1858 default:
1859 goto out_unlock;
1860 }
1861 }
1862
1863 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1864 if (task_count - nr_wake >= nr_requeue)
1865 break;
1866
1867 if (!match_futex(&this->key, &key1))
1868 continue;
1869
1870 /*
1871 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1872 * be paired with each other and no other futex ops.
1873 *
1874 * We should never be requeueing a futex_q with a pi_state,
1875 * which is awaiting a futex_unlock_pi().
1876 */
1877 if ((requeue_pi && !this->rt_waiter) ||
1878 (!requeue_pi && this->rt_waiter) ||
1879 this->pi_state) {
1880 ret = -EINVAL;
1881 break;
1882 }
1883
1884 /*
1885 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1886 * lock, we already woke the top_waiter. If not, it will be
1887 * woken by futex_unlock_pi().
1888 */
1889 if (++task_count <= nr_wake && !requeue_pi) {
1890 mark_wake_futex(&wake_q, this);
1891 continue;
1892 }
1893
1894 /* Ensure we requeue to the expected futex for requeue_pi. */
1895 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1896 ret = -EINVAL;
1897 break;
1898 }
1899
1900 /*
1901 * Requeue nr_requeue waiters and possibly one more in the case
1902 * of requeue_pi if we couldn't acquire the lock atomically.
1903 */
1904 if (requeue_pi) {
1905 /*
1906 * Prepare the waiter to take the rt_mutex. Take a
1907 * refcount on the pi_state and store the pointer in
1908 * the futex_q object of the waiter.
1909 */
1910 atomic_inc(&pi_state->refcount);
1911 this->pi_state = pi_state;
1912 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1913 this->rt_waiter,
1914 this->task);
1915 if (ret == 1) {
1916 /*
1917 * We got the lock. We do neither drop the
1918 * refcount on pi_state nor clear
1919 * this->pi_state because the waiter needs the
1920 * pi_state for cleaning up the user space
1921 * value. It will drop the refcount after
1922 * doing so.
1923 */
1924 requeue_pi_wake_futex(this, &key2, hb2);
1925 drop_count++;
1926 continue;
1927 } else if (ret) {
1928 /*
1929 * rt_mutex_start_proxy_lock() detected a
1930 * potential deadlock when we tried to queue
1931 * that waiter. Drop the pi_state reference
1932 * which we took above and remove the pointer
1933 * to the state from the waiter's futex_q
1934 * object.
1935 */
1936 this->pi_state = NULL;
1937 put_pi_state(pi_state);
1938 /*
1939 * We stop queueing more waiters and let user
1940 * space deal with the mess.
1941 */
1942 break;
1943 }
1944 }
1945 requeue_futex(this, hb1, hb2, &key2);
1946 drop_count++;
1947 }
1948
1949 /*
1950 * We took an extra initial reference to the pi_state either
1951 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
1952 * need to drop it here again.
1953 */
1954 put_pi_state(pi_state);
1955
1956out_unlock:
1957 double_unlock_hb(hb1, hb2);
1958 wake_up_q(&wake_q);
1959 hb_waiters_dec(hb2);
1960
1961 /*
1962 * drop_futex_key_refs() must be called outside the spinlocks. During
1963 * the requeue we moved futex_q's from the hash bucket at key1 to the
1964 * one at key2 and updated their key pointer. We no longer need to
1965 * hold the references to key1.
1966 */
1967 while (--drop_count >= 0)
1968 drop_futex_key_refs(&key1);
1969
1970out_put_keys:
1971 put_futex_key(&key2);
1972out_put_key1:
1973 put_futex_key(&key1);
1974out:
1975 return ret ? ret : task_count;
1976}
1977
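/*
 * Illustrative sketch, not part of this file: the classic user of
 * FUTEX_CMP_REQUEUE is a condition variable broadcast, which wakes one
 * waiter and requeues the rest onto the mutex futex so that they do not
 * all stampede for the lock at once. The names below are hypothetical
 * userspace structures using the raw syscall; note that the nr_requeue
 * count travels in the otherwise unused timeout argument slot:
 *
 *	cond->seq++;				// publish the change
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE,
 *		1,				// wake at most one waiter
 *		INT_MAX,			// requeue the rest ...
 *		&mutex->val,			// ... onto the mutex futex
 *		cond->seq);			// expected value (cmpval)
 *
 * If cond->seq changes between the userspace check and the syscall,
 * futex_requeue() above returns -EAGAIN and the caller retries.
 */
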
1978/* The key must be already stored in q->key. */
1979static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1980 __acquires(&hb->lock)
1981{
1982 struct futex_hash_bucket *hb;
1983
1984 hb = hash_futex(&q->key);
1985
1986 /*
1987 * Increment the counter before taking the lock so that
1988 * a potential waker won't miss a to-be-slept task that is
1989 * waiting for the spinlock. This is safe as all queue_lock()
1990 * users end up calling queue_me(). Similarly, for housekeeping,
1991 * decrement the counter at queue_unlock() when some error has
1992 * occurred and we don't end up adding the task to the list.
1993 */
1994 hb_waiters_inc(hb);
1995
1996 q->lock_ptr = &hb->lock;
1997
1998 spin_lock(&hb->lock); /* implies smp_mb(); (A) */
1999 return hb;
2000}
2001
2002static inline void
2003queue_unlock(struct futex_hash_bucket *hb)
2004 __releases(&hb->lock)
2005{
2006 spin_unlock(&hb->lock);
2007 hb_waiters_dec(hb);
2008}
2009
2010/**
2011 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2012 * @q: The futex_q to enqueue
2013 * @hb: The destination hash bucket
2014 *
2015 * The hb->lock must be held by the caller, and is released here. A call to
2016 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2017 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2018 * or nothing if the unqueue is done as part of the wake process and the unqueue
2019 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
2020 * an example).
2021 */
2022static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2023 __releases(&hb->lock)
2024{
2025 int prio;
2026
2027 /*
2028 * The priority used to register this element is
2029 * - either the real thread-priority for the real-time threads
2030 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2031 * - or MAX_RT_PRIO for non-RT threads.
2032 * Thus, all RT-threads are woken first in priority order, and
2033 * the others are woken last, in FIFO order.
2034 */
2035 prio = min(current->normal_prio, MAX_RT_PRIO);
2036
2037 plist_node_init(&q->list, prio);
2038 plist_add(&q->list, &hb->chain);
2039 q->task = current;
2040 spin_unlock(&hb->lock);
2041}
2042
2043/**
2044 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2045 * @q: The futex_q to unqueue
2046 *
2047 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2048 * be paired with exactly one earlier call to queue_me().
2049 *
2050 * Return:
2051 * 1 - if the futex_q was still queued (and we unqueued it);
2052 * 0 - if the futex_q was already removed by the waking thread
2053 */
2054static int unqueue_me(struct futex_q *q)
2055{
2056 spinlock_t *lock_ptr;
2057 int ret = 0;
2058
2059 /* In the common case we don't take the spinlock, which is nice. */
2060retry:
2061 /*
2062 * q->lock_ptr can change between this read and the following spin_lock.
2063 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2064 * optimizing lock_ptr out of the logic below.
2065 */
2066 lock_ptr = READ_ONCE(q->lock_ptr);
2067 if (lock_ptr != NULL) {
2068 spin_lock(lock_ptr);
2069 /*
2070 * q->lock_ptr can change between reading it and
2071 * spin_lock(), causing us to take the wrong lock. This
2072 * corrects the race condition.
2073 *
2074 * Reasoning goes like this: if we have the wrong lock,
2075 * q->lock_ptr must have changed (maybe several times)
2076 * between reading it and the spin_lock(). It can
2077 * change again after the spin_lock() but only if it was
2078 * already changed before the spin_lock(). It cannot,
2079 * however, change back to the original value. Therefore
2080 * we can detect whether we acquired the correct lock.
2081 */
2082 if (unlikely(lock_ptr != q->lock_ptr)) {
2083 spin_unlock(lock_ptr);
2084 goto retry;
2085 }
2086 __unqueue_futex(q);
2087
2088 BUG_ON(q->pi_state);
2089
2090 spin_unlock(lock_ptr);
2091 ret = 1;
2092 }
2093
2094 drop_futex_key_refs(&q->key);
2095 return ret;
2096}
2097
2098/*
2099 * PI futexes cannot be requeued and must remove themselves from the
2100 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2101 * and dropped here.
2102 */
2103static void unqueue_me_pi(struct futex_q *q)
2104 __releases(q->lock_ptr)
2105{
2106 __unqueue_futex(q);
2107
2108 BUG_ON(!q->pi_state);
2109 put_pi_state(q->pi_state);
2110 q->pi_state = NULL;
2111
2112 spin_unlock(q->lock_ptr);
2113}
2114
2115/*
2116 * Fixup the pi_state owner with the new owner.
2117 *
2118 * Must be called with the hash bucket lock held and mm->mmap_sem held
2119 * for non-private futexes.
2120 */
2121static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2122 struct task_struct *newowner)
2123{
2124 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2125 struct futex_pi_state *pi_state = q->pi_state;
2126 struct task_struct *oldowner = pi_state->owner;
2127 u32 uval, uninitialized_var(curval), newval;
2128 int ret;
2129
2130 /* Owner died? */
2131 if (!pi_state->owner)
2132 newtid |= FUTEX_OWNER_DIED;
2133
2134 /*
2135 * We are here either because we stole the rtmutex from the
2136 * previous highest priority waiter or we are the highest priority
2137 * waiter but failed to get the rtmutex the first time.
2138 * We have to replace the newowner TID in the user space variable.
2139 * This must be atomic as we have to preserve the owner died bit here.
2140 *
2141 * Note: We write the user space value _before_ changing the pi_state
2142 * because we can fault here. Imagine swapped out pages or a fork
2143 * that marked all the anonymous memory readonly for cow.
2144 *
2145 * Modifying pi_state _before_ the user space value would
2146 * leave the pi_state in an inconsistent state when we fault
2147 * here, because we need to drop the hash bucket lock to
2148 * handle the fault. This might be observed in the PID check
2149 * in lookup_pi_state.
2150 */
2151retry:
2152 if (get_futex_value_locked(&uval, uaddr))
2153 goto handle_fault;
2154
2155 while (1) {
2156 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2157
2158 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
2159 goto handle_fault;
2160 if (curval == uval)
2161 break;
2162 uval = curval;
2163 }
2164
2165 /*
2166 * We fixed up user space. Now we need to fix the pi_state
2167 * itself.
2168 */
2169 if (pi_state->owner != NULL) {
2170 raw_spin_lock_irq(&pi_state->owner->pi_lock);
2171 WARN_ON(list_empty(&pi_state->list));
2172 list_del_init(&pi_state->list);
2173 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
2174 }
2175
2176 pi_state->owner = newowner;
2177
2178 raw_spin_lock_irq(&newowner->pi_lock);
2179 WARN_ON(!list_empty(&pi_state->list));
2180 list_add(&pi_state->list, &newowner->pi_state_list);
2181 raw_spin_unlock_irq(&newowner->pi_lock);
2182 return 0;
2183
2184 /*
2185 * To handle the page fault we need to drop the hash bucket
2186 * lock here. That gives the other task (either the highest priority
2187 * waiter itself or the task which stole the rtmutex) the
2188 * chance to try the fixup of the pi_state. So once we are
2189 * back from handling the fault we need to check the pi_state
2190 * after reacquiring the hash bucket lock and before trying to
2191 * do another fixup. When the fixup has been done already we
2192 * simply return.
2193 */
2194handle_fault:
2195 spin_unlock(q->lock_ptr);
2196
2197 ret = fault_in_user_writeable(uaddr);
2198
2199 spin_lock(q->lock_ptr);
2200
2201 /*
2202 * Check if someone else fixed it for us:
2203 */
2204 if (pi_state->owner != oldowner)
2205 return 0;
2206
2207 if (ret)
2208 return ret;
2209
2210 goto retry;
2211}
2212
2213static long futex_wait_restart(struct restart_block *restart);
2214
2215/**
2216 * fixup_owner() - Post lock pi_state and corner case management
2217 * @uaddr: user address of the futex
2218 * @q: futex_q (contains pi_state and access to the rt_mutex)
2219 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2220 *
2221 * After attempting to lock an rt_mutex, this function is called to cleanup
2222 * the pi_state owner as well as handle race conditions that may allow us to
2223 * acquire the lock. Must be called with the hb lock held.
2224 *
2225 * Return:
2226 * 1 - success, lock taken;
2227 * 0 - success, lock not taken;
2228 * <0 - on error (-EFAULT)
2229 */
2230static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2231{
2232 struct task_struct *owner;
2233 int ret = 0;
2234
2235 if (locked) {
2236 /*
2237 * Got the lock. We might not be the anticipated owner if we
2238 * did a lock-steal - fix up the PI-state in that case:
2239 */
2240 if (q->pi_state->owner != current)
2241 ret = fixup_pi_state_owner(uaddr, q, current);
2242 goto out;
2243 }
2244
2245 /*
2246 * Catch the rare case, where the lock was released when we were on the
2247 * way back before we locked the hash bucket.
2248 */
2249 if (q->pi_state->owner == current) {
2250 /*
2251 * Try to get the rt_mutex now. This might fail as some other
2252 * task acquired the rt_mutex after we removed ourself from the
2253 * rt_mutex waiters list.
2254 */
2255 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2256 locked = 1;
2257 goto out;
2258 }
2259
2260 /*
2261 * pi_state is incorrect, some other task did a lock steal and
2262 * we returned due to timeout or signal without taking the
2263 * rt_mutex. Too late.
2264 */
2265 raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
2266 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2267 if (!owner)
2268 owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2269 raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
2270 ret = fixup_pi_state_owner(uaddr, q, owner);
2271 goto out;
2272 }
2273
2274 /*
2275 * Paranoia check. If we did not take the lock, then we should not be
2276 * the owner of the rt_mutex.
2277 */
2278 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2279 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2280 "pi-state %p\n", ret,
2281 q->pi_state->pi_mutex.owner,
2282 q->pi_state->owner);
2283
2284out:
2285 return ret ? ret : locked;
2286}
2287
2288/**
2289 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2290 * @hb: the futex hash bucket, must be locked by the caller
2291 * @q: the futex_q to queue up on
2292 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2293 */
2294static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2295 struct hrtimer_sleeper *timeout)
2296{
2297 /*
2298 * The task state is guaranteed to be set before another task can
2299 * wake it. set_current_state() is implemented using smp_store_mb() and
2300 * queue_me() calls spin_unlock() upon completion, both serializing
2301 * access to the hash list and forcing another memory barrier.
2302 */
2303 set_current_state(TASK_INTERRUPTIBLE);
2304 queue_me(q, hb);
2305
2306 /* Arm the timer */
2307 if (timeout)
2308 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2309
2310 /*
2311 * If we have been removed from the hash list, then another task
2312 * has tried to wake us, and we can skip the call to schedule().
2313 */
2314 if (likely(!plist_node_empty(&q->list))) {
2315 /*
2316 * If the timer has already expired, current will already be
2317 * flagged for rescheduling. Only call schedule if there
2318 * is no timeout, or if it has yet to expire.
2319 */
2320 if (!timeout || timeout->task)
2321 freezable_schedule();
2322 }
2323 __set_current_state(TASK_RUNNING);
2324}
2325
2326/**
2327 * futex_wait_setup() - Prepare to wait on a futex
2328 * @uaddr: the futex userspace address
2329 * @val: the expected value
2330 * @flags: futex flags (FLAGS_SHARED, etc.)
2331 * @q: the associated futex_q
2332 * @hb: storage for hash_bucket pointer to be returned to caller
2333 *
2334 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2335 * compare it with the expected value. Handle atomic faults internally.
2336 * Return with the hb lock held and a q.key reference on success, and unlocked
2337 * with no q.key reference on failure.
2338 *
2339 * Return:
2340 * 0 - uaddr contains val and hb has been locked;
2341 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2342 */
2343static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2344 struct futex_q *q, struct futex_hash_bucket **hb)
2345{
2346 u32 uval;
2347 int ret;
2348
2349 /*
2350 * Access the page AFTER the hash-bucket is locked.
2351 * Order is important:
2352 *
2353 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2354 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2355 *
2356 * The basic logical guarantee of a futex is that it blocks ONLY
2357 * if cond(var) is known to be true at the time of blocking, for
2358 * any cond. If we locked the hash-bucket after testing *uaddr, that
2359 * would open a race condition where we could block indefinitely with
2360 * cond(var) false, which would violate the guarantee.
2361 *
2362 * On the other hand, we insert q and release the hash-bucket only
2363 * after testing *uaddr. This guarantees that futex_wait() will NOT
2364 * absorb a wakeup if *uaddr does not match the desired value
2365 * while the syscall executes.
2366 */
2367retry:
2368 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2369 if (unlikely(ret != 0))
2370 return ret;
2371
2372retry_private:
2373 *hb = queue_lock(q);
2374
2375 ret = get_futex_value_locked(&uval, uaddr);
2376
2377 if (ret) {
2378 queue_unlock(*hb);
2379
2380 ret = get_user(uval, uaddr);
2381 if (ret)
2382 goto out;
2383
2384 if (!(flags & FLAGS_SHARED))
2385 goto retry_private;
2386
2387 put_futex_key(&q->key);
2388 goto retry;
2389 }
2390
2391 if (uval != val) {
2392 queue_unlock(*hb);
2393 ret = -EWOULDBLOCK;
2394 }
2395
2396out:
2397 if (ret)
2398 put_futex_key(&q->key);
2399 return ret;
2400}
2401
2402static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2403 ktime_t *abs_time, u32 bitset)
2404{
2405 struct hrtimer_sleeper timeout, *to = NULL;
2406 struct restart_block *restart;
2407 struct futex_hash_bucket *hb;
2408 struct futex_q q = futex_q_init;
2409 int ret;
2410
2411 if (!bitset)
2412 return -EINVAL;
2413 q.bitset = bitset;
2414
2415 if (abs_time) {
2416 to = &timeout;
2417
2418 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2419 CLOCK_REALTIME : CLOCK_MONOTONIC,
2420 HRTIMER_MODE_ABS);
2421 hrtimer_init_sleeper(to, current);
2422 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2423 current->timer_slack_ns);
2424 }
2425
2426retry:
2427 /*
2428 * Prepare to wait on uaddr. On success, holds hb lock and increments
2429 * q.key refs.
2430 */
2431 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2432 if (ret)
2433 goto out;
2434
2435 /* queue_me and wait for wakeup, timeout, or a signal. */
2436 futex_wait_queue_me(hb, &q, to);
2437
2438 /* If we were woken (and unqueued), we succeeded, whatever. */
2439 ret = 0;
2440 /* unqueue_me() drops q.key ref */
2441 if (!unqueue_me(&q))
2442 goto out;
2443 ret = -ETIMEDOUT;
2444 if (to && !to->task)
2445 goto out;
2446
2447 /*
2448 * We expect signal_pending(current), but we might be the
2449 * victim of a spurious wakeup as well.
2450 */
2451 if (!signal_pending(current))
2452 goto retry;
2453
2454 ret = -ERESTARTSYS;
2455 if (!abs_time)
2456 goto out;
2457
2458 restart = &current->restart_block;
2459 restart->fn = futex_wait_restart;
2460 restart->futex.uaddr = uaddr;
2461 restart->futex.val = val;
2462 restart->futex.time = *abs_time;
2463 restart->futex.bitset = bitset;
2464 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2465
2466 ret = -ERESTART_RESTARTBLOCK;
2467
2468out:
2469 if (to) {
2470 hrtimer_cancel(&to->timer);
2471 destroy_hrtimer_on_stack(&to->timer);
2472 }
2473 return ret;
2474}
2475
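/*
 * Illustrative sketch, not part of this file: the minimal userspace
 * counterpart of futex_wait()/futex_wake(), using the raw syscall
 * (hypothetical names):
 *
 *	// Waiter: block only while *addr still holds the observed value.
 *	while (atomic_load(addr) == busy_val)
 *		syscall(SYS_futex, addr, FUTEX_WAIT, busy_val, NULL);
 *
 *	// Waker: change the value first, then wake.
 *	atomic_store(addr, free_val);
 *	syscall(SYS_futex, addr, FUTEX_WAKE, 1);
 *
 * The -EWOULDBLOCK result from futex_wait_setup() corresponds to the
 * value changing between the load and the syscall; the waiter's loop
 * then simply re-checks the condition.
 */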
2476
2477static long futex_wait_restart(struct restart_block *restart)
2478{
2479 u32 __user *uaddr = restart->futex.uaddr;
2480 ktime_t t, *tp = NULL;
2481
2482 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2483 t = restart->futex.time;
2484 tp = &t;
2485 }
2486 restart->fn = do_no_restart_syscall;
2487
2488 return (long)futex_wait(uaddr, restart->futex.flags,
2489 restart->futex.val, tp, restart->futex.bitset);
2490}
2491
2492
2493/*
2494 * Userspace tried a 0 -> TID atomic transition of the futex value
2495 * and failed. The kernel side here does the whole locking operation:
2496 * if there are waiters then it will block as a consequence of relying
2497 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2498 * a 0 value of the futex too.)
2499 *
2500 * Also serves the FUTEX_TRYLOCK_PI operation, with the corresponding trylock semantics.
2501 */
2502static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2503 ktime_t *time, int trylock)
2504{
2505 struct hrtimer_sleeper timeout, *to = NULL;
2506 struct futex_hash_bucket *hb;
2507 struct futex_q q = futex_q_init;
2508 int res, ret;
2509
2510 if (refill_pi_state_cache())
2511 return -ENOMEM;
2512
2513 if (time) {
2514 to = &timeout;
2515 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2516 HRTIMER_MODE_ABS);
2517 hrtimer_init_sleeper(to, current);
2518 hrtimer_set_expires(&to->timer, *time);
2519 }
2520
2521retry:
2522 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2523 if (unlikely(ret != 0))
2524 goto out;
2525
2526retry_private:
2527 hb = queue_lock(&q);
2528
2529 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2530 if (unlikely(ret)) {
2531 /*
2532 * Atomic work succeeded and we got the lock,
2533 * or failed. Either way, we do _not_ block.
2534 */
2535 switch (ret) {
2536 case 1:
2537 /* We got the lock. */
2538 ret = 0;
2539 goto out_unlock_put_key;
2540 case -EFAULT:
2541 goto uaddr_faulted;
2542 case -EAGAIN:
2543 /*
2544 * Two reasons for this:
2545 * - Task is exiting and we just wait for the
2546 * exit to complete.
2547 * - The user space value changed.
2548 */
2549 queue_unlock(hb);
2550 put_futex_key(&q.key);
2551 cond_resched();
2552 goto retry;
2553 default:
2554 goto out_unlock_put_key;
2555 }
2556 }
2557
2558 /*
2559 * Only actually queue now that the atomic ops are done:
2560 */
2561 queue_me(&q, hb);
2562
2563 WARN_ON(!q.pi_state);
2564 /*
2565 * Block on the PI mutex:
2566 */
2567 if (!trylock) {
2568 ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
2569 } else {
2570 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2571 /* Fixup the trylock return value: */
2572 ret = ret ? 0 : -EWOULDBLOCK;
2573 }
2574
2575 spin_lock(q.lock_ptr);
2576 /*
2577 * Fixup the pi_state owner and possibly acquire the lock if we
2578 * haven't already.
2579 */
2580 res = fixup_owner(uaddr, &q, !ret);
2581 /*
2582 * If fixup_owner() returned an error, propagate that. If it acquired
2583 * the lock, clear our -ETIMEDOUT or -EINTR.
2584 */
2585 if (res)
2586 ret = (res < 0) ? res : 0;
2587
2588 /*
2589 * If fixup_owner() faulted and was unable to handle the fault, unlock
2590 * it and return the fault to userspace.
2591 */
2592 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2593 rt_mutex_unlock(&q.pi_state->pi_mutex);
2594
2595 /* Unqueue and drop the lock */
2596 unqueue_me_pi(&q);
2597
2598 goto out_put_key;
2599
2600out_unlock_put_key:
2601 queue_unlock(hb);
2602
2603out_put_key:
2604 put_futex_key(&q.key);
2605out:
2606 if (to)
2607 destroy_hrtimer_on_stack(&to->timer);
2608 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2609
2610uaddr_faulted:
2611 queue_unlock(hb);
2612
2613 ret = fault_in_user_writeable(uaddr);
2614 if (ret)
2615 goto out_put_key;
2616
2617 if (!(flags & FLAGS_SHARED))
2618 goto retry_private;
2619
2620 put_futex_key(&q.key);
2621 goto retry;
2622}
2623
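/*
 * Illustrative sketch, not part of this file: the userspace fast paths
 * that fall into futex_lock_pi()/futex_unlock_pi() on contention.
 * cmpxchg_u32() is a hypothetical CAS helper returning true on success:
 *
 *	// Lock: try the uncontended 0 -> TID transition first.
 *	if (!cmpxchg_u32(addr, 0, gettid()))
 *		syscall(SYS_futex, addr, FUTEX_LOCK_PI, 0, NULL);
 *
 *	// Unlock: try the uncontended TID -> 0 transition first.
 *	if (!cmpxchg_u32(addr, gettid(), 0))
 *		syscall(SYS_futex, addr, FUTEX_UNLOCK_PI);
 *
 * The unlock fast path fails exactly when FUTEX_WAITERS is set, i.e.
 * when the kernel must hand the lock to the top rt_mutex waiter.
 */
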
2624/*
2625 * Userspace attempted a TID -> 0 atomic transition, and failed.
2626 * This is the in-kernel slowpath: we look up the PI state (if any),
2627 * and do the rt-mutex unlock.
2628 */
2629static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2630{
2631 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2632 union futex_key key = FUTEX_KEY_INIT;
2633 struct futex_hash_bucket *hb;
2634 struct futex_q *match;
2635 int ret;
2636
2637retry:
2638 if (get_user(uval, uaddr))
2639 return -EFAULT;
2640 /*
2641 * We release only a lock we actually own:
2642 */
2643 if ((uval & FUTEX_TID_MASK) != vpid)
2644 return -EPERM;
2645
2646 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2647 if (ret)
2648 return ret;
2649
2650 hb = hash_futex(&key);
2651 spin_lock(&hb->lock);
2652
2653 /*
2654 * Check waiters first. We do not trust user space values at
2655 * all and we at least want to know if user space fiddled
2656 * with the futex value instead of blindly unlocking.
2657 */
2658 match = futex_top_waiter(hb, &key);
2659 if (match) {
2660 ret = wake_futex_pi(uaddr, uval, match, hb);
2661 /*
2662 * In case of success wake_futex_pi dropped the hash
2663 * bucket lock.
2664 */
2665 if (!ret)
2666 goto out_putkey;
2667 /*
2668 * The atomic access to the futex value generated a
2669 * pagefault, so retry the user-access and the wakeup:
2670 */
2671 if (ret == -EFAULT)
2672 goto pi_faulted;
2673 /*
2674 * An unconditional UNLOCK_PI op raced against a waiter
2675 * setting the FUTEX_WAITERS bit. Try again.
2676 */
2677 if (ret == -EAGAIN) {
2678 spin_unlock(&hb->lock);
2679 put_futex_key(&key);
2680 goto retry;
2681 }
2682 /*
2683 * wake_futex_pi has detected invalid state. Tell user
2684 * space.
2685 */
2686 goto out_unlock;
2687 }
2688
2689 /*
2690 * We have no kernel internal state, i.e. no waiters in the
2691 * kernel. Waiters which are about to queue themselves are stuck
2692 * on hb->lock. So we can safely ignore them. We preserve
2693 * neither the WAITERS bit nor the OWNER_DIED one. We are the
2694 * owner.
2695 */
2696 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
2697 goto pi_faulted;
2698
2699 /*
2700 * If uval has changed, let user space handle it.
2701 */
2702 ret = (curval == uval) ? 0 : -EAGAIN;
2703
2704out_unlock:
2705 spin_unlock(&hb->lock);
2706out_putkey:
2707 put_futex_key(&key);
2708 return ret;
2709
2710pi_faulted:
2711 spin_unlock(&hb->lock);
2712 put_futex_key(&key);
2713
2714 ret = fault_in_user_writeable(uaddr);
2715 if (!ret)
2716 goto retry;
2717
2718 return ret;
2719}
2720
2721/**
2722 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2723 * @hb: the hash_bucket futex_q was originally enqueued on
2724 * @q: the futex_q woken while waiting to be requeued
2725 * @key2: the futex_key of the requeue target futex
2726 * @timeout: the timeout associated with the wait (NULL if none)
2727 *
2728 * Detect if the task was woken on the initial futex as opposed to the requeue
2729 * target futex. If so, determine if it was a timeout or a signal that caused
2730 * the wakeup and return the appropriate error code to the caller. Must be
2731 * called with the hb lock held.
2732 *
2733 * Return:
2734 * 0 = no early wakeup detected;
2735 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
2736 */
2737static inline
2738int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2739 struct futex_q *q, union futex_key *key2,
2740 struct hrtimer_sleeper *timeout)
2741{
2742 int ret = 0;
2743
2744 /*
2745 * With the hb lock held, we avoid races while we process the wakeup.
2746 * We only need to hold hb (and not hb2) to ensure atomicity as the
2747 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2748 * It can't be requeued from uaddr2 to something else since we don't
2749 * support a PI aware source futex for requeue.
2750 */
2751 if (!match_futex(&q->key, key2)) {
2752 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2753 /*
2754 * We were woken prior to requeue by a timeout or a signal.
2755 * Unqueue the futex_q and determine which it was.
2756 */
2757 plist_del(&q->list, &hb->chain);
2758 hb_waiters_dec(hb);
2759
2760 /* Handle spurious wakeups gracefully */
2761 ret = -EWOULDBLOCK;
2762 if (timeout && !timeout->task)
2763 ret = -ETIMEDOUT;
2764 else if (signal_pending(current))
2765 ret = -ERESTARTNOINTR;
2766 }
2767 return ret;
2768}
2769
2770/**
2771 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2772 * @uaddr: the futex we initially wait on (non-pi)
2773 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2774 * the same type, no requeueing from private to shared, etc.
2775 * @val: the expected value of uaddr
2776 * @abs_time: absolute timeout
2777 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2778 * @uaddr2: the pi futex we will take prior to returning to user-space
2779 *
2780 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2781 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
2782 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2783 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
2784 * without one, the pi logic would not know which task to boost/deboost, if
2785 * there was a need to.
2786 *
2787 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2788 * via the following--
2789 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2790 * 2) wakeup on uaddr2 after a requeue
2791 * 3) signal
2792 * 4) timeout
2793 *
2794 * If 3, cleanup and return -ERESTARTNOINTR.
2795 *
2796 * If 2, we may then block on trying to take the rt_mutex and return via:
2797 * 5) successful lock
2798 * 6) signal
2799 * 7) timeout
2800 * 8) other lock acquisition failure
2801 *
2802 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2803 *
2804 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2805 *
2806 * Return:
2807 * 0 - On success;
2808 * <0 - On error
2809 */
2810static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2811 u32 val, ktime_t *abs_time, u32 bitset,
2812 u32 __user *uaddr2)
2813{
2814 struct hrtimer_sleeper timeout, *to = NULL;
2815 struct rt_mutex_waiter rt_waiter;
2816 struct futex_hash_bucket *hb;
2817 union futex_key key2 = FUTEX_KEY_INIT;
2818 struct futex_q q = futex_q_init;
2819 int res, ret;
2820
2821 if (uaddr == uaddr2)
2822 return -EINVAL;
2823
2824 if (!bitset)
2825 return -EINVAL;
2826
2827 if (abs_time) {
2828 to = &timeout;
2829 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2830 CLOCK_REALTIME : CLOCK_MONOTONIC,
2831 HRTIMER_MODE_ABS);
2832 hrtimer_init_sleeper(to, current);
2833 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2834 current->timer_slack_ns);
2835 }
2836
2837 /*
2838 * The waiter is allocated on our stack, manipulated by the requeue
2839 * code while we sleep on uaddr.
2840 */
2841 debug_rt_mutex_init_waiter(&rt_waiter);
2842 RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2843 RB_CLEAR_NODE(&rt_waiter.tree_entry);
2844 rt_waiter.task = NULL;
2845
2846 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2847 if (unlikely(ret != 0))
2848 goto out;
2849
2850 q.bitset = bitset;
2851 q.rt_waiter = &rt_waiter;
2852 q.requeue_pi_key = &key2;
2853
2854 /*
2855 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2856 * count.
2857 */
2858 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2859 if (ret)
2860 goto out_key2;
2861
2862 /*
2863 * The check above which compares uaddrs is not sufficient for
2864 * shared futexes. We need to compare the keys:
2865 */
2866 if (match_futex(&q.key, &key2)) {
2867 queue_unlock(hb);
2868 ret = -EINVAL;
2869 goto out_put_keys;
2870 }
2871
2872 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2873 futex_wait_queue_me(hb, &q, to);
2874
2875 spin_lock(&hb->lock);
2876 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2877 spin_unlock(&hb->lock);
2878 if (ret)
2879 goto out_put_keys;
2880
2881 /*
2882 * In order for us to be here, we know our q.key == key2, and since
2883 * we took the hb->lock above, we also know that futex_requeue() has
2884 * completed and we no longer have to concern ourselves with a wakeup
2885 * race with the atomic proxy lock acquisition by the requeue code.
2886 * futex_requeue() dropped our key1 reference and incremented our key2
2887 * reference count.
2888 */
2889
2890 /* Check if the requeue code acquired the second futex for us. */
2891 if (!q.rt_waiter) {
2892 /*
2893 * Got the lock. We might not be the anticipated owner if we
2894 * did a lock-steal - fix up the PI-state in that case.
2895 */
2896 if (q.pi_state && (q.pi_state->owner != current)) {
2897 spin_lock(q.lock_ptr);
2898 ret = fixup_pi_state_owner(uaddr2, &q, current);
2899 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
2900 rt_mutex_unlock(&q.pi_state->pi_mutex);
2901 /*
2902 * Drop the reference to the pi state which
2903 * the requeue_pi() code acquired for us.
2904 */
2905 put_pi_state(q.pi_state);
2906 spin_unlock(q.lock_ptr);
2907 }
2908 } else {
2909 struct rt_mutex *pi_mutex;
2910
2911 /*
2912 * We have been woken up by futex_unlock_pi(), a timeout, or a
2913 * signal. futex_unlock_pi() will destroy neither the lock_ptr nor
2914 * the pi_state.
2915 */
2916 WARN_ON(!q.pi_state);
2917 pi_mutex = &q.pi_state->pi_mutex;
2918 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
2919 debug_rt_mutex_free_waiter(&rt_waiter);
2920
2921 spin_lock(q.lock_ptr);
2922 /*
2923 * Fixup the pi_state owner and possibly acquire the lock if we
2924 * haven't already.
2925 */
2926 res = fixup_owner(uaddr2, &q, !ret);
2927 /*
2928 * If fixup_owner() returned an error, propagate that. If it
2929 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2930 */
2931 if (res)
2932 ret = (res < 0) ? res : 0;
2933
2934 /*
2935 * If fixup_pi_state_owner() faulted and was unable to handle
2936 * the fault, unlock the rt_mutex and return the fault to
2937 * userspace.
2938 */
2939 if (ret && rt_mutex_owner(pi_mutex) == current)
2940 rt_mutex_unlock(pi_mutex);
2941
2942 /* Unqueue and drop the lock. */
2943 unqueue_me_pi(&q);
2944 }
2945
2946 if (ret == -EINTR) {
2947 /*
2948 * We've already been requeued, but cannot restart by calling
2949 * futex_lock_pi() directly. We could restart this syscall, but
2950 * it would detect that the user space "val" changed and return
2951 * -EWOULDBLOCK. Save the overhead of the restart and return
2952 * -EWOULDBLOCK directly.
2953 */
2954 ret = -EWOULDBLOCK;
2955 }
2956
2957out_put_keys:
2958 put_futex_key(&q.key);
2959out_key2:
2960 put_futex_key(&key2);
2961
2962out:
2963 if (to) {
2964 hrtimer_cancel(&to->timer);
2965 destroy_hrtimer_on_stack(&to->timer);
2966 }
2967 return ret;
2968}
2969
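/*
 * Illustrative sketch, not part of this file: the intended user of the
 * two requeue-PI ops is a PI-aware condition variable, roughly as
 * follows (hypothetical names, raw syscall usage):
 *
 *	// pthread_cond_wait() style: cond->seq is the non-PI futex,
 *	// mutex->val the PI futex. The caller returns owning the mutex.
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI,
 *		seq, NULL, &mutex->val);
 *
 *	// pthread_cond_broadcast() style: wake one waiter, requeue the
 *	// rest onto the PI mutex (nr_requeue rides in the timeout slot).
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI,
 *		1, INT_MAX, &mutex->val, seq);
 */
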
2970/*
2971 * Support for robust futexes: the kernel cleans up held futexes at
2972 * thread exit time.
2973 *
2974 * Implementation: user-space maintains a per-thread list of locks it
2975 * is holding. Upon do_exit(), the kernel carefully walks this list,
2976 * and marks all locks that are owned by this thread with the
2977 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2978 * always manipulated with the lock held, so the list is private and
2979 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2980 * field, to allow the kernel to clean up if the thread dies after
2981 * acquiring the lock, but just before it could have added itself to
2982 * the list. There can only be one such pending lock.
2983 */
2984
2985/**
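/*
 * For reference, the userspace layout the list walker below relies on,
 * as declared in the uapi header <linux/futex.h>:
 *
 *	struct robust_list {
 *		struct robust_list __user *next;
 *	};
 *
 *	struct robust_list_head {
 *		struct robust_list list;
 *		long futex_offset;
 *		struct robust_list __user *list_op_pending;
 *	};
 *
 * Each entry points at the robust_list member embedded in a userspace
 * lock; the futex word itself lives futex_offset bytes away from it.
 */
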
2986 * sys_set_robust_list() - Set the robust-futex list head of a task
2987 * @head: pointer to the list-head
2988 * @len: length of the list-head, as userspace expects
2989 */
2990SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2991 size_t, len)
2992{
2993 if (!futex_cmpxchg_enabled)
2994 return -ENOSYS;
2995 /*
2996 * The kernel knows only one size for now:
2997 */
2998 if (unlikely(len != sizeof(*head)))
2999 return -EINVAL;
3000
3001 current->robust_list = head;
3002
3003 return 0;
3004}
3005
3006/**
3007 * sys_get_robust_list() - Get the robust-futex list head of a task
3008 * @pid: pid of the process [zero for current task]
3009 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3010 * @len_ptr: pointer to a length field, the kernel fills in the header size
3011 */
3012SYSCALL_DEFINE3(get_robust_list, int, pid,
3013 struct robust_list_head __user * __user *, head_ptr,
3014 size_t __user *, len_ptr)
3015{
3016 struct robust_list_head __user *head;
3017 unsigned long ret;
3018 struct task_struct *p;
3019
3020 if (!futex_cmpxchg_enabled)
3021 return -ENOSYS;
3022
3023 rcu_read_lock();
3024
3025 ret = -ESRCH;
3026 if (!pid)
3027 p = current;
3028 else {
3029 p = find_task_by_vpid(pid);
3030 if (!p)
3031 goto err_unlock;
3032 }
3033
3034 ret = -EPERM;
3035 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3036 goto err_unlock;
3037
3038 head = p->robust_list;
3039 rcu_read_unlock();
3040
3041 if (put_user(sizeof(*head), len_ptr))
3042 return -EFAULT;
3043 return put_user(head, head_ptr);
3044
3045err_unlock:
3046 rcu_read_unlock();
3047
3048 return ret;
3049}
3050
3051/*
3052 * Process a futex-list entry, check whether it's owned by the
3053 * dying task, and do notification if so:
3054 */
3055int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3056{
3057 u32 uval, uninitialized_var(nval), mval;
3058
3059retry:
3060 if (get_user(uval, uaddr))
3061 return -1;
3062
3063 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
3064 /*
3065 * Ok, this dying thread is truly holding a futex
3066 * of interest. Set the OWNER_DIED bit atomically
3067 * via cmpxchg, and if the value had FUTEX_WAITERS
3068 * set, wake up a waiter (if any). (We have to do a
3069 * futex_wake() even if OWNER_DIED is already set -
3070 * to handle the rare but possible case of recursive
3071 * thread-death.) The rest of the cleanup is done in
3072 * userspace.
3073 */
3074 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3075 /*
3076 * We are not holding a lock here, but we want to have
3077 * the pagefault_disable/enable() protection because
3078 * we want to handle the fault gracefully. If the
3079 * access fails we try to fault in the futex with R/W
3080 * verification via get_user_pages. get_user() above
3081 * does not guarantee R/W access. If that fails we
3082 * give up and leave the futex locked.
3083 */
3084 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
3085 if (fault_in_user_writeable(uaddr))
3086 return -1;
3087 goto retry;
3088 }
3089 if (nval != uval)
3090 goto retry;
3091
3092 /*
3093 * Wake robust non-PI futexes here. The wakeup of
3094 * PI futexes happens in exit_pi_state():
3095 */
3096 if (!pi && (uval & FUTEX_WAITERS))
3097 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3098 }
3099 return 0;
3100}
3101
3102/*
3103 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3104 */
3105static inline int fetch_robust_entry(struct robust_list __user **entry,
3106 struct robust_list __user * __user *head,
3107 unsigned int *pi)
3108{
3109 unsigned long uentry;
3110
3111 if (get_user(uentry, (unsigned long __user *)head))
3112 return -EFAULT;
3113
3114 *entry = (void __user *)(uentry & ~1UL);
3115 *pi = uentry & 1;
3116
3117 return 0;
3118}
3119
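/*
 * Example of the encoding (illustrative value): a fetched uentry of
 * 0x7ffd1000 with bit 0 set, i.e. 0x7ffd1001, decodes to
 * entry == (void __user *)0x7ffd1000 and *pi == 1, marking a PI futex.
 */
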
3120/*
3121 * Walk curr->robust_list (very carefully, it's a userspace list!)
3122 * and mark any locks found there dead, and notify any waiters.
3123 *
3124 * We silently return on any sign of a list-walking problem.
3125 */
3126void exit_robust_list(struct task_struct *curr)
3127{
3128 struct robust_list_head __user *head = curr->robust_list;
3129 struct robust_list __user *entry, *next_entry, *pending;
3130 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3131 unsigned int uninitialized_var(next_pi);
3132 unsigned long futex_offset;
3133 int rc;
3134
3135 if (!futex_cmpxchg_enabled)
3136 return;
3137
3138 /*
3139 * Fetch the list head (which was registered earlier, via
3140 * sys_set_robust_list()):
3141 */
3142 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3143 return;
3144 /*
3145 * Fetch the relative futex offset:
3146 */
3147 if (get_user(futex_offset, &head->futex_offset))
3148 return;
3149 /*
3150 * Fetch any possibly pending lock-add first, and handle it
3151 * if it exists:
3152 */
3153 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3154 return;
3155
3156 next_entry = NULL; /* avoid warning with gcc */
3157 while (entry != &head->list) {
3158 /*
3159 * Fetch the next entry in the list before calling
3160 * handle_futex_death:
3161 */
3162 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3163 /*
3164 * A pending lock might already be on the list, so
3165 * don't process it twice:
3166 */
3167 if (entry != pending)
3168 if (handle_futex_death((void __user *)entry + futex_offset,
3169 curr, pi))
3170 return;
3171 if (rc)
3172 return;
3173 entry = next_entry;
3174 pi = next_pi;
3175 /*
3176 * Avoid excessively long or circular lists:
3177 */
3178 if (!--limit)
3179 break;
3180
3181 cond_resched();
3182 }
3183
3184 if (pending)
3185 handle_futex_death((void __user *)pending + futex_offset,
3186 curr, pip);
3187}
3188
3189long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3190 u32 __user *uaddr2, u32 val2, u32 val3)
3191{
3192 int cmd = op & FUTEX_CMD_MASK;
3193 unsigned int flags = 0;
3194
3195 if (!(op & FUTEX_PRIVATE_FLAG))
3196 flags |= FLAGS_SHARED;
3197
3198 if (op & FUTEX_CLOCK_REALTIME) {
3199 flags |= FLAGS_CLOCKRT;
3200 if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3201 cmd != FUTEX_WAIT_REQUEUE_PI)
3202 return -ENOSYS;
3203 }
3204
3205 switch (cmd) {
3206 case FUTEX_LOCK_PI:
3207 case FUTEX_UNLOCK_PI:
3208 case FUTEX_TRYLOCK_PI:
3209 case FUTEX_WAIT_REQUEUE_PI:
3210 case FUTEX_CMP_REQUEUE_PI:
3211 if (!futex_cmpxchg_enabled)
3212 return -ENOSYS;
3213 }
3214
3215 switch (cmd) {
3216 case FUTEX_WAIT:
3217 val3 = FUTEX_BITSET_MATCH_ANY;
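		/* fall through */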
3218 case FUTEX_WAIT_BITSET:
3219 return futex_wait(uaddr, flags, val, timeout, val3);
3220 case FUTEX_WAKE:
3221 val3 = FUTEX_BITSET_MATCH_ANY;
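		/* fall through */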
3222 case FUTEX_WAKE_BITSET:
3223 return futex_wake(uaddr, flags, val, val3);
3224 case FUTEX_REQUEUE:
3225 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3226 case FUTEX_CMP_REQUEUE:
3227 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3228 case FUTEX_WAKE_OP:
3229 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3230 case FUTEX_LOCK_PI:
3231 return futex_lock_pi(uaddr, flags, timeout, 0);
3232 case FUTEX_UNLOCK_PI:
3233 return futex_unlock_pi(uaddr, flags);
3234 case FUTEX_TRYLOCK_PI:
3235 return futex_lock_pi(uaddr, flags, NULL, 1);
3236 case FUTEX_WAIT_REQUEUE_PI:
3237 val3 = FUTEX_BITSET_MATCH_ANY;
3238 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3239 uaddr2);
3240 case FUTEX_CMP_REQUEUE_PI:
3241 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3242 }
3243 return -ENOSYS;
3244}
3245
3246
3247SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3248 struct timespec __user *, utime, u32 __user *, uaddr2,
3249 u32, val3)
3250{
3251 struct timespec ts;
3252 ktime_t t, *tp = NULL;
3253 u32 val2 = 0;
3254 int cmd = op & FUTEX_CMD_MASK;
3255
3256 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3257 cmd == FUTEX_WAIT_BITSET ||
3258 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3259 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3260 return -EFAULT;
3261 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
3262 return -EFAULT;
3263 if (!timespec_valid(&ts))
3264 return -EINVAL;
3265
3266 t = timespec_to_ktime(ts);
3267 if (cmd == FUTEX_WAIT)
3268 t = ktime_add_safe(ktime_get(), t);
3269 tp = &t;
3270 }
3271 /*
3272 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3273 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3274 */
3275 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3276 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3277 val2 = (u32) (unsigned long) utime;
3278
3279 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3280}
3281
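/*
 * Note the timeout convention decoded above (illustrative userspace
 * calls, hypothetical variables): FUTEX_WAIT takes a relative timeout,
 * which is converted here to an absolute expiry (CLOCK_MONOTONIC in the
 * common case), while FUTEX_WAIT_BITSET already passes an absolute one:
 *
 *	struct timespec rel = { .tv_sec = 1 };	// wait up to one second
 *	syscall(SYS_futex, addr, FUTEX_WAIT, val, &rel);
 *
 *	struct timespec abs;			// absolute deadline
 *	clock_gettime(CLOCK_MONOTONIC, &abs);
 *	abs.tv_sec += 1;
 *	syscall(SYS_futex, addr, FUTEX_WAIT_BITSET, val, &abs,
 *		NULL, FUTEX_BITSET_MATCH_ANY);
 */
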
3282static void __init futex_detect_cmpxchg(void)
3283{
3284#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3285 u32 curval;
3286
3287 /*
3288 * This will fail and we want it. Some arch implementations do
3289 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3290 * functionality. We want to know that before we call in any
3291 * of the complex code paths. Also we want to prevent
3292 * registration of robust lists in that case. NULL is
3293 * guaranteed to fault and we get -EFAULT on functional
3294 * implementation, the non-functional ones will return
3295 * -ENOSYS.
3296 */
3297 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3298 futex_cmpxchg_enabled = 1;
3299#endif
3300}
3301
3302static int __init futex_init(void)
3303{
3304 unsigned int futex_shift;
3305 unsigned long i;
3306
3307#if CONFIG_BASE_SMALL
3308 futex_hashsize = 16;
3309#else
3310 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3311#endif
3312
3313 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3314 futex_hashsize, 0,
3315 futex_hashsize < 256 ? HASH_SMALL : 0,
3316 &futex_shift, NULL,
3317 futex_hashsize, futex_hashsize);
3318 futex_hashsize = 1UL << futex_shift;
3319
3320 futex_detect_cmpxchg();
3321
3322 for (i = 0; i < futex_hashsize; i++) {
3323 atomic_set(&futex_queues[i].waiters, 0);
3324 plist_head_init(&futex_queues[i].chain);
3325 spin_lock_init(&futex_queues[i].lock);
3326 }
3327
3328 return 0;
3329}
3330core_initcall(futex_init);
1/*
2 * Fast Userspace Mutexes (which I call "Futexes!").
3 * (C) Rusty Russell, IBM 2002
4 *
5 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7 *
8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier
10 *
11 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 *
15 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 *
19 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21 *
22 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23 * Copyright (C) IBM Corporation, 2009
24 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
25 *
26 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27 * enough at me, Linus for the original (flawed) idea, Matthew
28 * Kirkwood for proof-of-concept implementation.
29 *
30 * "The futexes are also cursed."
31 * "But they come in a choice of three flavours!"
32 *
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License as published by
35 * the Free Software Foundation; either version 2 of the License, or
36 * (at your option) any later version.
37 *
38 * This program is distributed in the hope that it will be useful,
39 * but WITHOUT ANY WARRANTY; without even the implied warranty of
40 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 * GNU General Public License for more details.
42 *
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 */
47#include <linux/slab.h>
48#include <linux/poll.h>
49#include <linux/fs.h>
50#include <linux/file.h>
51#include <linux/jhash.h>
52#include <linux/init.h>
53#include <linux/futex.h>
54#include <linux/mount.h>
55#include <linux/pagemap.h>
56#include <linux/syscalls.h>
57#include <linux/signal.h>
58#include <linux/export.h>
59#include <linux/magic.h>
60#include <linux/pid.h>
61#include <linux/nsproxy.h>
62#include <linux/ptrace.h>
63#include <linux/sched/rt.h>
64#include <linux/hugetlb.h>
65#include <linux/freezer.h>
66#include <linux/bootmem.h>
67#include <linux/fault-inject.h>
68
69#include <asm/futex.h>
70
71#include "locking/rtmutex_common.h"
72
73/*
74 * READ this before attempting to hack on futexes!
75 *
76 * Basic futex operation and ordering guarantees
77 * =============================================
78 *
79 * The waiter reads the futex value in user space and calls
80 * futex_wait(). This function computes the hash bucket and acquires
81 * the hash bucket lock. After that it reads the futex user space value
82 * again and verifies that the data has not changed. If it has not changed
83 * it enqueues itself into the hash bucket, releases the hash bucket lock
84 * and schedules.
85 *
86 * The waker side modifies the user space value of the futex and calls
87 * futex_wake(). This function computes the hash bucket and acquires the
88 * hash bucket lock. Then it looks for waiters on that futex in the hash
89 * bucket and wakes them.
90 *
91 * In futex wake up scenarios where no tasks are blocked on a futex, taking
92 * the hb spinlock can be avoided and simply return. In order for this
93 * optimization to work, ordering guarantees must exist so that the waiter
94 * being added to the list is acknowledged when the list is concurrently being
95 * checked by the waker, avoiding scenarios like the following:
96 *
97 * CPU 0 CPU 1
98 * val = *futex;
99 * sys_futex(WAIT, futex, val);
100 * futex_wait(futex, val);
101 * uval = *futex;
102 * *futex = newval;
103 * sys_futex(WAKE, futex);
104 * futex_wake(futex);
105 * if (queue_empty())
106 * return;
107 * if (uval == val)
108 * lock(hash_bucket(futex));
109 * queue();
110 * unlock(hash_bucket(futex));
111 * schedule();
112 *
113 * This would cause the waiter on CPU 0 to wait forever because it
114 * missed the transition of the user space value from val to newval
115 * and the waker did not find the waiter in the hash bucket queue.
116 *
117 * The correct serialization ensures that a waiter either observes
118 * the changed user space value before blocking or is woken by a
119 * concurrent waker:
120 *
121 * CPU 0 CPU 1
122 * val = *futex;
123 * sys_futex(WAIT, futex, val);
124 * futex_wait(futex, val);
125 *
126 * waiters++; (a)
127 * smp_mb(); (A) <-- paired with -.
128 * |
129 * lock(hash_bucket(futex)); |
130 * |
131 * uval = *futex; |
132 * | *futex = newval;
133 * | sys_futex(WAKE, futex);
134 * | futex_wake(futex);
135 * |
136 * `--------> smp_mb(); (B)
137 * if (uval == val)
138 * queue();
139 * unlock(hash_bucket(futex));
140 * schedule(); if (waiters)
141 * lock(hash_bucket(futex));
142 * else wake_waiters(futex);
143 * waiters--; (b) unlock(hash_bucket(futex));
144 *
145 * Where (A) orders the waiters increment and the futex value read through
146 * atomic operations (see hb_waiters_inc) and where (B) orders the write
147 * to futex and the waiters read -- this is done by the barriers for both
148 * shared and private futexes in get_futex_key_refs().
149 *
150 * This yields the following case (where X:=waiters, Y:=futex):
151 *
152 * X = Y = 0
153 *
154 * w[X]=1 w[Y]=1
155 * MB MB
156 * r[Y]=y r[X]=x
157 *
158 * Which guarantees that x==0 && y==0 is impossible; which translates back into
159 * the guarantee that we cannot both miss the futex variable change and the
160 * enqueue.
161 *
162 * Note that a new waiter is accounted for in (a) even when it is possible that
163 * the wait call can return error, in which case we backtrack from it in (b).
164 * Refer to the comment in queue_lock().
165 *
166 * Similarly, in order to account for waiters being requeued on another
167 * address we always increment the waiters for the destination bucket before
168 * acquiring the lock. It then decrements them again after releasing it -
169 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
170 * will do the additional required waiter count housekeeping. This is done for
171 * double_lock_hb() and double_unlock_hb(), respectively.
172 */
173
174#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
175int __read_mostly futex_cmpxchg_enabled;
176#endif
177
178/*
179 * Futex flags used to encode options to functions and preserve them across
180 * restarts.
181 */
182#define FLAGS_SHARED 0x01
183#define FLAGS_CLOCKRT 0x02
184#define FLAGS_HAS_TIMEOUT 0x04
185
186/*
187 * Priority Inheritance state:
188 */
189struct futex_pi_state {
190 /*
191 * list of 'owned' pi_state instances - these have to be
192 * cleaned up in do_exit() if the task exits prematurely:
193 */
194 struct list_head list;
195
196 /*
197 * The PI object:
198 */
199 struct rt_mutex pi_mutex;
200
201 struct task_struct *owner;
202 atomic_t refcount;
203
204 union futex_key key;
205};
206
207/**
208 * struct futex_q - The hashed futex queue entry, one per waiting task
209 * @list: priority-sorted list of tasks waiting on this futex
210 * @task: the task waiting on the futex
211 * @lock_ptr: the hash bucket lock
212 * @key: the key the futex is hashed on
213 * @pi_state: optional priority inheritance state
214 * @rt_waiter: rt_waiter storage for use with requeue_pi
215 * @requeue_pi_key: the requeue_pi target futex key
216 * @bitset: bitset for the optional bitmasked wakeup
217 *
218 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
219 * we can wake only the relevant ones (hashed queues may be shared).
220 *
221 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
222 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
223 * The order of wakeup is always to make the first condition true, then
224 * the second.
225 *
226 * PI futexes are typically woken before they are removed from the hash list via
227 * the rt_mutex code. See unqueue_me_pi().
228 */
229struct futex_q {
230 struct plist_node list;
231
232 struct task_struct *task;
233 spinlock_t *lock_ptr;
234 union futex_key key;
235 struct futex_pi_state *pi_state;
236 struct rt_mutex_waiter *rt_waiter;
237 union futex_key *requeue_pi_key;
238 u32 bitset;
239};
240
241static const struct futex_q futex_q_init = {
242 /* list gets initialized in queue_me()*/
243 .key = FUTEX_KEY_INIT,
244 .bitset = FUTEX_BITSET_MATCH_ANY
245};
246
247/*
248 * Hash buckets are shared by all the futex_keys that hash to the same
249 * location. Each key may have multiple futex_q structures, one for each task
250 * waiting on a futex.
251 */
252struct futex_hash_bucket {
253 atomic_t waiters;
254 spinlock_t lock;
255 struct plist_head chain;
256} ____cacheline_aligned_in_smp;
257
258/*
259 * The base of the bucket array and its size are always used together
260 * (after initialization only in hash_futex()), so ensure that they
261 * reside in the same cacheline.
262 */
263static struct {
264 struct futex_hash_bucket *queues;
265 unsigned long hashsize;
266} __futex_data __read_mostly __aligned(2*sizeof(long));
267#define futex_queues (__futex_data.queues)
268#define futex_hashsize (__futex_data.hashsize)
269
270
271/*
272 * Fault injections for futexes.
273 */
274#ifdef CONFIG_FAIL_FUTEX
275
276static struct {
277 struct fault_attr attr;
278
279 bool ignore_private;
280} fail_futex = {
281 .attr = FAULT_ATTR_INITIALIZER,
282 .ignore_private = false,
283};
284
285static int __init setup_fail_futex(char *str)
286{
287 return setup_fault_attr(&fail_futex.attr, str);
288}
289__setup("fail_futex=", setup_fail_futex);
290
291static bool should_fail_futex(bool fshared)
292{
293 if (fail_futex.ignore_private && !fshared)
294 return false;
295
296 return should_fail(&fail_futex.attr, 1);
297}
298
299#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
300
301static int __init fail_futex_debugfs(void)
302{
303 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
304 struct dentry *dir;
305
306 dir = fault_create_debugfs_attr("fail_futex", NULL,
307 &fail_futex.attr);
308 if (IS_ERR(dir))
309 return PTR_ERR(dir);
310
311 if (!debugfs_create_bool("ignore-private", mode, dir,
312 &fail_futex.ignore_private)) {
313 debugfs_remove_recursive(dir);
314 return -ENOMEM;
315 }
316
317 return 0;
318}
319
320late_initcall(fail_futex_debugfs);
321
322#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
323
324#else
325static inline bool should_fail_futex(bool fshared)
326{
327 return false;
328}
329#endif /* CONFIG_FAIL_FUTEX */
330
331static inline void futex_get_mm(union futex_key *key)
332{
333 atomic_inc(&key->private.mm->mm_count);
334 /*
335 * Ensure futex_get_mm() implies a full barrier such that
336 * get_futex_key() implies a full barrier. This is relied upon
337 * as smp_mb(); (B), see the ordering comment above.
338 */
339 smp_mb__after_atomic();
340}
341
342/*
343 * Reflects a new waiter being added to the waitqueue.
344 */
345static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
346{
347#ifdef CONFIG_SMP
348 atomic_inc(&hb->waiters);
349 /*
350 * Full barrier (A), see the ordering comment above.
351 */
352 smp_mb__after_atomic();
353#endif
354}
355
356/*
357 * Reflects a waiter being removed from the waitqueue by wakeup
358 * paths.
359 */
360static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
361{
362#ifdef CONFIG_SMP
363 atomic_dec(&hb->waiters);
364#endif
365}
366
367static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
368{
369#ifdef CONFIG_SMP
370 return atomic_read(&hb->waiters);
371#else
372 return 1;
373#endif
374}
375
376/*
377 * We hash on the keys returned from get_futex_key (see below).
378 */
379static struct futex_hash_bucket *hash_futex(union futex_key *key)
380{
381 u32 hash = jhash2((u32*)&key->both.word,
382 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
383 key->both.offset);
384 return &futex_queues[hash & (futex_hashsize - 1)];
385}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold a reference on an inode or
		 * mm, so the only reason to call get_futex_key_refs() for
		 * them is the barrier required by the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: VERIFY_READ,
 *		VERIFY_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == VERIFY_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 */
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel so warn for now if this happens.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but let's be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(page);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}
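
/*
 * Usage sketch (mirrors the callers below): every successful get_futex_key()
 * must be paired with exactly one put_futex_key() once the key is no longer
 * needed, e.g.:
 *
 *	union futex_key key = FUTEX_KEY_INIT;
 *
 *	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
 *	if (ret)
 *		return ret;
 *	-- operate on the hash bucket for &key --
 *	put_futex_key(&key);
 */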

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from the futex_q's of other
 *		futexes)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
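
/*
 * Sketch of the fault-handling pattern used throughout this file: both
 * helpers above run with pagefaults disabled, so under the hb lock a fault
 * surfaces as -EFAULT instead of sleeping. Callers then drop the lock,
 * fault the page in and retry, roughly:
 *
 *	spin_lock(&hb->lock);
 *	if (get_futex_value_locked(&uval, uaddr)) {
 *		spin_unlock(&hb->lock);
 *		if (get_user(uval, uaddr))	-- may fault and sleep
 *			return -EFAULT;
 *		goto retry;
 *	}
 */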


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 *
 * Must be called with the hb lock held.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *	thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non-PI futex.
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list().
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match.
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4].
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync. A worked example follows below.
 */

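/*
 * Worked example (illustrative): a lock holder with TID 42 dies while a
 * waiter sits in the kernel. exit_robust_list() rewrites the user space
 * word from 42 to FUTEX_WAITERS | FUTEX_OWNER_DIED, so the next lookup
 * sees uTID == 0 and uODIED == 1 with pi_state->owner still NULL - state
 * [4], which is valid. Had user space meanwhile scribbled a non-zero TID
 * into the word, the same lookup would observe state [5] and the
 * operation fails with -EINVAL, as implemented below.
 */
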
/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	WARN_ON(!atomic_read(&pi_state->refcount));

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				return -EINVAL;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_state;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_state;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			return -EINVAL;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		return -EINVAL;
out_state:
	atomic_inc(&pi_state->refcount);
	*ps = pi_state;
	return 0;
}

/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out whether the
	 * task is exiting. To protect against the do_exit change of the task
	 * flags, we do this protected by p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *match = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
		return -EFAULT;

	/* If the user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for. This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  0 - ready to wait;
 *  1 - acquired the lock;
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *match;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if (unlikely((uval & FUTEX_TID_MASK) == vpid))
		return -EDEADLK;

	if (unlikely(should_fail_futex(true)))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	match = futex_top_waiter(hb, key);
	if (match)
		return attach_to_pi_state(uval, match->pi_state, ps);

	/*
	 * No waiter and the user TID is 0. We are here because either the
	 * waiters or the owner died bit is set, we were called from
	 * requeue_cmp_pi, or for whatever other reason something entered
	 * the syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourselves to
	 * the owner. If the owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uval, key, ps);
}

/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
	    || WARN_ON(plist_node_empty(&q->list)))
		return;

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/*
	 * Queue the task for later wakeup, after we've released the
	 * hb->lock. wake_q_add() grabs a reference on p.
	 */
	wake_q_add(wake_q, p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}
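
/*
 * Sketch of the deferred-wakeup pattern (mirrors futex_wake() below): marks
 * are collected under hb->lock and the actual wakeups happen only after the
 * lock is dropped, so woken tasks never contend on hb->lock just to notice
 * they were woken:
 *
 *	WAKE_Q(wake_q);
 *
 *	spin_lock(&hb->lock);
 *	mark_wake_futex(&wake_q, q);	-- unqueue, no wakeup yet
 *	spin_unlock(&hb->lock);
 *	wake_up_q(&wake_q);		-- wakeups outside the lock
 */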

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
			 struct futex_hash_bucket *hb)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 uninitialized_var(curval), newval;
	WAKE_Q(wake_q);
	bool deboost;
	int ret = 0;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * It is possible that the next waiter (the one that brought
	 * this owner to the kernel) timed out and is no longer
	 * waiting on the lock.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. The WAITERS bit is always
	 * kept enabled while there is PI state around. We clean up the
	 * owner died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
		ret = -EFAULT;
	} else if (curval != uval) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}
	if (ret) {
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		return ret;
	}

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

	/*
	 * First, unlock the hash bucket so the waiter does not spin on it
	 * once it is woken up. Second, wake up the waiter before the
	 * priority is adjusted. If we deboost first (and lose our higher
	 * priority), then the task might get scheduled away before the
	 * wake up can take place.
	 */
	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
	if (deboost)
		rt_mutex_adjust_prio(current);

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
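
/*
 * Why the address comparison above matters (illustrative): two tasks doing
 * cross-bucket operations in opposite directions could otherwise deadlock:
 *
 *	Task A				Task B
 *	double_lock_hb(hbX, hbY)	double_lock_hb(hbY, hbX)
 *
 * By ordering on the bucket addresses, both tasks take the lower-addressed
 * lock first, so one always wins and the other waits; the hb1 == hb2 case
 * takes the lock exactly once.
 */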

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}
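
/*
 * Userspace pairing sketch (assumes the raw futex(2) syscall interface;
 * wrappers vary by C library):
 *
 *	waiter:
 *		while (atomic_load(&var) == 0)
 *			syscall(SYS_futex, &var, FUTEX_WAIT, 0, NULL, NULL, 0);
 *	waker:
 *		atomic_store(&var, 1);
 *		syscall(SYS_futex, &var, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * FUTEX_WAIT blocks only if var still holds the expected value (0 here),
 * which is exactly what the revalidation under hb->lock enforces.
 */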

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&key2);
		put_futex_key(&key1);
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex(&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex(&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal. Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later. Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Return:
 *  0 - failed to acquire the lock atomically;
 * >0 - acquired the lock, return value is vpid of the top_waiter
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				      struct futex_hash_bucket *hb1,
				      struct futex_hash_bucket *hb2,
				      union futex_key *key1, union futex_key *key2,
				      struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault. If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1. The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 * >=0 - on success, the number of tasks requeued or woken;
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	WAKE_Q(wake_q);

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner. However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state. Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit. We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it. If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter, as it is blocked on the hb2 lock if
			 * it tries to do so. If something fiddled with it
			 * behind our back, the pi state lookup might unearth
			 * it. So we would rather use the known value than
			 * reread it and hand potential garbage to
			 * lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
			 */
			ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
		}

		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

			/* If the above failed, then pi_state is NULL */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 *
		 * We should never be requeueing a futex_q with a pi_state,
		 * which is awaiting a futex_unlock_pi().
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter) ||
		    this->pi_state) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters. For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter. If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			mark_wake_futex(&wake_q, this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/*
			 * Prepare the waiter to take the rt_mutex. Take a
			 * refcount on the pi_state and store the pointer in
			 * the futex_q object of the waiter.
			 */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task);
			if (ret == 1) {
				/*
				 * We got the lock. We do neither drop the
				 * refcount on pi_state nor clear
				 * this->pi_state because the waiter needs the
				 * pi_state for cleaning up the user space
				 * value. It will drop the refcount after
				 * doing so.
				 */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/*
				 * rt_mutex_start_proxy_lock() detected a
				 * potential deadlock when we tried to queue
				 * that waiter. Drop the pi_state reference
				 * which we took above and remove the pointer
				 * to the state from the waiter's futex_q
				 * object.
				 */
				this->pi_state = NULL;
				put_pi_state(pi_state);
				/*
				 * We stop queueing more waiters and let user
				 * space deal with the mess.
				 */
				break;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

	/*
	 * We took an extra initial reference to the pi_state either
	 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
	 * need to drop it here again.
	 */
	put_pi_state(pi_state);

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
	hb_waiters_dec(hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer. We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret ? ret : task_count;
}

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb);

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
	return hb;
}

static inline void
queue_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	hb_waiters_dec(hb);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me(). The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the
 * unqueue state is implicit in the state of the woken task (see
 * futex_wait_requeue_pi() for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 *   (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}
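
/*
 * Example (illustrative numbers): with MAX_RT_PRIO == 100, a SCHED_FIFO
 * task at normal_prio 10 is queued with plist priority 10, while every
 * SCHED_OTHER task is queued at 100. The plist keeps lower values first,
 * so the RT task is always woken ahead of the fair-scheduled ones, which
 * retain FIFO order among themselves.
 */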

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *  1 - if the futex_q was still queued (and we unqueued it);
 *  0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following
	 * spin_lock. Use READ_ONCE to forbid the compiler from reloading
	 * q->lock_ptr and optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}
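
/*
 * Timeline sketch of the race handled above (illustrative):
 *
 *	waiter (unqueue_me)		requeue path
 *	lock_ptr = READ_ONCE(...)	-- reads &hb1->lock
 *					q->lock_ptr = &hb2->lock;
 *	spin_lock(lock_ptr)		-- takes the stale hb1 lock
 *	lock_ptr != q->lock_ptr		-- detected, unlock and retry
 *
 * Because a requeue only ever moves q forward to a new bucket while
 * holding both bucket locks, lock_ptr can never change back to a value
 * we already read, so the recheck is sufficient.
 */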

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with the hash bucket lock held and mm->sem held for
 * non-private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, uninitialized_var(curval), newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * previous highest priority waiter or we are the highest priority
	 * waiter but failed to get the rtmutex the first time.
	 * We have to replace the newowner TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for COW.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the highest priority
	 * waiter itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  1 - success, lock taken;
 *  0 - success, lock not taken;
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourselves from
		 * the rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late.
		 */
		raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		if (!owner)
			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
		raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
		ret = fixup_pi_state_owner(uaddr, q, owner);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using smp_store_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout)
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}
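
/*
 * Ordering sketch for the lost-wakeup guarantee (illustrative): the waiter
 * sets TASK_INTERRUPTIBLE before queue_me() drops hb->lock, and a waker
 * only wakes tasks it finds on the list under that same lock. Either the
 * waker sees us on the list and wakes us, or we find q->list already empty
 * and skip schedule() - the window for sleeping forever is closed by the
 * barrier pairing described in the ordering comment at the top of this file.
 */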
2287
/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket. Get the futex value and
 * compare it with the expected value. Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  0 - uaddr contains val and hb has been locked;
 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			    struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond. If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr. This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the expected value while
	 * the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(*hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}
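
/*
 * Illustrative userspace counterpart (a sketch under assumed names, not
 * kernel code): a waiter pairing with the compare-and-block protocol above
 * rechecks the futex word and treats EAGAIN ("uaddr did not contain val")
 * as a cue to re-evaluate the condition rather than as an error:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <errno.h>
 *	#include <stdint.h>
 *
 *	static void wait_while_equal(uint32_t *word, uint32_t val)
 *	{
 *		while (__atomic_load_n(word, __ATOMIC_ACQUIRE) == val) {
 *			if (syscall(SYS_futex, word, FUTEX_WAIT_PRIVATE, val,
 *				    NULL, NULL, 0) == -1 &&
 *			    errno != EAGAIN && errno != EINTR)
 *				break;	/­* unexpected error, give up *­/
 *		}
 *	}
 */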

static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}

/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes; it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.)
 *
 * Also serves as the futex TRYLOCK_PI operation, with the corresponding
 * semantics.
 */
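
/*
 * For context, a minimal sketch of the userspace fastpath this slowpath
 * backs up (illustrative only; 'futex_word' is a hypothetical uint32_t
 * pointer, and a real implementation such as glibc also deals with the
 * FUTEX_WAITERS/FUTEX_OWNER_DIED bits and robust lists):
 *
 *	uint32_t expected = 0;
 *	pid_t tid = syscall(SYS_gettid);
 *
 *	if (!__atomic_compare_exchange_n(futex_word, &expected, tid, 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */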
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
	if (unlikely(ret)) {
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - Task is exiting and we just wait for the
			 *   exit to complete.
			 * - The user space value changed.
			 */
			queue_unlock(hb);
			put_futex_key(&q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock) {
		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
	} else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that. If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}

/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
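
/*
 * The matching userspace fastpath, sketched (illustrative only, with the
 * same hypothetical 'futex_word' as above): the owner tries the TID -> 0
 * transition itself and enters the kernel only when waiters are present or
 * the value is unexpected:
 *
 *	uint32_t expected = syscall(SYS_gettid);
 *
 *	if (!__atomic_compare_exchange_n(futex_word, &expected, 0, 0,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */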
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
	union futex_key key = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb;
	struct futex_q *match;
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
	if (ret)
		return ret;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
	 */
	match = futex_top_waiter(hb, &key);
	if (match) {
		ret = wake_futex_pi(uaddr, uval, match, hb);
		/*
		 * In case of success wake_futex_pi dropped the hash
		 * bucket lock.
		 */
		if (!ret)
			goto out_putkey;
		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
		if (ret == -EAGAIN) {
			spin_unlock(&hb->lock);
			put_futex_key(&key);
			goto retry;
		}
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
		goto out_unlock;
	}

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We preserve neither
	 * the WAITERS bit nor the OWNER_DIED one. We are the owner.
	 */
	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
		goto pi_faulted;

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

out_unlock:
	spin_unlock(&hb->lock);
out_putkey:
	put_futex_key(&key);
	return ret;

pi_faulted:
	spin_unlock(&hb->lock);
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex. If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller. Must be
 * called with the hb lock held.
 *
 * Return:
 *  0 - no early wakeup detected;
 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);
		hb_waiters_dec(hb);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and distinct from uaddr. Normal wakeup will
 * wake on uaddr2 and complete the acquisition of the rt_mutex prior to
 * returning to userspace. This ensures the rt_mutex maintains an owner when
 * it has waiters; without one, the pi logic would not know which task to
 * boost/deboost, if there was a need to.
 *
 * We call schedule() in futex_wait_queue_me() when we enqueue and return
 * there via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, clean up and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we clean up and return -ETIMEDOUT.
 *
 * Return:
 *  0 - On success;
 * <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct rt_mutex *pi_mutex = NULL;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	debug_rt_mutex_init_waiter(&rt_waiter);
	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
	RB_CLEAR_NODE(&rt_waiter.tree_entry);
	rt_waiter.task = NULL;

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
		queue_unlock(hb);
		ret = -EINVAL;
		goto out_put_keys;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code.
	 * futex_requeue() dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
		}
	} else {
		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
		debug_rt_mutex_free_waiter(&rt_waiter);

		spin_lock(q.lock_ptr);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that. If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	/*
	 * If fixup_pi_state_owner() faulted and was unable to handle the
	 * fault, unlock the rt_mutex and return the fault to userspace.
	 */
	if (ret == -EFAULT) {
		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
			rt_mutex_unlock(pi_mutex);
	} else if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK. Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
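
/*
 * Illustrative pairing for the above (a sketch, not a glibc excerpt; the
 * 'cond' and 'mutex' names are hypothetical): a condvar built on these ops
 * waits on the condvar word and lets the waker requeue sleepers onto the
 * PI mutex word:
 *
 *	// waiter side, with the PI mutex already released:
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI, seq,
 *		timeout, &mutex->word, 0);
 *
 *	// broadcast side: wake one waiter, requeue the rest onto the mutex
 *	// (nr_requeue rides in the timeout slot, see the syscall entry below):
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)(long)INT_MAX, &mutex->word, seq);
 */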

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */
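
/*
 * For reference, a minimal sketch of the userspace registration side, using
 * the UAPI structures from <linux/futex.h> ('struct my_lock' is hypothetical,
 * standing in for a lock type that embeds a robust_list node next to its
 * futex word; glibc does all of this per thread automatically):
 *
 *	struct robust_list_head head = {
 *		.list		 = { .next = &head.list },  // empty circular list
 *		.futex_offset	 = offsetof(struct my_lock, futex_word) -
 *				   offsetof(struct my_lock, list),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */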

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, uninitialized_var(nval), mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		/*
		 * We are not holding a lock here, but we want to have
		 * the pagefault_disable/enable() protection because
		 * we want to handle the fault gracefully. If the
		 * access fails we try to fault in the futex with R/W
		 * verification via get_user_pages. get_user() above
		 * does not guarantee R/W access. If that fails we
		 * give up and leave the futex locked.
		 */
		if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		}
		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}
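
/*
 * Userspace sees the effect of this cleanup on its next lock attempt (a
 * sketch; POSIX robust mutexes surface this as EOWNERDEAD from
 * pthread_mutex_lock()):
 *
 *	uint32_t val = __atomic_load_n(futex_word, __ATOMIC_ACQUIRE);
 *
 *	if (val & FUTEX_OWNER_DIED) {
 *		// the previous owner died holding the lock: the new owner
 *		// must treat the protected state as possibly inconsistent
 *	}
 */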

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of a list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
		    cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
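		/* fall through */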
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
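		/* fall through */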
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}

SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * The requeue parameter is passed in 'utime' if cmd == FUTEX_*_REQUEUE_*;
	 * the number of waiters to wake is passed in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
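
/*
 * Example of the 'utime' overload above (illustrative): a userspace requeue
 * passes the nr_requeue count where a timeout pointer would normally go.
 * Here one waiter on uaddr is woken and up to INT_MAX others are moved to
 * uaddr2, provided *uaddr still equals expected_val:
 *
 *	syscall(SYS_futex, uaddr, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(long)INT_MAX, uaddr2, expected_val);
 */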

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on a functional
	 * implementation; the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);