Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Fast Userspace Mutexes (which I call "Futexes!").
4 * (C) Rusty Russell, IBM 2002
5 *
6 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8 *
9 * Removed page pinning, fix privately mapped COW pages and other cleanups
10 * (C) Copyright 2003, 2004 Jamie Lokier
11 *
12 * Robust futex support started by Ingo Molnar
13 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15 *
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
17 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19 *
20 * PRIVATE futexes by Eric Dumazet
21 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22 *
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24 * Copyright (C) IBM Corporation, 2009
25 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
26 *
27 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28 * enough at me, Linus for the original (flawed) idea, Matthew
29 * Kirkwood for proof-of-concept implementation.
30 *
31 * "The futexes are also cursed."
32 * "But they come in a choice of three flavours!"
33 */
34#include <linux/compat.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fs.h>
38#include <linux/file.h>
39#include <linux/jhash.h>
40#include <linux/init.h>
41#include <linux/futex.h>
42#include <linux/mount.h>
43#include <linux/pagemap.h>
44#include <linux/syscalls.h>
45#include <linux/signal.h>
46#include <linux/export.h>
47#include <linux/magic.h>
48#include <linux/pid.h>
49#include <linux/nsproxy.h>
50#include <linux/ptrace.h>
51#include <linux/sched/rt.h>
52#include <linux/sched/wake_q.h>
53#include <linux/sched/mm.h>
54#include <linux/hugetlb.h>
55#include <linux/freezer.h>
56#include <linux/memblock.h>
57#include <linux/fault-inject.h>
58#include <linux/refcount.h>
59
60#include <asm/futex.h>
61
62#include "locking/rtmutex_common.h"
63
64/*
65 * READ this before attempting to hack on futexes!
66 *
67 * Basic futex operation and ordering guarantees
68 * =============================================
69 *
70 * The waiter reads the futex value in user space and calls
71 * futex_wait(). This function computes the hash bucket and acquires
72 * the hash bucket lock. After that it reads the futex user space value
73 * again and verifies that the data has not changed. If it has not changed
74 * it enqueues itself into the hash bucket, releases the hash bucket lock
75 * and schedules.
76 *
77 * The waker side modifies the user space value of the futex and calls
78 * futex_wake(). This function computes the hash bucket and acquires the
79 * hash bucket lock. Then it looks for waiters on that futex in the hash
80 * bucket and wakes them.
81 *
82 * In futex wake up scenarios where no tasks are blocked on a futex, taking
83 * the hb spinlock can be avoided and simply return. In order for this
84 * optimization to work, ordering guarantees must exist so that the waiter
85 * being added to the list is acknowledged when the list is concurrently being
86 * checked by the waker, avoiding scenarios like the following:
87 *
88 * CPU 0 CPU 1
89 * val = *futex;
90 * sys_futex(WAIT, futex, val);
91 * futex_wait(futex, val);
92 * uval = *futex;
93 * *futex = newval;
94 * sys_futex(WAKE, futex);
95 * futex_wake(futex);
96 * if (queue_empty())
97 * return;
98 * if (uval == val)
99 * lock(hash_bucket(futex));
100 * queue();
101 * unlock(hash_bucket(futex));
102 * schedule();
103 *
104 * This would cause the waiter on CPU 0 to wait forever because it
105 * missed the transition of the user space value from val to newval
106 * and the waker did not find the waiter in the hash bucket queue.
107 *
108 * The correct serialization ensures that a waiter either observes
109 * the changed user space value before blocking or is woken by a
110 * concurrent waker:
111 *
112 * CPU 0 CPU 1
113 * val = *futex;
114 * sys_futex(WAIT, futex, val);
115 * futex_wait(futex, val);
116 *
117 * waiters++; (a)
118 * smp_mb(); (A) <-- paired with -.
119 * |
120 * lock(hash_bucket(futex)); |
121 * |
122 * uval = *futex; |
123 * | *futex = newval;
124 * | sys_futex(WAKE, futex);
125 * | futex_wake(futex);
126 * |
127 * `--------> smp_mb(); (B)
128 * if (uval == val)
129 * queue();
130 * unlock(hash_bucket(futex));
131 * schedule(); if (waiters)
132 * lock(hash_bucket(futex));
133 * else wake_waiters(futex);
134 * waiters--; (b) unlock(hash_bucket(futex));
135 *
136 * Where (A) orders the waiters increment and the futex value read through
137 * atomic operations (see hb_waiters_inc) and where (B) orders the write
138 * to futex and the waiters read -- this is done by the barriers for both
139 * shared and private futexes in get_futex_key_refs().
140 *
141 * This yields the following case (where X:=waiters, Y:=futex):
142 *
143 * X = Y = 0
144 *
145 * w[X]=1 w[Y]=1
146 * MB MB
147 * r[Y]=y r[X]=x
148 *
149 * Which guarantees that x==0 && y==0 is impossible; which translates back into
150 * the guarantee that we cannot both miss the futex variable change and the
151 * enqueue.
152 *
153 * Note that a new waiter is accounted for in (a) even when it is possible that
154 * the wait call can return error, in which case we backtrack from it in (b).
155 * Refer to the comment in queue_lock().
156 *
157 * Similarly, in order to account for waiters being requeued on another
158 * address we always increment the waiters for the destination bucket before
159 * acquiring the lock. It then decrements them again after releasing it -
160 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
161 * will do the additional required waiter count housekeeping. This is done for
162 * double_lock_hb() and double_unlock_hb(), respectively.
163 */
164
165#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
166#define futex_cmpxchg_enabled 1
167#else
168static int __read_mostly futex_cmpxchg_enabled;
169#endif
170
171/*
172 * Futex flags used to encode options to functions and preserve them across
173 * restarts.
174 */
175#ifdef CONFIG_MMU
176# define FLAGS_SHARED 0x01
177#else
178/*
179 * NOMMU does not have per process address space. Let the compiler optimize
180 * code away.
181 */
182# define FLAGS_SHARED 0x00
183#endif
184#define FLAGS_CLOCKRT 0x02
185#define FLAGS_HAS_TIMEOUT 0x04
186
187/*
188 * Priority Inheritance state:
189 */
190struct futex_pi_state {
191 /*
192 * list of 'owned' pi_state instances - these have to be
193 * cleaned up in do_exit() if the task exits prematurely:
194 */
195 struct list_head list;
196
197 /*
198 * The PI object:
199 */
200 struct rt_mutex pi_mutex;
201
202 struct task_struct *owner;
203 refcount_t refcount;
204
205 union futex_key key;
206} __randomize_layout;
207
208/**
209 * struct futex_q - The hashed futex queue entry, one per waiting task
210 * @list: priority-sorted list of tasks waiting on this futex
211 * @task: the task waiting on the futex
212 * @lock_ptr: the hash bucket lock
213 * @key: the key the futex is hashed on
214 * @pi_state: optional priority inheritance state
215 * @rt_waiter: rt_waiter storage for use with requeue_pi
216 * @requeue_pi_key: the requeue_pi target futex key
217 * @bitset: bitset for the optional bitmasked wakeup
218 *
219 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
220 * we can wake only the relevant ones (hashed queues may be shared).
221 *
222 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
223 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
224 * The order of wakeup is always to make the first condition true, then
225 * the second.
226 *
227 * PI futexes are typically woken before they are removed from the hash list via
228 * the rt_mutex code. See unqueue_me_pi().
229 */
230struct futex_q {
231 struct plist_node list;
232
233 struct task_struct *task;
234 spinlock_t *lock_ptr;
235 union futex_key key;
236 struct futex_pi_state *pi_state;
237 struct rt_mutex_waiter *rt_waiter;
238 union futex_key *requeue_pi_key;
239 u32 bitset;
240} __randomize_layout;
241
242static const struct futex_q futex_q_init = {
243 /* list gets initialized in queue_me()*/
244 .key = FUTEX_KEY_INIT,
245 .bitset = FUTEX_BITSET_MATCH_ANY
246};
247
248/*
249 * Hash buckets are shared by all the futex_keys that hash to the same
250 * location. Each key may have multiple futex_q structures, one for each task
251 * waiting on a futex.
252 */
253struct futex_hash_bucket {
254 atomic_t waiters;
255 spinlock_t lock;
256 struct plist_head chain;
257} ____cacheline_aligned_in_smp;
258
259/*
260 * The base of the bucket array and its size are always used together
261 * (after initialization only in hash_futex()), so ensure that they
262 * reside in the same cacheline.
263 */
264static struct {
265 struct futex_hash_bucket *queues;
266 unsigned long hashsize;
267} __futex_data __read_mostly __aligned(2*sizeof(long));
268#define futex_queues (__futex_data.queues)
269#define futex_hashsize (__futex_data.hashsize)
270
271
272/*
273 * Fault injections for futexes.
274 */
275#ifdef CONFIG_FAIL_FUTEX
276
277static struct {
278 struct fault_attr attr;
279
280 bool ignore_private;
281} fail_futex = {
282 .attr = FAULT_ATTR_INITIALIZER,
283 .ignore_private = false,
284};
285
286static int __init setup_fail_futex(char *str)
287{
288 return setup_fault_attr(&fail_futex.attr, str);
289}
290__setup("fail_futex=", setup_fail_futex);
291
292static bool should_fail_futex(bool fshared)
293{
294 if (fail_futex.ignore_private && !fshared)
295 return false;
296
297 return should_fail(&fail_futex.attr, 1);
298}
299
300#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
301
302static int __init fail_futex_debugfs(void)
303{
304 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
305 struct dentry *dir;
306
307 dir = fault_create_debugfs_attr("fail_futex", NULL,
308 &fail_futex.attr);
309 if (IS_ERR(dir))
310 return PTR_ERR(dir);
311
312 debugfs_create_bool("ignore-private", mode, dir,
313 &fail_futex.ignore_private);
314 return 0;
315}
316
317late_initcall(fail_futex_debugfs);
318
319#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
320
321#else
322static inline bool should_fail_futex(bool fshared)
323{
324 return false;
325}
326#endif /* CONFIG_FAIL_FUTEX */
327
328static inline void futex_get_mm(union futex_key *key)
329{
330 mmgrab(key->private.mm);
331 /*
332 * Ensure futex_get_mm() implies a full barrier such that
333 * get_futex_key() implies a full barrier. This is relied upon
334 * as smp_mb(); (B), see the ordering comment above.
335 */
336 smp_mb__after_atomic();
337}
338
339/*
340 * Reflects a new waiter being added to the waitqueue.
341 */
342static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
343{
344#ifdef CONFIG_SMP
345 atomic_inc(&hb->waiters);
346 /*
347 * Full barrier (A), see the ordering comment above.
348 */
349 smp_mb__after_atomic();
350#endif
351}
352
353/*
354 * Reflects a waiter being removed from the waitqueue by wakeup
355 * paths.
356 */
357static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
358{
359#ifdef CONFIG_SMP
360 atomic_dec(&hb->waiters);
361#endif
362}
363
364static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
365{
366#ifdef CONFIG_SMP
367 return atomic_read(&hb->waiters);
368#else
369 return 1;
370#endif
371}
372
373/**
374 * hash_futex - Return the hash bucket in the global hash
375 * @key: Pointer to the futex key for which the hash is calculated
376 *
377 * We hash on the keys returned from get_futex_key (see below) and return the
378 * corresponding hash bucket in the global hash.
379 */
380static struct futex_hash_bucket *hash_futex(union futex_key *key)
381{
382 u32 hash = jhash2((u32*)&key->both.word,
383 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
384 key->both.offset);
385 return &futex_queues[hash & (futex_hashsize - 1)];
386}
387
388
389/**
390 * match_futex - Check whether two futex keys are equal
391 * @key1: Pointer to key1
392 * @key2: Pointer to key2
393 *
394 * Return 1 if two futex_keys are equal, 0 otherwise.
395 */
396static inline int match_futex(union futex_key *key1, union futex_key *key2)
397{
398 return (key1 && key2
399 && key1->both.word == key2->both.word
400 && key1->both.ptr == key2->both.ptr
401 && key1->both.offset == key2->both.offset);
402}
403
404/*
405 * Take a reference to the resource addressed by a key.
406 * Can be called while holding spinlocks.
407 *
408 */
409static void get_futex_key_refs(union futex_key *key)
410{
411 if (!key->both.ptr)
412 return;
413
414 /*
415 * On MMU less systems futexes are always "private" as there is no per
416 * process address space. We need the smp wmb nevertheless - yes,
417 * arch/blackfin has MMU less SMP ...
418 */
419 if (!IS_ENABLED(CONFIG_MMU)) {
420 smp_mb(); /* explicit smp_mb(); (B) */
421 return;
422 }
423
424 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
425 case FUT_OFF_INODE:
426 ihold(key->shared.inode); /* implies smp_mb(); (B) */
427 break;
428 case FUT_OFF_MMSHARED:
429 futex_get_mm(key); /* implies smp_mb(); (B) */
430 break;
431 default:
432 /*
433 * Private futexes do not hold reference on an inode or
434 * mm, therefore the only purpose of calling get_futex_key_refs
435 * is because we need the barrier for the lockless waiter check.
436 */
437 smp_mb(); /* explicit smp_mb(); (B) */
438 }
439}
440
441/*
442 * Drop a reference to the resource addressed by a key.
443 * The hash bucket spinlock must not be held. This is
444 * a no-op for private futexes, see comment in the get
445 * counterpart.
446 */
447static void drop_futex_key_refs(union futex_key *key)
448{
449 if (!key->both.ptr) {
450 /* If we're here then we tried to put a key we failed to get */
451 WARN_ON_ONCE(1);
452 return;
453 }
454
455 if (!IS_ENABLED(CONFIG_MMU))
456 return;
457
458 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
459 case FUT_OFF_INODE:
460 iput(key->shared.inode);
461 break;
462 case FUT_OFF_MMSHARED:
463 mmdrop(key->private.mm);
464 break;
465 }
466}
467
468enum futex_access {
469 FUTEX_READ,
470 FUTEX_WRITE
471};
472
473/**
474 * futex_setup_timer - set up the sleeping hrtimer.
475 * @time: ptr to the given timeout value
476 * @timeout: the hrtimer_sleeper structure to be set up
477 * @flags: futex flags
478 * @range_ns: optional range in ns
479 *
480 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
481 * value given
482 */
483static inline struct hrtimer_sleeper *
484futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
485 int flags, u64 range_ns)
486{
487 if (!time)
488 return NULL;
489
490 hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
491 CLOCK_REALTIME : CLOCK_MONOTONIC,
492 HRTIMER_MODE_ABS);
493 /*
494 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
495 * effectively the same as calling hrtimer_set_expires().
496 */
497 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
498
499 return timeout;
500}
501
502/**
503 * get_futex_key() - Get parameters which are the keys for a futex
504 * @uaddr: virtual address of the futex
505 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
506 * @key: address where result is stored.
507 * @rw: mapping needs to be read/write (values: FUTEX_READ,
508 * FUTEX_WRITE)
509 *
510 * Return: a negative error code or 0
511 *
512 * The key words are stored in @key on success.
513 *
514 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
515 * offset_within_page). For private mappings, it's (uaddr, current->mm).
516 * We can usually work out the index without swapping in the page.
517 *
518 * lock_page() might sleep, the caller should not hold a spinlock.
519 */
520static int
521get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
522{
523 unsigned long address = (unsigned long)uaddr;
524 struct mm_struct *mm = current->mm;
525 struct page *page, *tail;
526 struct address_space *mapping;
527 int err, ro = 0;
528
529 /*
530 * The futex address must be "naturally" aligned.
531 */
532 key->both.offset = address % PAGE_SIZE;
533 if (unlikely((address % sizeof(u32)) != 0))
534 return -EINVAL;
535 address -= key->both.offset;
536
537 if (unlikely(!access_ok(uaddr, sizeof(u32))))
538 return -EFAULT;
539
540 if (unlikely(should_fail_futex(fshared)))
541 return -EFAULT;
542
543 /*
544 * PROCESS_PRIVATE futexes are fast.
545 * As the mm cannot disappear under us and the 'key' only needs
546 * virtual address, we dont even have to find the underlying vma.
547 * Note : We do have to check 'uaddr' is a valid user address,
548 * but access_ok() should be faster than find_vma()
549 */
550 if (!fshared) {
551 key->private.mm = mm;
552 key->private.address = address;
553 get_futex_key_refs(key); /* implies smp_mb(); (B) */
554 return 0;
555 }
556
557again:
558 /* Ignore any VERIFY_READ mapping (futex common case) */
559 if (unlikely(should_fail_futex(fshared)))
560 return -EFAULT;
561
562 err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
563 /*
564 * If write access is not required (eg. FUTEX_WAIT), try
565 * and get read-only access.
566 */
567 if (err == -EFAULT && rw == FUTEX_READ) {
568 err = get_user_pages_fast(address, 1, 0, &page);
569 ro = 1;
570 }
571 if (err < 0)
572 return err;
573 else
574 err = 0;
575
576 /*
577 * The treatment of mapping from this point on is critical. The page
578 * lock protects many things but in this context the page lock
579 * stabilizes mapping, prevents inode freeing in the shared
580 * file-backed region case and guards against movement to swap cache.
581 *
582 * Strictly speaking the page lock is not needed in all cases being
583 * considered here and page lock forces unnecessarily serialization
584 * From this point on, mapping will be re-verified if necessary and
585 * page lock will be acquired only if it is unavoidable
586 *
587 * Mapping checks require the head page for any compound page so the
588 * head page and mapping is looked up now. For anonymous pages, it
589 * does not matter if the page splits in the future as the key is
590 * based on the address. For filesystem-backed pages, the tail is
591 * required as the index of the page determines the key. For
592 * base pages, there is no tail page and tail == page.
593 */
594 tail = page;
595 page = compound_head(page);
596 mapping = READ_ONCE(page->mapping);
597
598 /*
599 * If page->mapping is NULL, then it cannot be a PageAnon
600 * page; but it might be the ZERO_PAGE or in the gate area or
601 * in a special mapping (all cases which we are happy to fail);
602 * or it may have been a good file page when get_user_pages_fast
603 * found it, but truncated or holepunched or subjected to
604 * invalidate_complete_page2 before we got the page lock (also
605 * cases which we are happy to fail). And we hold a reference,
606 * so refcount care in invalidate_complete_page's remove_mapping
607 * prevents drop_caches from setting mapping to NULL beneath us.
608 *
609 * The case we do have to guard against is when memory pressure made
610 * shmem_writepage move it from filecache to swapcache beneath us:
611 * an unlikely race, but we do need to retry for page->mapping.
612 */
613 if (unlikely(!mapping)) {
614 int shmem_swizzled;
615
616 /*
617 * Page lock is required to identify which special case above
618 * applies. If this is really a shmem page then the page lock
619 * will prevent unexpected transitions.
620 */
621 lock_page(page);
622 shmem_swizzled = PageSwapCache(page) || page->mapping;
623 unlock_page(page);
624 put_page(page);
625
626 if (shmem_swizzled)
627 goto again;
628
629 return -EFAULT;
630 }
631
632 /*
633 * Private mappings are handled in a simple way.
634 *
635 * If the futex key is stored on an anonymous page, then the associated
636 * object is the mm which is implicitly pinned by the calling process.
637 *
638 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
639 * it's a read-only handle, it's expected that futexes attach to
640 * the object not the particular process.
641 */
642 if (PageAnon(page)) {
643 /*
644 * A RO anonymous page will never change and thus doesn't make
645 * sense for futex operations.
646 */
647 if (unlikely(should_fail_futex(fshared)) || ro) {
648 err = -EFAULT;
649 goto out;
650 }
651
652 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
653 key->private.mm = mm;
654 key->private.address = address;
655
656 get_futex_key_refs(key); /* implies smp_mb(); (B) */
657
658 } else {
659 struct inode *inode;
660
661 /*
662 * The associated futex object in this case is the inode and
663 * the page->mapping must be traversed. Ordinarily this should
664 * be stabilised under page lock but it's not strictly
665 * necessary in this case as we just want to pin the inode, not
666 * update the radix tree or anything like that.
667 *
668 * The RCU read lock is taken as the inode is finally freed
669 * under RCU. If the mapping still matches expectations then the
670 * mapping->host can be safely accessed as being a valid inode.
671 */
672 rcu_read_lock();
673
674 if (READ_ONCE(page->mapping) != mapping) {
675 rcu_read_unlock();
676 put_page(page);
677
678 goto again;
679 }
680
681 inode = READ_ONCE(mapping->host);
682 if (!inode) {
683 rcu_read_unlock();
684 put_page(page);
685
686 goto again;
687 }
688
689 /*
690 * Take a reference unless it is about to be freed. Previously
691 * this reference was taken by ihold under the page lock
692 * pinning the inode in place so i_lock was unnecessary. The
693 * only way for this check to fail is if the inode was
694 * truncated in parallel which is almost certainly an
695 * application bug. In such a case, just retry.
696 *
697 * We are not calling into get_futex_key_refs() in file-backed
698 * cases, therefore a successful atomic_inc return below will
699 * guarantee that get_futex_key() will still imply smp_mb(); (B).
700 */
701 if (!atomic_inc_not_zero(&inode->i_count)) {
702 rcu_read_unlock();
703 put_page(page);
704
705 goto again;
706 }
707
708 /* Should be impossible but lets be paranoid for now */
709 if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
710 err = -EFAULT;
711 rcu_read_unlock();
712 iput(inode);
713
714 goto out;
715 }
716
717 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
718 key->shared.inode = inode;
719 key->shared.pgoff = basepage_index(tail);
720 rcu_read_unlock();
721 }
722
723out:
724 put_page(page);
725 return err;
726}
727
728static inline void put_futex_key(union futex_key *key)
729{
730 drop_futex_key_refs(key);
731}
732
733/**
734 * fault_in_user_writeable() - Fault in user address and verify RW access
735 * @uaddr: pointer to faulting user space address
736 *
737 * Slow path to fixup the fault we just took in the atomic write
738 * access to @uaddr.
739 *
740 * We have no generic implementation of a non-destructive write to the
741 * user address. We know that we faulted in the atomic pagefault
742 * disabled section so we can as well avoid the #PF overhead by
743 * calling get_user_pages() right away.
744 */
745static int fault_in_user_writeable(u32 __user *uaddr)
746{
747 struct mm_struct *mm = current->mm;
748 int ret;
749
750 down_read(&mm->mmap_sem);
751 ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
752 FAULT_FLAG_WRITE, NULL);
753 up_read(&mm->mmap_sem);
754
755 return ret < 0 ? ret : 0;
756}
757
758/**
759 * futex_top_waiter() - Return the highest priority waiter on a futex
760 * @hb: the hash bucket the futex_q's reside in
761 * @key: the futex key (to distinguish it from other futex futex_q's)
762 *
763 * Must be called with the hb lock held.
764 */
765static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
766 union futex_key *key)
767{
768 struct futex_q *this;
769
770 plist_for_each_entry(this, &hb->chain, list) {
771 if (match_futex(&this->key, key))
772 return this;
773 }
774 return NULL;
775}
776
777static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
778 u32 uval, u32 newval)
779{
780 int ret;
781
782 pagefault_disable();
783 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
784 pagefault_enable();
785
786 return ret;
787}
788
789static int get_futex_value_locked(u32 *dest, u32 __user *from)
790{
791 int ret;
792
793 pagefault_disable();
794 ret = __get_user(*dest, from);
795 pagefault_enable();
796
797 return ret ? -EFAULT : 0;
798}
799
800
801/*
802 * PI code:
803 */
804static int refill_pi_state_cache(void)
805{
806 struct futex_pi_state *pi_state;
807
808 if (likely(current->pi_state_cache))
809 return 0;
810
811 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
812
813 if (!pi_state)
814 return -ENOMEM;
815
816 INIT_LIST_HEAD(&pi_state->list);
817 /* pi_mutex gets initialized later */
818 pi_state->owner = NULL;
819 refcount_set(&pi_state->refcount, 1);
820 pi_state->key = FUTEX_KEY_INIT;
821
822 current->pi_state_cache = pi_state;
823
824 return 0;
825}
826
827static struct futex_pi_state *alloc_pi_state(void)
828{
829 struct futex_pi_state *pi_state = current->pi_state_cache;
830
831 WARN_ON(!pi_state);
832 current->pi_state_cache = NULL;
833
834 return pi_state;
835}
836
837static void get_pi_state(struct futex_pi_state *pi_state)
838{
839 WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
840}
841
842/*
843 * Drops a reference to the pi_state object and frees or caches it
844 * when the last reference is gone.
845 */
846static void put_pi_state(struct futex_pi_state *pi_state)
847{
848 if (!pi_state)
849 return;
850
851 if (!refcount_dec_and_test(&pi_state->refcount))
852 return;
853
854 /*
855 * If pi_state->owner is NULL, the owner is most probably dying
856 * and has cleaned up the pi_state already
857 */
858 if (pi_state->owner) {
859 struct task_struct *owner;
860
861 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
862 owner = pi_state->owner;
863 if (owner) {
864 raw_spin_lock(&owner->pi_lock);
865 list_del_init(&pi_state->list);
866 raw_spin_unlock(&owner->pi_lock);
867 }
868 rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
869 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
870 }
871
872 if (current->pi_state_cache) {
873 kfree(pi_state);
874 } else {
875 /*
876 * pi_state->list is already empty.
877 * clear pi_state->owner.
878 * refcount is at 0 - put it back to 1.
879 */
880 pi_state->owner = NULL;
881 refcount_set(&pi_state->refcount, 1);
882 current->pi_state_cache = pi_state;
883 }
884}
885
886#ifdef CONFIG_FUTEX_PI
887
888/*
889 * This task is holding PI mutexes at exit time => bad.
890 * Kernel cleans up PI-state, but userspace is likely hosed.
891 * (Robust-futex cleanup is separate and might save the day for userspace.)
892 */
893void exit_pi_state_list(struct task_struct *curr)
894{
895 struct list_head *next, *head = &curr->pi_state_list;
896 struct futex_pi_state *pi_state;
897 struct futex_hash_bucket *hb;
898 union futex_key key = FUTEX_KEY_INIT;
899
900 if (!futex_cmpxchg_enabled)
901 return;
902 /*
903 * We are a ZOMBIE and nobody can enqueue itself on
904 * pi_state_list anymore, but we have to be careful
905 * versus waiters unqueueing themselves:
906 */
907 raw_spin_lock_irq(&curr->pi_lock);
908 while (!list_empty(head)) {
909 next = head->next;
910 pi_state = list_entry(next, struct futex_pi_state, list);
911 key = pi_state->key;
912 hb = hash_futex(&key);
913
914 /*
915 * We can race against put_pi_state() removing itself from the
916 * list (a waiter going away). put_pi_state() will first
917 * decrement the reference count and then modify the list, so
918 * its possible to see the list entry but fail this reference
919 * acquire.
920 *
921 * In that case; drop the locks to let put_pi_state() make
922 * progress and retry the loop.
923 */
924 if (!refcount_inc_not_zero(&pi_state->refcount)) {
925 raw_spin_unlock_irq(&curr->pi_lock);
926 cpu_relax();
927 raw_spin_lock_irq(&curr->pi_lock);
928 continue;
929 }
930 raw_spin_unlock_irq(&curr->pi_lock);
931
932 spin_lock(&hb->lock);
933 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
934 raw_spin_lock(&curr->pi_lock);
935 /*
936 * We dropped the pi-lock, so re-check whether this
937 * task still owns the PI-state:
938 */
939 if (head->next != next) {
940 /* retain curr->pi_lock for the loop invariant */
941 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
942 spin_unlock(&hb->lock);
943 put_pi_state(pi_state);
944 continue;
945 }
946
947 WARN_ON(pi_state->owner != curr);
948 WARN_ON(list_empty(&pi_state->list));
949 list_del_init(&pi_state->list);
950 pi_state->owner = NULL;
951
952 raw_spin_unlock(&curr->pi_lock);
953 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
954 spin_unlock(&hb->lock);
955
956 rt_mutex_futex_unlock(&pi_state->pi_mutex);
957 put_pi_state(pi_state);
958
959 raw_spin_lock_irq(&curr->pi_lock);
960 }
961 raw_spin_unlock_irq(&curr->pi_lock);
962}
963
964#endif
965
966/*
967 * We need to check the following states:
968 *
969 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
970 *
971 * [1] NULL | --- | --- | 0 | 0/1 | Valid
972 * [2] NULL | --- | --- | >0 | 0/1 | Valid
973 *
974 * [3] Found | NULL | -- | Any | 0/1 | Invalid
975 *
976 * [4] Found | Found | NULL | 0 | 1 | Valid
977 * [5] Found | Found | NULL | >0 | 1 | Invalid
978 *
979 * [6] Found | Found | task | 0 | 1 | Valid
980 *
981 * [7] Found | Found | NULL | Any | 0 | Invalid
982 *
983 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
984 * [9] Found | Found | task | 0 | 0 | Invalid
985 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
986 *
987 * [1] Indicates that the kernel can acquire the futex atomically. We
988 * came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
989 *
990 * [2] Valid, if TID does not belong to a kernel thread. If no matching
991 * thread is found then it indicates that the owner TID has died.
992 *
993 * [3] Invalid. The waiter is queued on a non PI futex
994 *
995 * [4] Valid state after exit_robust_list(), which sets the user space
996 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
997 *
998 * [5] The user space value got manipulated between exit_robust_list()
999 * and exit_pi_state_list()
1000 *
1001 * [6] Valid state after exit_pi_state_list() which sets the new owner in
1002 * the pi_state but cannot access the user space value.
1003 *
1004 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
1005 *
1006 * [8] Owner and user space value match
1007 *
1008 * [9] There is no transient state which sets the user space TID to 0
1009 * except exit_robust_list(), but this is indicated by the
1010 * FUTEX_OWNER_DIED bit. See [4]
1011 *
1012 * [10] There is no transient state which leaves owner and user space
1013 * TID out of sync.
1014 *
1015 *
1016 * Serialization and lifetime rules:
1017 *
1018 * hb->lock:
1019 *
1020 * hb -> futex_q, relation
1021 * futex_q -> pi_state, relation
1022 *
1023 * (cannot be raw because hb can contain arbitrary amount
1024 * of futex_q's)
1025 *
1026 * pi_mutex->wait_lock:
1027 *
1028 * {uval, pi_state}
1029 *
1030 * (and pi_mutex 'obviously')
1031 *
1032 * p->pi_lock:
1033 *
1034 * p->pi_state_list -> pi_state->list, relation
1035 *
1036 * pi_state->refcount:
1037 *
1038 * pi_state lifetime
1039 *
1040 *
1041 * Lock order:
1042 *
1043 * hb->lock
1044 * pi_mutex->wait_lock
1045 * p->pi_lock
1046 *
1047 */
1048
1049/*
1050 * Validate that the existing waiter has a pi_state and sanity check
1051 * the pi_state against the user space value. If correct, attach to
1052 * it.
1053 */
1054static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
1055 struct futex_pi_state *pi_state,
1056 struct futex_pi_state **ps)
1057{
1058 pid_t pid = uval & FUTEX_TID_MASK;
1059 u32 uval2;
1060 int ret;
1061
1062 /*
1063 * Userspace might have messed up non-PI and PI futexes [3]
1064 */
1065 if (unlikely(!pi_state))
1066 return -EINVAL;
1067
1068 /*
1069 * We get here with hb->lock held, and having found a
1070 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
1071 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
1072 * which in turn means that futex_lock_pi() still has a reference on
1073 * our pi_state.
1074 *
1075 * The waiter holding a reference on @pi_state also protects against
1076 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
1077 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
1078 * free pi_state before we can take a reference ourselves.
1079 */
1080 WARN_ON(!refcount_read(&pi_state->refcount));
1081
1082 /*
1083 * Now that we have a pi_state, we can acquire wait_lock
1084 * and do the state validation.
1085 */
1086 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1087
1088 /*
1089 * Since {uval, pi_state} is serialized by wait_lock, and our current
1090 * uval was read without holding it, it can have changed. Verify it
1091 * still is what we expect it to be, otherwise retry the entire
1092 * operation.
1093 */
1094 if (get_futex_value_locked(&uval2, uaddr))
1095 goto out_efault;
1096
1097 if (uval != uval2)
1098 goto out_eagain;
1099
1100 /*
1101 * Handle the owner died case:
1102 */
1103 if (uval & FUTEX_OWNER_DIED) {
1104 /*
1105 * exit_pi_state_list sets owner to NULL and wakes the
1106 * topmost waiter. The task which acquires the
1107 * pi_state->rt_mutex will fixup owner.
1108 */
1109 if (!pi_state->owner) {
1110 /*
1111 * No pi state owner, but the user space TID
1112 * is not 0. Inconsistent state. [5]
1113 */
1114 if (pid)
1115 goto out_einval;
1116 /*
1117 * Take a ref on the state and return success. [4]
1118 */
1119 goto out_attach;
1120 }
1121
1122 /*
1123 * If TID is 0, then either the dying owner has not
1124 * yet executed exit_pi_state_list() or some waiter
1125 * acquired the rtmutex in the pi state, but did not
1126 * yet fixup the TID in user space.
1127 *
1128 * Take a ref on the state and return success. [6]
1129 */
1130 if (!pid)
1131 goto out_attach;
1132 } else {
1133 /*
1134 * If the owner died bit is not set, then the pi_state
1135 * must have an owner. [7]
1136 */
1137 if (!pi_state->owner)
1138 goto out_einval;
1139 }
1140
1141 /*
1142 * Bail out if user space manipulated the futex value. If pi
1143 * state exists then the owner TID must be the same as the
1144 * user space TID. [9/10]
1145 */
1146 if (pid != task_pid_vnr(pi_state->owner))
1147 goto out_einval;
1148
1149out_attach:
1150 get_pi_state(pi_state);
1151 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1152 *ps = pi_state;
1153 return 0;
1154
1155out_einval:
1156 ret = -EINVAL;
1157 goto out_error;
1158
1159out_eagain:
1160 ret = -EAGAIN;
1161 goto out_error;
1162
1163out_efault:
1164 ret = -EFAULT;
1165 goto out_error;
1166
1167out_error:
1168 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1169 return ret;
1170}
1171
1172static int handle_exit_race(u32 __user *uaddr, u32 uval,
1173 struct task_struct *tsk)
1174{
1175 u32 uval2;
1176
1177 /*
1178 * If PF_EXITPIDONE is not yet set, then try again.
1179 */
1180 if (tsk && !(tsk->flags & PF_EXITPIDONE))
1181 return -EAGAIN;
1182
1183 /*
1184 * Reread the user space value to handle the following situation:
1185 *
1186 * CPU0 CPU1
1187 *
1188 * sys_exit() sys_futex()
1189 * do_exit() futex_lock_pi()
1190 * futex_lock_pi_atomic()
1191 * exit_signals(tsk) No waiters:
1192 * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
1193 * mm_release(tsk) Set waiter bit
1194 * exit_robust_list(tsk) { *uaddr = 0x80000PID;
1195 * Set owner died attach_to_pi_owner() {
1196 * *uaddr = 0xC0000000; tsk = get_task(PID);
1197 * } if (!tsk->flags & PF_EXITING) {
1198 * ... attach();
1199 * tsk->flags |= PF_EXITPIDONE; } else {
1200 * if (!(tsk->flags & PF_EXITPIDONE))
1201 * return -EAGAIN;
1202 * return -ESRCH; <--- FAIL
1203 * }
1204 *
1205 * Returning ESRCH unconditionally is wrong here because the
1206 * user space value has been changed by the exiting task.
1207 *
1208 * The same logic applies to the case where the exiting task is
1209 * already gone.
1210 */
1211 if (get_futex_value_locked(&uval2, uaddr))
1212 return -EFAULT;
1213
1214 /* If the user space value has changed, try again. */
1215 if (uval2 != uval)
1216 return -EAGAIN;
1217
1218 /*
1219 * The exiting task did not have a robust list, the robust list was
1220 * corrupted or the user space value in *uaddr is simply bogus.
1221 * Give up and tell user space.
1222 */
1223 return -ESRCH;
1224}
1225
1226/*
1227 * Lookup the task for the TID provided from user space and attach to
1228 * it after doing proper sanity checks.
1229 */
1230static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
1231 struct futex_pi_state **ps)
1232{
1233 pid_t pid = uval & FUTEX_TID_MASK;
1234 struct futex_pi_state *pi_state;
1235 struct task_struct *p;
1236
1237 /*
1238 * We are the first waiter - try to look up the real owner and attach
1239 * the new pi_state to it, but bail out when TID = 0 [1]
1240 *
1241 * The !pid check is paranoid. None of the call sites should end up
1242 * with pid == 0, but better safe than sorry. Let the caller retry
1243 */
1244 if (!pid)
1245 return -EAGAIN;
1246 p = find_get_task_by_vpid(pid);
1247 if (!p)
1248 return handle_exit_race(uaddr, uval, NULL);
1249
1250 if (unlikely(p->flags & PF_KTHREAD)) {
1251 put_task_struct(p);
1252 return -EPERM;
1253 }
1254
1255 /*
1256 * We need to look at the task state flags to figure out,
1257 * whether the task is exiting. To protect against the do_exit
1258 * change of the task flags, we do this protected by
1259 * p->pi_lock:
1260 */
1261 raw_spin_lock_irq(&p->pi_lock);
1262 if (unlikely(p->flags & PF_EXITING)) {
1263 /*
1264 * The task is on the way out. When PF_EXITPIDONE is
1265 * set, we know that the task has finished the
1266 * cleanup:
1267 */
1268 int ret = handle_exit_race(uaddr, uval, p);
1269
1270 raw_spin_unlock_irq(&p->pi_lock);
1271 put_task_struct(p);
1272 return ret;
1273 }
1274
1275 /*
1276 * No existing pi state. First waiter. [2]
1277 *
1278 * This creates pi_state, we have hb->lock held, this means nothing can
1279 * observe this state, wait_lock is irrelevant.
1280 */
1281 pi_state = alloc_pi_state();
1282
1283 /*
1284 * Initialize the pi_mutex in locked state and make @p
1285 * the owner of it:
1286 */
1287 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1288
1289 /* Store the key for possible exit cleanups: */
1290 pi_state->key = *key;
1291
1292 WARN_ON(!list_empty(&pi_state->list));
1293 list_add(&pi_state->list, &p->pi_state_list);
1294 /*
1295 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
1296 * because there is no concurrency as the object is not published yet.
1297 */
1298 pi_state->owner = p;
1299 raw_spin_unlock_irq(&p->pi_lock);
1300
1301 put_task_struct(p);
1302
1303 *ps = pi_state;
1304
1305 return 0;
1306}
1307
1308static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1309 struct futex_hash_bucket *hb,
1310 union futex_key *key, struct futex_pi_state **ps)
1311{
1312 struct futex_q *top_waiter = futex_top_waiter(hb, key);
1313
1314 /*
1315 * If there is a waiter on that futex, validate it and
1316 * attach to the pi_state when the validation succeeds.
1317 */
1318 if (top_waiter)
1319 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1320
1321 /*
1322 * We are the first waiter - try to look up the owner based on
1323 * @uval and attach to it.
1324 */
1325 return attach_to_pi_owner(uaddr, uval, key, ps);
1326}
1327
1328static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1329{
1330 int err;
1331 u32 uninitialized_var(curval);
1332
1333 if (unlikely(should_fail_futex(true)))
1334 return -EFAULT;
1335
1336 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1337 if (unlikely(err))
1338 return err;
1339
1340 /* If user space value changed, let the caller retry */
1341 return curval != uval ? -EAGAIN : 0;
1342}
1343
1344/**
1345 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1346 * @uaddr: the pi futex user address
1347 * @hb: the pi futex hash bucket
1348 * @key: the futex key associated with uaddr and hb
1349 * @ps: the pi_state pointer where we store the result of the
1350 * lookup
1351 * @task: the task to perform the atomic lock work for. This will
1352 * be "current" except in the case of requeue pi.
1353 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1354 *
1355 * Return:
1356 * - 0 - ready to wait;
1357 * - 1 - acquired the lock;
1358 * - <0 - error
1359 *
1360 * The hb->lock and futex_key refs shall be held by the caller.
1361 */
1362static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1363 union futex_key *key,
1364 struct futex_pi_state **ps,
1365 struct task_struct *task, int set_waiters)
1366{
1367 u32 uval, newval, vpid = task_pid_vnr(task);
1368 struct futex_q *top_waiter;
1369 int ret;
1370
1371 /*
1372 * Read the user space value first so we can validate a few
1373 * things before proceeding further.
1374 */
1375 if (get_futex_value_locked(&uval, uaddr))
1376 return -EFAULT;
1377
1378 if (unlikely(should_fail_futex(true)))
1379 return -EFAULT;
1380
1381 /*
1382 * Detect deadlocks.
1383 */
1384 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
1385 return -EDEADLK;
1386
1387 if ((unlikely(should_fail_futex(true))))
1388 return -EDEADLK;
1389
1390 /*
1391 * Lookup existing state first. If it exists, try to attach to
1392 * its pi_state.
1393 */
1394 top_waiter = futex_top_waiter(hb, key);
1395 if (top_waiter)
1396 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1397
1398 /*
1399 * No waiter and user TID is 0. We are here because the
1400 * waiters or the owner died bit is set or called from
1401 * requeue_cmp_pi or for whatever reason something took the
1402 * syscall.
1403 */
1404 if (!(uval & FUTEX_TID_MASK)) {
1405 /*
1406 * We take over the futex. No other waiters and the user space
1407 * TID is 0. We preserve the owner died bit.
1408 */
1409 newval = uval & FUTEX_OWNER_DIED;
1410 newval |= vpid;
1411
1412 /* The futex requeue_pi code can enforce the waiters bit */
1413 if (set_waiters)
1414 newval |= FUTEX_WAITERS;
1415
1416 ret = lock_pi_update_atomic(uaddr, uval, newval);
1417 /* If the take over worked, return 1 */
1418 return ret < 0 ? ret : 1;
1419 }
1420
1421 /*
1422 * First waiter. Set the waiters bit before attaching ourself to
1423 * the owner. If owner tries to unlock, it will be forced into
1424 * the kernel and blocked on hb->lock.
1425 */
1426 newval = uval | FUTEX_WAITERS;
1427 ret = lock_pi_update_atomic(uaddr, uval, newval);
1428 if (ret)
1429 return ret;
1430 /*
1431 * If the update of the user space value succeeded, we try to
1432 * attach to the owner. If that fails, no harm done, we only
1433 * set the FUTEX_WAITERS bit in the user space variable.
1434 */
1435 return attach_to_pi_owner(uaddr, newval, key, ps);
1436}
1437
1438/**
1439 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1440 * @q: The futex_q to unqueue
1441 *
1442 * The q->lock_ptr must not be NULL and must be held by the caller.
1443 */
1444static void __unqueue_futex(struct futex_q *q)
1445{
1446 struct futex_hash_bucket *hb;
1447
1448 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
1449 return;
1450 lockdep_assert_held(q->lock_ptr);
1451
1452 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1453 plist_del(&q->list, &hb->chain);
1454 hb_waiters_dec(hb);
1455}
1456
1457/*
1458 * The hash bucket lock must be held when this is called.
1459 * Afterwards, the futex_q must not be accessed. Callers
1460 * must ensure to later call wake_up_q() for the actual
1461 * wakeups to occur.
1462 */
1463static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1464{
1465 struct task_struct *p = q->task;
1466
1467 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1468 return;
1469
1470 get_task_struct(p);
1471 __unqueue_futex(q);
1472 /*
1473 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
1474 * is written, without taking any locks. This is possible in the event
1475 * of a spurious wakeup, for example. A memory barrier is required here
1476 * to prevent the following store to lock_ptr from getting ahead of the
1477 * plist_del in __unqueue_futex().
1478 */
1479 smp_store_release(&q->lock_ptr, NULL);
1480
1481 /*
1482 * Queue the task for later wakeup for after we've released
1483 * the hb->lock. wake_q_add() grabs reference to p.
1484 */
1485 wake_q_add_safe(wake_q, p);
1486}
1487
1488/*
1489 * Caller must hold a reference on @pi_state.
1490 */
1491static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
1492{
1493 u32 uninitialized_var(curval), newval;
1494 struct task_struct *new_owner;
1495 bool postunlock = false;
1496 DEFINE_WAKE_Q(wake_q);
1497 int ret = 0;
1498
1499 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1500 if (WARN_ON_ONCE(!new_owner)) {
1501 /*
1502 * As per the comment in futex_unlock_pi() this should not happen.
1503 *
1504 * When this happens, give up our locks and try again, giving
1505 * the futex_lock_pi() instance time to complete, either by
1506 * waiting on the rtmutex or removing itself from the futex
1507 * queue.
1508 */
1509 ret = -EAGAIN;
1510 goto out_unlock;
1511 }
1512
1513 /*
1514 * We pass it to the next owner. The WAITERS bit is always kept
1515 * enabled while there is PI state around. We cleanup the owner
1516 * died bit, because we are the owner.
1517 */
1518 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1519
1520 if (unlikely(should_fail_futex(true)))
1521 ret = -EFAULT;
1522
1523 ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1524 if (!ret && (curval != uval)) {
1525 /*
1526 * If a unconditional UNLOCK_PI operation (user space did not
1527 * try the TID->0 transition) raced with a waiter setting the
1528 * FUTEX_WAITERS flag between get_user() and locking the hash
1529 * bucket lock, retry the operation.
1530 */
1531 if ((FUTEX_TID_MASK & curval) == uval)
1532 ret = -EAGAIN;
1533 else
1534 ret = -EINVAL;
1535 }
1536
1537 if (ret)
1538 goto out_unlock;
1539
1540 /*
1541 * This is a point of no return; once we modify the uval there is no
1542 * going back and subsequent operations must not fail.
1543 */
1544
1545 raw_spin_lock(&pi_state->owner->pi_lock);
1546 WARN_ON(list_empty(&pi_state->list));
1547 list_del_init(&pi_state->list);
1548 raw_spin_unlock(&pi_state->owner->pi_lock);
1549
1550 raw_spin_lock(&new_owner->pi_lock);
1551 WARN_ON(!list_empty(&pi_state->list));
1552 list_add(&pi_state->list, &new_owner->pi_state_list);
1553 pi_state->owner = new_owner;
1554 raw_spin_unlock(&new_owner->pi_lock);
1555
1556 postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1557
1558out_unlock:
1559 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1560
1561 if (postunlock)
1562 rt_mutex_postunlock(&wake_q);
1563
1564 return ret;
1565}
1566
1567/*
1568 * Express the locking dependencies for lockdep:
1569 */
1570static inline void
1571double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1572{
1573 if (hb1 <= hb2) {
1574 spin_lock(&hb1->lock);
1575 if (hb1 < hb2)
1576 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1577 } else { /* hb1 > hb2 */
1578 spin_lock(&hb2->lock);
1579 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1580 }
1581}
1582
1583static inline void
1584double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1585{
1586 spin_unlock(&hb1->lock);
1587 if (hb1 != hb2)
1588 spin_unlock(&hb2->lock);
1589}
1590
1591/*
1592 * Wake up waiters matching bitset queued on this futex (uaddr).
1593 */
1594static int
1595futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1596{
1597 struct futex_hash_bucket *hb;
1598 struct futex_q *this, *next;
1599 union futex_key key = FUTEX_KEY_INIT;
1600 int ret;
1601 DEFINE_WAKE_Q(wake_q);
1602
1603 if (!bitset)
1604 return -EINVAL;
1605
1606 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
1607 if (unlikely(ret != 0))
1608 goto out;
1609
1610 hb = hash_futex(&key);
1611
1612 /* Make sure we really have tasks to wakeup */
1613 if (!hb_waiters_pending(hb))
1614 goto out_put_key;
1615
1616 spin_lock(&hb->lock);
1617
1618 plist_for_each_entry_safe(this, next, &hb->chain, list) {
1619 if (match_futex (&this->key, &key)) {
1620 if (this->pi_state || this->rt_waiter) {
1621 ret = -EINVAL;
1622 break;
1623 }
1624
1625 /* Check if one of the bits is set in both bitsets */
1626 if (!(this->bitset & bitset))
1627 continue;
1628
1629 mark_wake_futex(&wake_q, this);
1630 if (++ret >= nr_wake)
1631 break;
1632 }
1633 }
1634
1635 spin_unlock(&hb->lock);
1636 wake_up_q(&wake_q);
1637out_put_key:
1638 put_futex_key(&key);
1639out:
1640 return ret;
1641}
1642
1643static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1644{
1645 unsigned int op = (encoded_op & 0x70000000) >> 28;
1646 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
1647 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1648 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1649 int oldval, ret;
1650
1651 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
1652 if (oparg < 0 || oparg > 31) {
1653 char comm[sizeof(current->comm)];
1654 /*
1655 * kill this print and return -EINVAL when userspace
1656 * is sane again
1657 */
1658 pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
1659 get_task_comm(comm, current), oparg);
1660 oparg &= 31;
1661 }
1662 oparg = 1 << oparg;
1663 }
1664
1665 if (!access_ok(uaddr, sizeof(u32)))
1666 return -EFAULT;
1667
1668 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
1669 if (ret)
1670 return ret;
1671
1672 switch (cmp) {
1673 case FUTEX_OP_CMP_EQ:
1674 return oldval == cmparg;
1675 case FUTEX_OP_CMP_NE:
1676 return oldval != cmparg;
1677 case FUTEX_OP_CMP_LT:
1678 return oldval < cmparg;
1679 case FUTEX_OP_CMP_GE:
1680 return oldval >= cmparg;
1681 case FUTEX_OP_CMP_LE:
1682 return oldval <= cmparg;
1683 case FUTEX_OP_CMP_GT:
1684 return oldval > cmparg;
1685 default:
1686 return -ENOSYS;
1687 }
1688}
1689
1690/*
1691 * Wake up all waiters hashed on the physical page that is mapped
1692 * to this virtual address:
1693 */
1694static int
1695futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1696 int nr_wake, int nr_wake2, int op)
1697{
1698 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1699 struct futex_hash_bucket *hb1, *hb2;
1700 struct futex_q *this, *next;
1701 int ret, op_ret;
1702 DEFINE_WAKE_Q(wake_q);
1703
1704retry:
1705 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1706 if (unlikely(ret != 0))
1707 goto out;
1708 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
1709 if (unlikely(ret != 0))
1710 goto out_put_key1;
1711
1712 hb1 = hash_futex(&key1);
1713 hb2 = hash_futex(&key2);
1714
1715retry_private:
1716 double_lock_hb(hb1, hb2);
1717 op_ret = futex_atomic_op_inuser(op, uaddr2);
1718 if (unlikely(op_ret < 0)) {
1719 double_unlock_hb(hb1, hb2);
1720
1721 if (!IS_ENABLED(CONFIG_MMU) ||
1722 unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
1723 /*
1724 * we don't get EFAULT from MMU faults if we don't have
1725 * an MMU, but we might get them from range checking
1726 */
1727 ret = op_ret;
1728 goto out_put_keys;
1729 }
1730
1731 if (op_ret == -EFAULT) {
1732 ret = fault_in_user_writeable(uaddr2);
1733 if (ret)
1734 goto out_put_keys;
1735 }
1736
1737 if (!(flags & FLAGS_SHARED)) {
1738 cond_resched();
1739 goto retry_private;
1740 }
1741
1742 put_futex_key(&key2);
1743 put_futex_key(&key1);
1744 cond_resched();
1745 goto retry;
1746 }
1747
1748 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1749 if (match_futex (&this->key, &key1)) {
1750 if (this->pi_state || this->rt_waiter) {
1751 ret = -EINVAL;
1752 goto out_unlock;
1753 }
1754 mark_wake_futex(&wake_q, this);
1755 if (++ret >= nr_wake)
1756 break;
1757 }
1758 }
1759
1760 if (op_ret > 0) {
1761 op_ret = 0;
1762 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1763 if (match_futex (&this->key, &key2)) {
1764 if (this->pi_state || this->rt_waiter) {
1765 ret = -EINVAL;
1766 goto out_unlock;
1767 }
1768 mark_wake_futex(&wake_q, this);
1769 if (++op_ret >= nr_wake2)
1770 break;
1771 }
1772 }
1773 ret += op_ret;
1774 }
1775
1776out_unlock:
1777 double_unlock_hb(hb1, hb2);
1778 wake_up_q(&wake_q);
1779out_put_keys:
1780 put_futex_key(&key2);
1781out_put_key1:
1782 put_futex_key(&key1);
1783out:
1784 return ret;
1785}
1786
1787/**
1788 * requeue_futex() - Requeue a futex_q from one hb to another
1789 * @q: the futex_q to requeue
1790 * @hb1: the source hash_bucket
1791 * @hb2: the target hash_bucket
1792 * @key2: the new key for the requeued futex_q
1793 */
1794static inline
1795void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1796 struct futex_hash_bucket *hb2, union futex_key *key2)
1797{
1798
1799 /*
1800 * If key1 and key2 hash to the same bucket, no need to
1801 * requeue.
1802 */
1803 if (likely(&hb1->chain != &hb2->chain)) {
1804 plist_del(&q->list, &hb1->chain);
1805 hb_waiters_dec(hb1);
1806 hb_waiters_inc(hb2);
1807 plist_add(&q->list, &hb2->chain);
1808 q->lock_ptr = &hb2->lock;
1809 }
1810 get_futex_key_refs(key2);
1811 q->key = *key2;
1812}
1813
1814/**
1815 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1816 * @q: the futex_q
1817 * @key: the key of the requeue target futex
1818 * @hb: the hash_bucket of the requeue target futex
1819 *
1820 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1821 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1822 * to the requeue target futex so the waiter can detect the wakeup on the right
1823 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1824 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1825 * to protect access to the pi_state to fixup the owner later. Must be called
1826 * with both q->lock_ptr and hb->lock held.
1827 */
1828static inline
1829void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1830 struct futex_hash_bucket *hb)
1831{
1832 get_futex_key_refs(key);
1833 q->key = *key;
1834
1835 __unqueue_futex(q);
1836
1837 WARN_ON(!q->rt_waiter);
1838 q->rt_waiter = NULL;
1839
1840 q->lock_ptr = &hb->lock;
1841
1842 wake_up_state(q->task, TASK_NORMAL);
1843}
1844
1845/**
1846 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1847 * @pifutex: the user address of the to futex
1848 * @hb1: the from futex hash bucket, must be locked by the caller
1849 * @hb2: the to futex hash bucket, must be locked by the caller
1850 * @key1: the from futex key
1851 * @key2: the to futex key
1852 * @ps: address to store the pi_state pointer
1853 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1854 *
1855 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1856 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1857 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1858 * hb1 and hb2 must be held by the caller.
1859 *
1860 * Return:
1861 * - 0 - failed to acquire the lock atomically;
1862 * - >0 - acquired the lock, return value is vpid of the top_waiter
1863 * - <0 - error
1864 */
1865static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1866 struct futex_hash_bucket *hb1,
1867 struct futex_hash_bucket *hb2,
1868 union futex_key *key1, union futex_key *key2,
1869 struct futex_pi_state **ps, int set_waiters)
1870{
1871 struct futex_q *top_waiter = NULL;
1872 u32 curval;
1873 int ret, vpid;
1874
1875 if (get_futex_value_locked(&curval, pifutex))
1876 return -EFAULT;
1877
1878 if (unlikely(should_fail_futex(true)))
1879 return -EFAULT;
1880
1881 /*
1882 * Find the top_waiter and determine if there are additional waiters.
1883 * If the caller intends to requeue more than 1 waiter to pifutex,
1884 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1885 * as we have means to handle the possible fault. If not, don't set
1886 * the bit unecessarily as it will force the subsequent unlock to enter
1887 * the kernel.
1888 */
1889 top_waiter = futex_top_waiter(hb1, key1);
1890
1891 /* There are no waiters, nothing for us to do. */
1892 if (!top_waiter)
1893 return 0;
1894
1895 /* Ensure we requeue to the expected futex. */
1896 if (!match_futex(top_waiter->requeue_pi_key, key2))
1897 return -EINVAL;
1898
1899 /*
1900 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1901 * the contended case or if set_waiters is 1. The pi_state is returned
1902 * in ps in contended cases.
1903 */
1904 vpid = task_pid_vnr(top_waiter->task);
1905 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1906 set_waiters);
1907 if (ret == 1) {
1908 requeue_pi_wake_futex(top_waiter, key2, hb2);
1909 return vpid;
1910 }
1911 return ret;
1912}
1913
1914/**
1915 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1916 * @uaddr1: source futex user address
1917 * @flags: futex flags (FLAGS_SHARED, etc.)
1918 * @uaddr2: target futex user address
1919 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1920 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1921 * @cmpval: @uaddr1 expected value (or %NULL)
1922 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1923 * pi futex (pi to pi requeue is not supported)
1924 *
1925 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1926 * uaddr2 atomically on behalf of the top waiter.
1927 *
1928 * Return:
1929 * - >=0 - on success, the number of tasks requeued or woken;
1930 * - <0 - on error
1931 */
1932static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1933 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1934 u32 *cmpval, int requeue_pi)
1935{
1936 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1937 int drop_count = 0, task_count = 0, ret;
1938 struct futex_pi_state *pi_state = NULL;
1939 struct futex_hash_bucket *hb1, *hb2;
1940 struct futex_q *this, *next;
1941 DEFINE_WAKE_Q(wake_q);
1942
1943 if (nr_wake < 0 || nr_requeue < 0)
1944 return -EINVAL;
1945
1946 /*
1947 * When PI not supported: return -ENOSYS if requeue_pi is true,
1948 * consequently the compiler knows requeue_pi is always false past
1949 * this point which will optimize away all the conditional code
1950 * further down.
1951 */
1952 if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
1953 return -ENOSYS;
1954
1955 if (requeue_pi) {
1956 /*
1957 * Requeue PI only works on two distinct uaddrs. This
1958 * check is only valid for private futexes. See below.
1959 */
1960 if (uaddr1 == uaddr2)
1961 return -EINVAL;
1962
1963 /*
1964 * requeue_pi requires a pi_state, try to allocate it now
1965 * without any locks in case it fails.
1966 */
1967 if (refill_pi_state_cache())
1968 return -ENOMEM;
1969 /*
1970 * requeue_pi must wake as many tasks as it can, up to nr_wake
1971 * + nr_requeue, since it acquires the rt_mutex prior to
1972 * returning to userspace, so as to not leave the rt_mutex with
1973 * waiters and no owner. However, second and third wake-ups
1974 * cannot be predicted as they involve race conditions with the
1975 * first wake and a fault while looking up the pi_state. Both
1976 * pthread_cond_signal() and pthread_cond_broadcast() should
1977 * use nr_wake=1.
1978 */
1979 if (nr_wake != 1)
1980 return -EINVAL;
1981 }
1982
1983retry:
1984 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1985 if (unlikely(ret != 0))
1986 goto out;
1987 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1988 requeue_pi ? FUTEX_WRITE : FUTEX_READ);
1989 if (unlikely(ret != 0))
1990 goto out_put_key1;
1991
1992 /*
1993 * The check above which compares uaddrs is not sufficient for
1994 * shared futexes. We need to compare the keys:
1995 */
1996 if (requeue_pi && match_futex(&key1, &key2)) {
1997 ret = -EINVAL;
1998 goto out_put_keys;
1999 }
2000
2001 hb1 = hash_futex(&key1);
2002 hb2 = hash_futex(&key2);
2003
2004retry_private:
2005 hb_waiters_inc(hb2);
2006 double_lock_hb(hb1, hb2);
2007
2008 if (likely(cmpval != NULL)) {
2009 u32 curval;
2010
2011 ret = get_futex_value_locked(&curval, uaddr1);
2012
2013 if (unlikely(ret)) {
2014 double_unlock_hb(hb1, hb2);
2015 hb_waiters_dec(hb2);
2016
2017 ret = get_user(curval, uaddr1);
2018 if (ret)
2019 goto out_put_keys;
2020
2021 if (!(flags & FLAGS_SHARED))
2022 goto retry_private;
2023
2024 put_futex_key(&key2);
2025 put_futex_key(&key1);
2026 goto retry;
2027 }
2028 if (curval != *cmpval) {
2029 ret = -EAGAIN;
2030 goto out_unlock;
2031 }
2032 }
2033
2034 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
2035 /*
2036 * Attempt to acquire uaddr2 and wake the top waiter. If we
2037 * intend to requeue waiters, force setting the FUTEX_WAITERS
2038 * bit. We force this here where we are able to easily handle
2039 * faults rather in the requeue loop below.
2040 */
2041 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
2042 &key2, &pi_state, nr_requeue);
2043
2044 /*
2045 * At this point the top_waiter has either taken uaddr2 or is
2046 * waiting on it. If the former, then the pi_state will not
2047 * exist yet, look it up one more time to ensure we have a
2048 * reference to it. If the lock was taken, ret contains the
2049 * vpid of the top waiter task.
2050 * If the lock was not taken, we have pi_state and an initial
2051 * refcount on it. In case of an error we have nothing.
2052 */
2053 if (ret > 0) {
2054 WARN_ON(pi_state);
2055 drop_count++;
2056 task_count++;
2057 /*
2058 * If we acquired the lock, then the user space value
2059 * of uaddr2 should be vpid. It cannot be changed by
2060 * the top waiter as it is blocked on hb2 lock if it
2061 * tries to do so. If something fiddled with it behind
2062 * our back the pi state lookup might unearth it. So
2063 * we rather use the known value than rereading and
2064 * handing potential crap to lookup_pi_state.
2065 *
2066 * If that call succeeds then we have pi_state and an
2067 * initial refcount on it.
2068 */
2069 ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
2070 }
2071
2072 switch (ret) {
2073 case 0:
2074 /* We hold a reference on the pi state. */
2075 break;
2076
2077 /* If the above failed, then pi_state is NULL */
2078 case -EFAULT:
2079 double_unlock_hb(hb1, hb2);
2080 hb_waiters_dec(hb2);
2081 put_futex_key(&key2);
2082 put_futex_key(&key1);
2083 ret = fault_in_user_writeable(uaddr2);
2084 if (!ret)
2085 goto retry;
2086 goto out;
2087 case -EAGAIN:
2088 /*
2089 * Two reasons for this:
2090 * - Owner is exiting and we just wait for the
2091 * exit to complete.
2092 * - The user space value changed.
2093 */
2094 double_unlock_hb(hb1, hb2);
2095 hb_waiters_dec(hb2);
2096 put_futex_key(&key2);
2097 put_futex_key(&key1);
2098 cond_resched();
2099 goto retry;
2100 default:
2101 goto out_unlock;
2102 }
2103 }
2104
2105 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
2106 if (task_count - nr_wake >= nr_requeue)
2107 break;
2108
2109 if (!match_futex(&this->key, &key1))
2110 continue;
2111
2112 /*
2113 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
2114 * be paired with each other and no other futex ops.
2115 *
2116 * We should never be requeueing a futex_q with a pi_state,
2117 * which is awaiting a futex_unlock_pi().
2118 */
2119 if ((requeue_pi && !this->rt_waiter) ||
2120 (!requeue_pi && this->rt_waiter) ||
2121 this->pi_state) {
2122 ret = -EINVAL;
2123 break;
2124 }
2125
2126 /*
2127 * Wake nr_wake waiters. For requeue_pi, if we acquired the
2128 * lock, we already woke the top_waiter. If not, it will be
2129 * woken by futex_unlock_pi().
2130 */
2131 if (++task_count <= nr_wake && !requeue_pi) {
2132 mark_wake_futex(&wake_q, this);
2133 continue;
2134 }
2135
2136 /* Ensure we requeue to the expected futex for requeue_pi. */
2137 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
2138 ret = -EINVAL;
2139 break;
2140 }
2141
2142 /*
2143 * Requeue nr_requeue waiters and possibly one more in the case
2144 * of requeue_pi if we couldn't acquire the lock atomically.
2145 */
2146 if (requeue_pi) {
2147 /*
2148 * Prepare the waiter to take the rt_mutex. Take a
2149 * refcount on the pi_state and store the pointer in
2150 * the futex_q object of the waiter.
2151 */
2152 get_pi_state(pi_state);
2153 this->pi_state = pi_state;
2154 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
2155 this->rt_waiter,
2156 this->task);
2157 if (ret == 1) {
2158 /*
2159 * We got the lock. We do neither drop the
2160 * refcount on pi_state nor clear
2161 * this->pi_state because the waiter needs the
2162 * pi_state for cleaning up the user space
2163 * value. It will drop the refcount after
2164 * doing so.
2165 */
2166 requeue_pi_wake_futex(this, &key2, hb2);
2167 drop_count++;
2168 continue;
2169 } else if (ret) {
2170 /*
2171 * rt_mutex_start_proxy_lock() detected a
2172 * potential deadlock when we tried to queue
2173 * that waiter. Drop the pi_state reference
2174 * which we took above and remove the pointer
2175 * to the state from the waiters futex_q
2176 * object.
2177 */
2178 this->pi_state = NULL;
2179 put_pi_state(pi_state);
2180 /*
2181 * We stop queueing more waiters and let user
2182 * space deal with the mess.
2183 */
2184 break;
2185 }
2186 }
2187 requeue_futex(this, hb1, hb2, &key2);
2188 drop_count++;
2189 }
2190
2191 /*
2192 * We took an extra initial reference to the pi_state either
2193 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2194 * need to drop it here again.
2195 */
2196 put_pi_state(pi_state);
2197
2198out_unlock:
2199 double_unlock_hb(hb1, hb2);
2200 wake_up_q(&wake_q);
2201 hb_waiters_dec(hb2);
2202
2203 /*
2204 * drop_futex_key_refs() must be called outside the spinlocks. During
2205 * the requeue we moved futex_q's from the hash bucket at key1 to the
2206 * one at key2 and updated their key pointer. We no longer need to
2207 * hold the references to key1.
2208 */
2209 while (--drop_count >= 0)
2210 drop_futex_key_refs(&key1);
2211
2212out_put_keys:
2213 put_futex_key(&key2);
2214out_put_key1:
2215 put_futex_key(&key1);
2216out:
2217 return ret ? ret : task_count;
2218}
2219
2220/* The key must be already stored in q->key. */
2221static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2222 __acquires(&hb->lock)
2223{
2224 struct futex_hash_bucket *hb;
2225
2226 hb = hash_futex(&q->key);
2227
2228 /*
2229 * Increment the counter before taking the lock so that
2230 * a potential waker won't miss a to-be-slept task that is
2231 * waiting for the spinlock. This is safe as all queue_lock()
2232 * users end up calling queue_me(). Similarly, for housekeeping,
2233 * decrement the counter at queue_unlock() when some error has
2234 * occurred and we don't end up adding the task to the list.
2235 */
2236 hb_waiters_inc(hb); /* implies smp_mb(); (A) */
2237
2238 q->lock_ptr = &hb->lock;
2239
2240 spin_lock(&hb->lock);
2241 return hb;
2242}
2243
2244static inline void
2245queue_unlock(struct futex_hash_bucket *hb)
2246 __releases(&hb->lock)
2247{
2248 spin_unlock(&hb->lock);
2249 hb_waiters_dec(hb);
2250}
2251
2252static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2253{
2254 int prio;
2255
2256 /*
2257 * The priority used to register this element is
2258 * - either the real thread-priority for the real-time threads
2259 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2260 * - or MAX_RT_PRIO for non-RT threads.
2261 * Thus, all RT-threads are woken first in priority order, and
2262 * the others are woken last, in FIFO order.
2263 */
2264 prio = min(current->normal_prio, MAX_RT_PRIO);
2265
2266 plist_node_init(&q->list, prio);
2267 plist_add(&q->list, &hb->chain);
2268 q->task = current;
2269}
2270
2271/**
2272 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2273 * @q: The futex_q to enqueue
2274 * @hb: The destination hash bucket
2275 *
2276 * The hb->lock must be held by the caller, and is released here. A call to
2277 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2278 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2279 * or nothing if the unqueue is done as part of the wake process and the unqueue
2280 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
2281 * an example).
2282 */
2283static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2284 __releases(&hb->lock)
2285{
2286 __queue_me(q, hb);
2287 spin_unlock(&hb->lock);
2288}
2289
2290/**
2291 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2292 * @q: The futex_q to unqueue
2293 *
2294 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2295 * be paired with exactly one earlier call to queue_me().
2296 *
2297 * Return:
2298 * - 1 - if the futex_q was still queued (and we removed unqueued it);
2299 * - 0 - if the futex_q was already removed by the waking thread
2300 */
2301static int unqueue_me(struct futex_q *q)
2302{
2303 spinlock_t *lock_ptr;
2304 int ret = 0;
2305
2306 /* In the common case we don't take the spinlock, which is nice. */
2307retry:
2308 /*
2309 * q->lock_ptr can change between this read and the following spin_lock.
2310 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2311 * optimizing lock_ptr out of the logic below.
2312 */
2313 lock_ptr = READ_ONCE(q->lock_ptr);
2314 if (lock_ptr != NULL) {
2315 spin_lock(lock_ptr);
2316 /*
2317 * q->lock_ptr can change between reading it and
2318 * spin_lock(), causing us to take the wrong lock. This
2319 * corrects the race condition.
2320 *
2321 * Reasoning goes like this: if we have the wrong lock,
2322 * q->lock_ptr must have changed (maybe several times)
2323 * between reading it and the spin_lock(). It can
2324 * change again after the spin_lock() but only if it was
2325 * already changed before the spin_lock(). It cannot,
2326 * however, change back to the original value. Therefore
2327 * we can detect whether we acquired the correct lock.
2328 */
2329 if (unlikely(lock_ptr != q->lock_ptr)) {
2330 spin_unlock(lock_ptr);
2331 goto retry;
2332 }
2333 __unqueue_futex(q);
2334
2335 BUG_ON(q->pi_state);
2336
2337 spin_unlock(lock_ptr);
2338 ret = 1;
2339 }
2340
2341 drop_futex_key_refs(&q->key);
2342 return ret;
2343}
2344
2345/*
2346 * PI futexes can not be requeued and must remove themself from the
2347 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2348 * and dropped here.
2349 */
2350static void unqueue_me_pi(struct futex_q *q)
2351 __releases(q->lock_ptr)
2352{
2353 __unqueue_futex(q);
2354
2355 BUG_ON(!q->pi_state);
2356 put_pi_state(q->pi_state);
2357 q->pi_state = NULL;
2358
2359 spin_unlock(q->lock_ptr);
2360}
2361
2362static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2363 struct task_struct *argowner)
2364{
2365 struct futex_pi_state *pi_state = q->pi_state;
2366 u32 uval, uninitialized_var(curval), newval;
2367 struct task_struct *oldowner, *newowner;
2368 u32 newtid;
2369 int ret, err = 0;
2370
2371 lockdep_assert_held(q->lock_ptr);
2372
2373 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2374
2375 oldowner = pi_state->owner;
2376
2377 /*
2378 * We are here because either:
2379 *
2380 * - we stole the lock and pi_state->owner needs updating to reflect
2381 * that (@argowner == current),
2382 *
2383 * or:
2384 *
2385 * - someone stole our lock and we need to fix things to point to the
2386 * new owner (@argowner == NULL).
2387 *
2388 * Either way, we have to replace the TID in the user space variable.
2389 * This must be atomic as we have to preserve the owner died bit here.
2390 *
2391 * Note: We write the user space value _before_ changing the pi_state
2392 * because we can fault here. Imagine swapped out pages or a fork
2393 * that marked all the anonymous memory readonly for cow.
2394 *
2395 * Modifying pi_state _before_ the user space value would leave the
2396 * pi_state in an inconsistent state when we fault here, because we
2397 * need to drop the locks to handle the fault. This might be observed
2398 * in the PID check in lookup_pi_state.
2399 */
2400retry:
2401 if (!argowner) {
2402 if (oldowner != current) {
2403 /*
2404 * We raced against a concurrent self; things are
2405 * already fixed up. Nothing to do.
2406 */
2407 ret = 0;
2408 goto out_unlock;
2409 }
2410
2411 if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2412 /* We got the lock after all, nothing to fix. */
2413 ret = 0;
2414 goto out_unlock;
2415 }
2416
2417 /*
2418 * Since we just failed the trylock; there must be an owner.
2419 */
2420 newowner = rt_mutex_owner(&pi_state->pi_mutex);
2421 BUG_ON(!newowner);
2422 } else {
2423 WARN_ON_ONCE(argowner != current);
2424 if (oldowner == current) {
2425 /*
2426 * We raced against a concurrent self; things are
2427 * already fixed up. Nothing to do.
2428 */
2429 ret = 0;
2430 goto out_unlock;
2431 }
2432 newowner = argowner;
2433 }
2434
2435 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2436 /* Owner died? */
2437 if (!pi_state->owner)
2438 newtid |= FUTEX_OWNER_DIED;
2439
2440 err = get_futex_value_locked(&uval, uaddr);
2441 if (err)
2442 goto handle_err;
2443
2444 for (;;) {
2445 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2446
2447 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
2448 if (err)
2449 goto handle_err;
2450
2451 if (curval == uval)
2452 break;
2453 uval = curval;
2454 }
2455
2456 /*
2457 * We fixed up user space. Now we need to fix the pi_state
2458 * itself.
2459 */
2460 if (pi_state->owner != NULL) {
2461 raw_spin_lock(&pi_state->owner->pi_lock);
2462 WARN_ON(list_empty(&pi_state->list));
2463 list_del_init(&pi_state->list);
2464 raw_spin_unlock(&pi_state->owner->pi_lock);
2465 }
2466
2467 pi_state->owner = newowner;
2468
2469 raw_spin_lock(&newowner->pi_lock);
2470 WARN_ON(!list_empty(&pi_state->list));
2471 list_add(&pi_state->list, &newowner->pi_state_list);
2472 raw_spin_unlock(&newowner->pi_lock);
2473 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2474
2475 return 0;
2476
2477 /*
2478 * In order to reschedule or handle a page fault, we need to drop the
2479 * locks here. In the case of a fault, this gives the other task
2480 * (either the highest priority waiter itself or the task which stole
2481 * the rtmutex) the chance to try the fixup of the pi_state. So once we
2482 * are back from handling the fault we need to check the pi_state after
2483 * reacquiring the locks and before trying to do another fixup. When
2484 * the fixup has been done already we simply return.
2485 *
2486 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
2487 * drop hb->lock since the caller owns the hb -> futex_q relation.
2488 * Dropping the pi_mutex->wait_lock requires the state revalidate.
2489 */
2490handle_err:
2491 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2492 spin_unlock(q->lock_ptr);
2493
2494 switch (err) {
2495 case -EFAULT:
2496 ret = fault_in_user_writeable(uaddr);
2497 break;
2498
2499 case -EAGAIN:
2500 cond_resched();
2501 ret = 0;
2502 break;
2503
2504 default:
2505 WARN_ON_ONCE(1);
2506 ret = err;
2507 break;
2508 }
2509
2510 spin_lock(q->lock_ptr);
2511 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2512
2513 /*
2514 * Check if someone else fixed it for us:
2515 */
2516 if (pi_state->owner != oldowner) {
2517 ret = 0;
2518 goto out_unlock;
2519 }
2520
2521 if (ret)
2522 goto out_unlock;
2523
2524 goto retry;
2525
2526out_unlock:
2527 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2528 return ret;
2529}
2530
2531static long futex_wait_restart(struct restart_block *restart);
2532
2533/**
2534 * fixup_owner() - Post lock pi_state and corner case management
2535 * @uaddr: user address of the futex
2536 * @q: futex_q (contains pi_state and access to the rt_mutex)
2537 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2538 *
2539 * After attempting to lock an rt_mutex, this function is called to cleanup
2540 * the pi_state owner as well as handle race conditions that may allow us to
2541 * acquire the lock. Must be called with the hb lock held.
2542 *
2543 * Return:
2544 * - 1 - success, lock taken;
2545 * - 0 - success, lock not taken;
2546 * - <0 - on error (-EFAULT)
2547 */
2548static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2549{
2550 int ret = 0;
2551
2552 if (locked) {
2553 /*
2554 * Got the lock. We might not be the anticipated owner if we
2555 * did a lock-steal - fix up the PI-state in that case:
2556 *
2557 * Speculative pi_state->owner read (we don't hold wait_lock);
2558 * since we own the lock pi_state->owner == current is the
2559 * stable state, anything else needs more attention.
2560 */
2561 if (q->pi_state->owner != current)
2562 ret = fixup_pi_state_owner(uaddr, q, current);
2563 goto out;
2564 }
2565
2566 /*
2567 * If we didn't get the lock; check if anybody stole it from us. In
2568 * that case, we need to fix up the uval to point to them instead of
2569 * us, otherwise bad things happen. [10]
2570 *
2571 * Another speculative read; pi_state->owner == current is unstable
2572 * but needs our attention.
2573 */
2574 if (q->pi_state->owner == current) {
2575 ret = fixup_pi_state_owner(uaddr, q, NULL);
2576 goto out;
2577 }
2578
2579 /*
2580 * Paranoia check. If we did not take the lock, then we should not be
2581 * the owner of the rt_mutex.
2582 */
2583 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
2584 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2585 "pi-state %p\n", ret,
2586 q->pi_state->pi_mutex.owner,
2587 q->pi_state->owner);
2588 }
2589
2590out:
2591 return ret ? ret : locked;
2592}
2593
2594/**
2595 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2596 * @hb: the futex hash bucket, must be locked by the caller
2597 * @q: the futex_q to queue up on
2598 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2599 */
2600static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2601 struct hrtimer_sleeper *timeout)
2602{
2603 /*
2604 * The task state is guaranteed to be set before another task can
2605 * wake it. set_current_state() is implemented using smp_store_mb() and
2606 * queue_me() calls spin_unlock() upon completion, both serializing
2607 * access to the hash list and forcing another memory barrier.
2608 */
2609 set_current_state(TASK_INTERRUPTIBLE);
2610 queue_me(q, hb);
2611
2612 /* Arm the timer */
2613 if (timeout)
2614 hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
2615
2616 /*
2617 * If we have been removed from the hash list, then another task
2618 * has tried to wake us, and we can skip the call to schedule().
2619 */
2620 if (likely(!plist_node_empty(&q->list))) {
2621 /*
2622 * If the timer has already expired, current will already be
2623 * flagged for rescheduling. Only call schedule if there
2624 * is no timeout, or if it has yet to expire.
2625 */
2626 if (!timeout || timeout->task)
2627 freezable_schedule();
2628 }
2629 __set_current_state(TASK_RUNNING);
2630}
2631
2632/**
2633 * futex_wait_setup() - Prepare to wait on a futex
2634 * @uaddr: the futex userspace address
2635 * @val: the expected value
2636 * @flags: futex flags (FLAGS_SHARED, etc.)
2637 * @q: the associated futex_q
2638 * @hb: storage for hash_bucket pointer to be returned to caller
2639 *
2640 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2641 * compare it with the expected value. Handle atomic faults internally.
2642 * Return with the hb lock held and a q.key reference on success, and unlocked
2643 * with no q.key reference on failure.
2644 *
2645 * Return:
2646 * - 0 - uaddr contains val and hb has been locked;
2647 * - <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2648 */
2649static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2650 struct futex_q *q, struct futex_hash_bucket **hb)
2651{
2652 u32 uval;
2653 int ret;
2654
2655 /*
2656 * Access the page AFTER the hash-bucket is locked.
2657 * Order is important:
2658 *
2659 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2660 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2661 *
2662 * The basic logical guarantee of a futex is that it blocks ONLY
2663 * if cond(var) is known to be true at the time of blocking, for
2664 * any cond. If we locked the hash-bucket after testing *uaddr, that
2665 * would open a race condition where we could block indefinitely with
2666 * cond(var) false, which would violate the guarantee.
2667 *
2668 * On the other hand, we insert q and release the hash-bucket only
2669 * after testing *uaddr. This guarantees that futex_wait() will NOT
2670 * absorb a wakeup if *uaddr does not match the desired values
2671 * while the syscall executes.
2672 */
2673retry:
2674 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
2675 if (unlikely(ret != 0))
2676 return ret;
2677
2678retry_private:
2679 *hb = queue_lock(q);
2680
2681 ret = get_futex_value_locked(&uval, uaddr);
2682
2683 if (ret) {
2684 queue_unlock(*hb);
2685
2686 ret = get_user(uval, uaddr);
2687 if (ret)
2688 goto out;
2689
2690 if (!(flags & FLAGS_SHARED))
2691 goto retry_private;
2692
2693 put_futex_key(&q->key);
2694 goto retry;
2695 }
2696
2697 if (uval != val) {
2698 queue_unlock(*hb);
2699 ret = -EWOULDBLOCK;
2700 }
2701
2702out:
2703 if (ret)
2704 put_futex_key(&q->key);
2705 return ret;
2706}
2707
2708static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2709 ktime_t *abs_time, u32 bitset)
2710{
2711 struct hrtimer_sleeper timeout, *to;
2712 struct restart_block *restart;
2713 struct futex_hash_bucket *hb;
2714 struct futex_q q = futex_q_init;
2715 int ret;
2716
2717 if (!bitset)
2718 return -EINVAL;
2719 q.bitset = bitset;
2720
2721 to = futex_setup_timer(abs_time, &timeout, flags,
2722 current->timer_slack_ns);
2723retry:
2724 /*
2725 * Prepare to wait on uaddr. On success, holds hb lock and increments
2726 * q.key refs.
2727 */
2728 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2729 if (ret)
2730 goto out;
2731
2732 /* queue_me and wait for wakeup, timeout, or a signal. */
2733 futex_wait_queue_me(hb, &q, to);
2734
2735 /* If we were woken (and unqueued), we succeeded, whatever. */
2736 ret = 0;
2737 /* unqueue_me() drops q.key ref */
2738 if (!unqueue_me(&q))
2739 goto out;
2740 ret = -ETIMEDOUT;
2741 if (to && !to->task)
2742 goto out;
2743
2744 /*
2745 * We expect signal_pending(current), but we might be the
2746 * victim of a spurious wakeup as well.
2747 */
2748 if (!signal_pending(current))
2749 goto retry;
2750
2751 ret = -ERESTARTSYS;
2752 if (!abs_time)
2753 goto out;
2754
2755 restart = ¤t->restart_block;
2756 restart->fn = futex_wait_restart;
2757 restart->futex.uaddr = uaddr;
2758 restart->futex.val = val;
2759 restart->futex.time = *abs_time;
2760 restart->futex.bitset = bitset;
2761 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2762
2763 ret = -ERESTART_RESTARTBLOCK;
2764
2765out:
2766 if (to) {
2767 hrtimer_cancel(&to->timer);
2768 destroy_hrtimer_on_stack(&to->timer);
2769 }
2770 return ret;
2771}
2772
2773
2774static long futex_wait_restart(struct restart_block *restart)
2775{
2776 u32 __user *uaddr = restart->futex.uaddr;
2777 ktime_t t, *tp = NULL;
2778
2779 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2780 t = restart->futex.time;
2781 tp = &t;
2782 }
2783 restart->fn = do_no_restart_syscall;
2784
2785 return (long)futex_wait(uaddr, restart->futex.flags,
2786 restart->futex.val, tp, restart->futex.bitset);
2787}
2788
2789
2790/*
2791 * Userspace tried a 0 -> TID atomic transition of the futex value
2792 * and failed. The kernel side here does the whole locking operation:
2793 * if there are waiters then it will block as a consequence of relying
2794 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2795 * a 0 value of the futex too.).
2796 *
2797 * Also serves as futex trylock_pi()'ing, and due semantics.
2798 */
2799static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2800 ktime_t *time, int trylock)
2801{
2802 struct hrtimer_sleeper timeout, *to;
2803 struct futex_pi_state *pi_state = NULL;
2804 struct rt_mutex_waiter rt_waiter;
2805 struct futex_hash_bucket *hb;
2806 struct futex_q q = futex_q_init;
2807 int res, ret;
2808
2809 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2810 return -ENOSYS;
2811
2812 if (refill_pi_state_cache())
2813 return -ENOMEM;
2814
2815 to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
2816
2817retry:
2818 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
2819 if (unlikely(ret != 0))
2820 goto out;
2821
2822retry_private:
2823 hb = queue_lock(&q);
2824
2825 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2826 if (unlikely(ret)) {
2827 /*
2828 * Atomic work succeeded and we got the lock,
2829 * or failed. Either way, we do _not_ block.
2830 */
2831 switch (ret) {
2832 case 1:
2833 /* We got the lock. */
2834 ret = 0;
2835 goto out_unlock_put_key;
2836 case -EFAULT:
2837 goto uaddr_faulted;
2838 case -EAGAIN:
2839 /*
2840 * Two reasons for this:
2841 * - Task is exiting and we just wait for the
2842 * exit to complete.
2843 * - The user space value changed.
2844 */
2845 queue_unlock(hb);
2846 put_futex_key(&q.key);
2847 cond_resched();
2848 goto retry;
2849 default:
2850 goto out_unlock_put_key;
2851 }
2852 }
2853
2854 WARN_ON(!q.pi_state);
2855
2856 /*
2857 * Only actually queue now that the atomic ops are done:
2858 */
2859 __queue_me(&q, hb);
2860
2861 if (trylock) {
2862 ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
2863 /* Fixup the trylock return value: */
2864 ret = ret ? 0 : -EWOULDBLOCK;
2865 goto no_block;
2866 }
2867
2868 rt_mutex_init_waiter(&rt_waiter);
2869
2870 /*
2871 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
2872 * hold it while doing rt_mutex_start_proxy(), because then it will
2873 * include hb->lock in the blocking chain, even through we'll not in
2874 * fact hold it while blocking. This will lead it to report -EDEADLK
2875 * and BUG when futex_unlock_pi() interleaves with this.
2876 *
2877 * Therefore acquire wait_lock while holding hb->lock, but drop the
2878 * latter before calling __rt_mutex_start_proxy_lock(). This
2879 * interleaves with futex_unlock_pi() -- which does a similar lock
2880 * handoff -- such that the latter can observe the futex_q::pi_state
2881 * before __rt_mutex_start_proxy_lock() is done.
2882 */
2883 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
2884 spin_unlock(q.lock_ptr);
2885 /*
2886 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
2887 * such that futex_unlock_pi() is guaranteed to observe the waiter when
2888 * it sees the futex_q::pi_state.
2889 */
2890 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
2891 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
2892
2893 if (ret) {
2894 if (ret == 1)
2895 ret = 0;
2896 goto cleanup;
2897 }
2898
2899 if (unlikely(to))
2900 hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
2901
2902 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
2903
2904cleanup:
2905 spin_lock(q.lock_ptr);
2906 /*
2907 * If we failed to acquire the lock (deadlock/signal/timeout), we must
2908 * first acquire the hb->lock before removing the lock from the
2909 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
2910 * lists consistent.
2911 *
2912 * In particular; it is important that futex_unlock_pi() can not
2913 * observe this inconsistency.
2914 */
2915 if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
2916 ret = 0;
2917
2918no_block:
2919 /*
2920 * Fixup the pi_state owner and possibly acquire the lock if we
2921 * haven't already.
2922 */
2923 res = fixup_owner(uaddr, &q, !ret);
2924 /*
2925 * If fixup_owner() returned an error, proprogate that. If it acquired
2926 * the lock, clear our -ETIMEDOUT or -EINTR.
2927 */
2928 if (res)
2929 ret = (res < 0) ? res : 0;
2930
2931 /*
2932 * If fixup_owner() faulted and was unable to handle the fault, unlock
2933 * it and return the fault to userspace.
2934 */
2935 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
2936 pi_state = q.pi_state;
2937 get_pi_state(pi_state);
2938 }
2939
2940 /* Unqueue and drop the lock */
2941 unqueue_me_pi(&q);
2942
2943 if (pi_state) {
2944 rt_mutex_futex_unlock(&pi_state->pi_mutex);
2945 put_pi_state(pi_state);
2946 }
2947
2948 goto out_put_key;
2949
2950out_unlock_put_key:
2951 queue_unlock(hb);
2952
2953out_put_key:
2954 put_futex_key(&q.key);
2955out:
2956 if (to) {
2957 hrtimer_cancel(&to->timer);
2958 destroy_hrtimer_on_stack(&to->timer);
2959 }
2960 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2961
2962uaddr_faulted:
2963 queue_unlock(hb);
2964
2965 ret = fault_in_user_writeable(uaddr);
2966 if (ret)
2967 goto out_put_key;
2968
2969 if (!(flags & FLAGS_SHARED))
2970 goto retry_private;
2971
2972 put_futex_key(&q.key);
2973 goto retry;
2974}
2975
2976/*
2977 * Userspace attempted a TID -> 0 atomic transition, and failed.
2978 * This is the in-kernel slowpath: we look up the PI state (if any),
2979 * and do the rt-mutex unlock.
2980 */
2981static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2982{
2983 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2984 union futex_key key = FUTEX_KEY_INIT;
2985 struct futex_hash_bucket *hb;
2986 struct futex_q *top_waiter;
2987 int ret;
2988
2989 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2990 return -ENOSYS;
2991
2992retry:
2993 if (get_user(uval, uaddr))
2994 return -EFAULT;
2995 /*
2996 * We release only a lock we actually own:
2997 */
2998 if ((uval & FUTEX_TID_MASK) != vpid)
2999 return -EPERM;
3000
3001 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
3002 if (ret)
3003 return ret;
3004
3005 hb = hash_futex(&key);
3006 spin_lock(&hb->lock);
3007
3008 /*
3009 * Check waiters first. We do not trust user space values at
3010 * all and we at least want to know if user space fiddled
3011 * with the futex value instead of blindly unlocking.
3012 */
3013 top_waiter = futex_top_waiter(hb, &key);
3014 if (top_waiter) {
3015 struct futex_pi_state *pi_state = top_waiter->pi_state;
3016
3017 ret = -EINVAL;
3018 if (!pi_state)
3019 goto out_unlock;
3020
3021 /*
3022 * If current does not own the pi_state then the futex is
3023 * inconsistent and user space fiddled with the futex value.
3024 */
3025 if (pi_state->owner != current)
3026 goto out_unlock;
3027
3028 get_pi_state(pi_state);
3029 /*
3030 * By taking wait_lock while still holding hb->lock, we ensure
3031 * there is no point where we hold neither; and therefore
3032 * wake_futex_pi() must observe a state consistent with what we
3033 * observed.
3034 *
3035 * In particular; this forces __rt_mutex_start_proxy() to
3036 * complete such that we're guaranteed to observe the
3037 * rt_waiter. Also see the WARN in wake_futex_pi().
3038 */
3039 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3040 spin_unlock(&hb->lock);
3041
3042 /* drops pi_state->pi_mutex.wait_lock */
3043 ret = wake_futex_pi(uaddr, uval, pi_state);
3044
3045 put_pi_state(pi_state);
3046
3047 /*
3048 * Success, we're done! No tricky corner cases.
3049 */
3050 if (!ret)
3051 goto out_putkey;
3052 /*
3053 * The atomic access to the futex value generated a
3054 * pagefault, so retry the user-access and the wakeup:
3055 */
3056 if (ret == -EFAULT)
3057 goto pi_faulted;
3058 /*
3059 * A unconditional UNLOCK_PI op raced against a waiter
3060 * setting the FUTEX_WAITERS bit. Try again.
3061 */
3062 if (ret == -EAGAIN)
3063 goto pi_retry;
3064 /*
3065 * wake_futex_pi has detected invalid state. Tell user
3066 * space.
3067 */
3068 goto out_putkey;
3069 }
3070
3071 /*
3072 * We have no kernel internal state, i.e. no waiters in the
3073 * kernel. Waiters which are about to queue themselves are stuck
3074 * on hb->lock. So we can safely ignore them. We do neither
3075 * preserve the WAITERS bit not the OWNER_DIED one. We are the
3076 * owner.
3077 */
3078 if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
3079 spin_unlock(&hb->lock);
3080 switch (ret) {
3081 case -EFAULT:
3082 goto pi_faulted;
3083
3084 case -EAGAIN:
3085 goto pi_retry;
3086
3087 default:
3088 WARN_ON_ONCE(1);
3089 goto out_putkey;
3090 }
3091 }
3092
3093 /*
3094 * If uval has changed, let user space handle it.
3095 */
3096 ret = (curval == uval) ? 0 : -EAGAIN;
3097
3098out_unlock:
3099 spin_unlock(&hb->lock);
3100out_putkey:
3101 put_futex_key(&key);
3102 return ret;
3103
3104pi_retry:
3105 put_futex_key(&key);
3106 cond_resched();
3107 goto retry;
3108
3109pi_faulted:
3110 put_futex_key(&key);
3111
3112 ret = fault_in_user_writeable(uaddr);
3113 if (!ret)
3114 goto retry;
3115
3116 return ret;
3117}
3118
3119/**
3120 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
3121 * @hb: the hash_bucket futex_q was original enqueued on
3122 * @q: the futex_q woken while waiting to be requeued
3123 * @key2: the futex_key of the requeue target futex
3124 * @timeout: the timeout associated with the wait (NULL if none)
3125 *
3126 * Detect if the task was woken on the initial futex as opposed to the requeue
3127 * target futex. If so, determine if it was a timeout or a signal that caused
3128 * the wakeup and return the appropriate error code to the caller. Must be
3129 * called with the hb lock held.
3130 *
3131 * Return:
3132 * - 0 = no early wakeup detected;
3133 * - <0 = -ETIMEDOUT or -ERESTARTNOINTR
3134 */
3135static inline
3136int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
3137 struct futex_q *q, union futex_key *key2,
3138 struct hrtimer_sleeper *timeout)
3139{
3140 int ret = 0;
3141
3142 /*
3143 * With the hb lock held, we avoid races while we process the wakeup.
3144 * We only need to hold hb (and not hb2) to ensure atomicity as the
3145 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
3146 * It can't be requeued from uaddr2 to something else since we don't
3147 * support a PI aware source futex for requeue.
3148 */
3149 if (!match_futex(&q->key, key2)) {
3150 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
3151 /*
3152 * We were woken prior to requeue by a timeout or a signal.
3153 * Unqueue the futex_q and determine which it was.
3154 */
3155 plist_del(&q->list, &hb->chain);
3156 hb_waiters_dec(hb);
3157
3158 /* Handle spurious wakeups gracefully */
3159 ret = -EWOULDBLOCK;
3160 if (timeout && !timeout->task)
3161 ret = -ETIMEDOUT;
3162 else if (signal_pending(current))
3163 ret = -ERESTARTNOINTR;
3164 }
3165 return ret;
3166}
3167
3168/**
3169 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
3170 * @uaddr: the futex we initially wait on (non-pi)
3171 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
3172 * the same type, no requeueing from private to shared, etc.
3173 * @val: the expected value of uaddr
3174 * @abs_time: absolute timeout
3175 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
3176 * @uaddr2: the pi futex we will take prior to returning to user-space
3177 *
3178 * The caller will wait on uaddr and will be requeued by futex_requeue() to
3179 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
3180 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
3181 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
3182 * without one, the pi logic would not know which task to boost/deboost, if
3183 * there was a need to.
3184 *
3185 * We call schedule in futex_wait_queue_me() when we enqueue and return there
3186 * via the following--
3187 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
3188 * 2) wakeup on uaddr2 after a requeue
3189 * 3) signal
3190 * 4) timeout
3191 *
3192 * If 3, cleanup and return -ERESTARTNOINTR.
3193 *
3194 * If 2, we may then block on trying to take the rt_mutex and return via:
3195 * 5) successful lock
3196 * 6) signal
3197 * 7) timeout
3198 * 8) other lock acquisition failure
3199 *
3200 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
3201 *
3202 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
3203 *
3204 * Return:
3205 * - 0 - On success;
3206 * - <0 - On error
3207 */
3208static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3209 u32 val, ktime_t *abs_time, u32 bitset,
3210 u32 __user *uaddr2)
3211{
3212 struct hrtimer_sleeper timeout, *to;
3213 struct futex_pi_state *pi_state = NULL;
3214 struct rt_mutex_waiter rt_waiter;
3215 struct futex_hash_bucket *hb;
3216 union futex_key key2 = FUTEX_KEY_INIT;
3217 struct futex_q q = futex_q_init;
3218 int res, ret;
3219
3220 if (!IS_ENABLED(CONFIG_FUTEX_PI))
3221 return -ENOSYS;
3222
3223 if (uaddr == uaddr2)
3224 return -EINVAL;
3225
3226 if (!bitset)
3227 return -EINVAL;
3228
3229 to = futex_setup_timer(abs_time, &timeout, flags,
3230 current->timer_slack_ns);
3231
3232 /*
3233 * The waiter is allocated on our stack, manipulated by the requeue
3234 * code while we sleep on uaddr.
3235 */
3236 rt_mutex_init_waiter(&rt_waiter);
3237
3238 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
3239 if (unlikely(ret != 0))
3240 goto out;
3241
3242 q.bitset = bitset;
3243 q.rt_waiter = &rt_waiter;
3244 q.requeue_pi_key = &key2;
3245
3246 /*
3247 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
3248 * count.
3249 */
3250 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
3251 if (ret)
3252 goto out_key2;
3253
3254 /*
3255 * The check above which compares uaddrs is not sufficient for
3256 * shared futexes. We need to compare the keys:
3257 */
3258 if (match_futex(&q.key, &key2)) {
3259 queue_unlock(hb);
3260 ret = -EINVAL;
3261 goto out_put_keys;
3262 }
3263
3264 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
3265 futex_wait_queue_me(hb, &q, to);
3266
3267 spin_lock(&hb->lock);
3268 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
3269 spin_unlock(&hb->lock);
3270 if (ret)
3271 goto out_put_keys;
3272
3273 /*
3274 * In order for us to be here, we know our q.key == key2, and since
3275 * we took the hb->lock above, we also know that futex_requeue() has
3276 * completed and we no longer have to concern ourselves with a wakeup
3277 * race with the atomic proxy lock acquisition by the requeue code. The
3278 * futex_requeue dropped our key1 reference and incremented our key2
3279 * reference count.
3280 */
3281
3282 /* Check if the requeue code acquired the second futex for us. */
3283 if (!q.rt_waiter) {
3284 /*
3285 * Got the lock. We might not be the anticipated owner if we
3286 * did a lock-steal - fix up the PI-state in that case.
3287 */
3288 if (q.pi_state && (q.pi_state->owner != current)) {
3289 spin_lock(q.lock_ptr);
3290 ret = fixup_pi_state_owner(uaddr2, &q, current);
3291 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3292 pi_state = q.pi_state;
3293 get_pi_state(pi_state);
3294 }
3295 /*
3296 * Drop the reference to the pi state which
3297 * the requeue_pi() code acquired for us.
3298 */
3299 put_pi_state(q.pi_state);
3300 spin_unlock(q.lock_ptr);
3301 }
3302 } else {
3303 struct rt_mutex *pi_mutex;
3304
3305 /*
3306 * We have been woken up by futex_unlock_pi(), a timeout, or a
3307 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
3308 * the pi_state.
3309 */
3310 WARN_ON(!q.pi_state);
3311 pi_mutex = &q.pi_state->pi_mutex;
3312 ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
3313
3314 spin_lock(q.lock_ptr);
3315 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
3316 ret = 0;
3317
3318 debug_rt_mutex_free_waiter(&rt_waiter);
3319 /*
3320 * Fixup the pi_state owner and possibly acquire the lock if we
3321 * haven't already.
3322 */
3323 res = fixup_owner(uaddr2, &q, !ret);
3324 /*
3325 * If fixup_owner() returned an error, proprogate that. If it
3326 * acquired the lock, clear -ETIMEDOUT or -EINTR.
3327 */
3328 if (res)
3329 ret = (res < 0) ? res : 0;
3330
3331 /*
3332 * If fixup_pi_state_owner() faulted and was unable to handle
3333 * the fault, unlock the rt_mutex and return the fault to
3334 * userspace.
3335 */
3336 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3337 pi_state = q.pi_state;
3338 get_pi_state(pi_state);
3339 }
3340
3341 /* Unqueue and drop the lock. */
3342 unqueue_me_pi(&q);
3343 }
3344
3345 if (pi_state) {
3346 rt_mutex_futex_unlock(&pi_state->pi_mutex);
3347 put_pi_state(pi_state);
3348 }
3349
3350 if (ret == -EINTR) {
3351 /*
3352 * We've already been requeued, but cannot restart by calling
3353 * futex_lock_pi() directly. We could restart this syscall, but
3354 * it would detect that the user space "val" changed and return
3355 * -EWOULDBLOCK. Save the overhead of the restart and return
3356 * -EWOULDBLOCK directly.
3357 */
3358 ret = -EWOULDBLOCK;
3359 }
3360
3361out_put_keys:
3362 put_futex_key(&q.key);
3363out_key2:
3364 put_futex_key(&key2);
3365
3366out:
3367 if (to) {
3368 hrtimer_cancel(&to->timer);
3369 destroy_hrtimer_on_stack(&to->timer);
3370 }
3371 return ret;
3372}
3373
3374/*
3375 * Support for robust futexes: the kernel cleans up held futexes at
3376 * thread exit time.
3377 *
3378 * Implementation: user-space maintains a per-thread list of locks it
3379 * is holding. Upon do_exit(), the kernel carefully walks this list,
3380 * and marks all locks that are owned by this thread with the
3381 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3382 * always manipulated with the lock held, so the list is private and
3383 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3384 * field, to allow the kernel to clean up if the thread dies after
3385 * acquiring the lock, but just before it could have added itself to
3386 * the list. There can only be one such pending lock.
3387 */
3388
3389/**
3390 * sys_set_robust_list() - Set the robust-futex list head of a task
3391 * @head: pointer to the list-head
3392 * @len: length of the list-head, as userspace expects
3393 */
3394SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3395 size_t, len)
3396{
3397 if (!futex_cmpxchg_enabled)
3398 return -ENOSYS;
3399 /*
3400 * The kernel knows only one size for now:
3401 */
3402 if (unlikely(len != sizeof(*head)))
3403 return -EINVAL;
3404
3405 current->robust_list = head;
3406
3407 return 0;
3408}
3409
3410/**
3411 * sys_get_robust_list() - Get the robust-futex list head of a task
3412 * @pid: pid of the process [zero for current task]
3413 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3414 * @len_ptr: pointer to a length field, the kernel fills in the header size
3415 */
3416SYSCALL_DEFINE3(get_robust_list, int, pid,
3417 struct robust_list_head __user * __user *, head_ptr,
3418 size_t __user *, len_ptr)
3419{
3420 struct robust_list_head __user *head;
3421 unsigned long ret;
3422 struct task_struct *p;
3423
3424 if (!futex_cmpxchg_enabled)
3425 return -ENOSYS;
3426
3427 rcu_read_lock();
3428
3429 ret = -ESRCH;
3430 if (!pid)
3431 p = current;
3432 else {
3433 p = find_task_by_vpid(pid);
3434 if (!p)
3435 goto err_unlock;
3436 }
3437
3438 ret = -EPERM;
3439 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3440 goto err_unlock;
3441
3442 head = p->robust_list;
3443 rcu_read_unlock();
3444
3445 if (put_user(sizeof(*head), len_ptr))
3446 return -EFAULT;
3447 return put_user(head, head_ptr);
3448
3449err_unlock:
3450 rcu_read_unlock();
3451
3452 return ret;
3453}
3454
3455/*
3456 * Process a futex-list entry, check whether it's owned by the
3457 * dying task, and do notification if so:
3458 */
3459static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3460{
3461 u32 uval, uninitialized_var(nval), mval;
3462 int err;
3463
3464 /* Futex address must be 32bit aligned */
3465 if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
3466 return -1;
3467
3468retry:
3469 if (get_user(uval, uaddr))
3470 return -1;
3471
3472 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
3473 return 0;
3474
3475 /*
3476 * Ok, this dying thread is truly holding a futex
3477 * of interest. Set the OWNER_DIED bit atomically
3478 * via cmpxchg, and if the value had FUTEX_WAITERS
3479 * set, wake up a waiter (if any). (We have to do a
3480 * futex_wake() even if OWNER_DIED is already set -
3481 * to handle the rare but possible case of recursive
3482 * thread-death.) The rest of the cleanup is done in
3483 * userspace.
3484 */
3485 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3486
3487 /*
3488 * We are not holding a lock here, but we want to have
3489 * the pagefault_disable/enable() protection because
3490 * we want to handle the fault gracefully. If the
3491 * access fails we try to fault in the futex with R/W
3492 * verification via get_user_pages. get_user() above
3493 * does not guarantee R/W access. If that fails we
3494 * give up and leave the futex locked.
3495 */
3496 if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
3497 switch (err) {
3498 case -EFAULT:
3499 if (fault_in_user_writeable(uaddr))
3500 return -1;
3501 goto retry;
3502
3503 case -EAGAIN:
3504 cond_resched();
3505 goto retry;
3506
3507 default:
3508 WARN_ON_ONCE(1);
3509 return err;
3510 }
3511 }
3512
3513 if (nval != uval)
3514 goto retry;
3515
3516 /*
3517 * Wake robust non-PI futexes here. The wakeup of
3518 * PI futexes happens in exit_pi_state():
3519 */
3520 if (!pi && (uval & FUTEX_WAITERS))
3521 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3522
3523 return 0;
3524}
3525
3526/*
3527 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3528 */
3529static inline int fetch_robust_entry(struct robust_list __user **entry,
3530 struct robust_list __user * __user *head,
3531 unsigned int *pi)
3532{
3533 unsigned long uentry;
3534
3535 if (get_user(uentry, (unsigned long __user *)head))
3536 return -EFAULT;
3537
3538 *entry = (void __user *)(uentry & ~1UL);
3539 *pi = uentry & 1;
3540
3541 return 0;
3542}
3543
3544/*
3545 * Walk curr->robust_list (very carefully, it's a userspace list!)
3546 * and mark any locks found there dead, and notify any waiters.
3547 *
3548 * We silently return on any sign of list-walking problem.
3549 */
3550void exit_robust_list(struct task_struct *curr)
3551{
3552 struct robust_list_head __user *head = curr->robust_list;
3553 struct robust_list __user *entry, *next_entry, *pending;
3554 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3555 unsigned int uninitialized_var(next_pi);
3556 unsigned long futex_offset;
3557 int rc;
3558
3559 if (!futex_cmpxchg_enabled)
3560 return;
3561
3562 /*
3563 * Fetch the list head (which was registered earlier, via
3564 * sys_set_robust_list()):
3565 */
3566 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3567 return;
3568 /*
3569 * Fetch the relative futex offset:
3570 */
3571 if (get_user(futex_offset, &head->futex_offset))
3572 return;
3573 /*
3574 * Fetch any possibly pending lock-add first, and handle it
3575 * if it exists:
3576 */
3577 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3578 return;
3579
3580 next_entry = NULL; /* avoid warning with gcc */
3581 while (entry != &head->list) {
3582 /*
3583 * Fetch the next entry in the list before calling
3584 * handle_futex_death:
3585 */
3586 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3587 /*
3588 * A pending lock might already be on the list, so
3589 * don't process it twice:
3590 */
3591 if (entry != pending)
3592 if (handle_futex_death((void __user *)entry + futex_offset,
3593 curr, pi))
3594 return;
3595 if (rc)
3596 return;
3597 entry = next_entry;
3598 pi = next_pi;
3599 /*
3600 * Avoid excessively long or circular lists:
3601 */
3602 if (!--limit)
3603 break;
3604
3605 cond_resched();
3606 }
3607
3608 if (pending)
3609 handle_futex_death((void __user *)pending + futex_offset,
3610 curr, pip);
3611}
3612
3613long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3614 u32 __user *uaddr2, u32 val2, u32 val3)
3615{
3616 int cmd = op & FUTEX_CMD_MASK;
3617 unsigned int flags = 0;
3618
3619 if (!(op & FUTEX_PRIVATE_FLAG))
3620 flags |= FLAGS_SHARED;
3621
3622 if (op & FUTEX_CLOCK_REALTIME) {
3623 flags |= FLAGS_CLOCKRT;
3624 if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
3625 cmd != FUTEX_WAIT_REQUEUE_PI)
3626 return -ENOSYS;
3627 }
3628
3629 switch (cmd) {
3630 case FUTEX_LOCK_PI:
3631 case FUTEX_UNLOCK_PI:
3632 case FUTEX_TRYLOCK_PI:
3633 case FUTEX_WAIT_REQUEUE_PI:
3634 case FUTEX_CMP_REQUEUE_PI:
3635 if (!futex_cmpxchg_enabled)
3636 return -ENOSYS;
3637 }
3638
3639 switch (cmd) {
3640 case FUTEX_WAIT:
3641 val3 = FUTEX_BITSET_MATCH_ANY;
3642 /* fall through */
3643 case FUTEX_WAIT_BITSET:
3644 return futex_wait(uaddr, flags, val, timeout, val3);
3645 case FUTEX_WAKE:
3646 val3 = FUTEX_BITSET_MATCH_ANY;
3647 /* fall through */
3648 case FUTEX_WAKE_BITSET:
3649 return futex_wake(uaddr, flags, val, val3);
3650 case FUTEX_REQUEUE:
3651 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3652 case FUTEX_CMP_REQUEUE:
3653 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3654 case FUTEX_WAKE_OP:
3655 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3656 case FUTEX_LOCK_PI:
3657 return futex_lock_pi(uaddr, flags, timeout, 0);
3658 case FUTEX_UNLOCK_PI:
3659 return futex_unlock_pi(uaddr, flags);
3660 case FUTEX_TRYLOCK_PI:
3661 return futex_lock_pi(uaddr, flags, NULL, 1);
3662 case FUTEX_WAIT_REQUEUE_PI:
3663 val3 = FUTEX_BITSET_MATCH_ANY;
3664 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3665 uaddr2);
3666 case FUTEX_CMP_REQUEUE_PI:
3667 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3668 }
3669 return -ENOSYS;
3670}
3671
3672
3673SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3674 struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
3675 u32, val3)
3676{
3677 struct timespec64 ts;
3678 ktime_t t, *tp = NULL;
3679 u32 val2 = 0;
3680 int cmd = op & FUTEX_CMD_MASK;
3681
3682 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3683 cmd == FUTEX_WAIT_BITSET ||
3684 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3685 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3686 return -EFAULT;
3687 if (get_timespec64(&ts, utime))
3688 return -EFAULT;
3689 if (!timespec64_valid(&ts))
3690 return -EINVAL;
3691
3692 t = timespec64_to_ktime(ts);
3693 if (cmd == FUTEX_WAIT)
3694 t = ktime_add_safe(ktime_get(), t);
3695 tp = &t;
3696 }
3697 /*
3698 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3699 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3700 */
3701 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3702 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3703 val2 = (u32) (unsigned long) utime;
3704
3705 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3706}
3707
3708#ifdef CONFIG_COMPAT
3709/*
3710 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3711 */
3712static inline int
3713compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
3714 compat_uptr_t __user *head, unsigned int *pi)
3715{
3716 if (get_user(*uentry, head))
3717 return -EFAULT;
3718
3719 *entry = compat_ptr((*uentry) & ~1);
3720 *pi = (unsigned int)(*uentry) & 1;
3721
3722 return 0;
3723}
3724
3725static void __user *futex_uaddr(struct robust_list __user *entry,
3726 compat_long_t futex_offset)
3727{
3728 compat_uptr_t base = ptr_to_compat(entry);
3729 void __user *uaddr = compat_ptr(base + futex_offset);
3730
3731 return uaddr;
3732}
3733
3734/*
3735 * Walk curr->robust_list (very carefully, it's a userspace list!)
3736 * and mark any locks found there dead, and notify any waiters.
3737 *
3738 * We silently return on any sign of list-walking problem.
3739 */
3740void compat_exit_robust_list(struct task_struct *curr)
3741{
3742 struct compat_robust_list_head __user *head = curr->compat_robust_list;
3743 struct robust_list __user *entry, *next_entry, *pending;
3744 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3745 unsigned int uninitialized_var(next_pi);
3746 compat_uptr_t uentry, next_uentry, upending;
3747 compat_long_t futex_offset;
3748 int rc;
3749
3750 if (!futex_cmpxchg_enabled)
3751 return;
3752
3753 /*
3754 * Fetch the list head (which was registered earlier, via
3755 * sys_set_robust_list()):
3756 */
3757 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
3758 return;
3759 /*
3760 * Fetch the relative futex offset:
3761 */
3762 if (get_user(futex_offset, &head->futex_offset))
3763 return;
3764 /*
3765 * Fetch any possibly pending lock-add first, and handle it
3766 * if it exists:
3767 */
3768 if (compat_fetch_robust_entry(&upending, &pending,
3769 &head->list_op_pending, &pip))
3770 return;
3771
3772 next_entry = NULL; /* avoid warning with gcc */
3773 while (entry != (struct robust_list __user *) &head->list) {
3774 /*
3775 * Fetch the next entry in the list before calling
3776 * handle_futex_death:
3777 */
3778 rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
3779 (compat_uptr_t __user *)&entry->next, &next_pi);
3780 /*
3781 * A pending lock might already be on the list, so
3782 * dont process it twice:
3783 */
3784 if (entry != pending) {
3785 void __user *uaddr = futex_uaddr(entry, futex_offset);
3786
3787 if (handle_futex_death(uaddr, curr, pi))
3788 return;
3789 }
3790 if (rc)
3791 return;
3792 uentry = next_uentry;
3793 entry = next_entry;
3794 pi = next_pi;
3795 /*
3796 * Avoid excessively long or circular lists:
3797 */
3798 if (!--limit)
3799 break;
3800
3801 cond_resched();
3802 }
3803 if (pending) {
3804 void __user *uaddr = futex_uaddr(pending, futex_offset);
3805
3806 handle_futex_death(uaddr, curr, pip);
3807 }
3808}
3809
3810COMPAT_SYSCALL_DEFINE2(set_robust_list,
3811 struct compat_robust_list_head __user *, head,
3812 compat_size_t, len)
3813{
3814 if (!futex_cmpxchg_enabled)
3815 return -ENOSYS;
3816
3817 if (unlikely(len != sizeof(*head)))
3818 return -EINVAL;
3819
3820 current->compat_robust_list = head;
3821
3822 return 0;
3823}
3824
3825COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
3826 compat_uptr_t __user *, head_ptr,
3827 compat_size_t __user *, len_ptr)
3828{
3829 struct compat_robust_list_head __user *head;
3830 unsigned long ret;
3831 struct task_struct *p;
3832
3833 if (!futex_cmpxchg_enabled)
3834 return -ENOSYS;
3835
3836 rcu_read_lock();
3837
3838 ret = -ESRCH;
3839 if (!pid)
3840 p = current;
3841 else {
3842 p = find_task_by_vpid(pid);
3843 if (!p)
3844 goto err_unlock;
3845 }
3846
3847 ret = -EPERM;
3848 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3849 goto err_unlock;
3850
3851 head = p->compat_robust_list;
3852 rcu_read_unlock();
3853
3854 if (put_user(sizeof(*head), len_ptr))
3855 return -EFAULT;
3856 return put_user(ptr_to_compat(head), head_ptr);
3857
3858err_unlock:
3859 rcu_read_unlock();
3860
3861 return ret;
3862}
3863#endif /* CONFIG_COMPAT */
3864
3865#ifdef CONFIG_COMPAT_32BIT_TIME
3866SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
3867 struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
3868 u32, val3)
3869{
3870 struct timespec64 ts;
3871 ktime_t t, *tp = NULL;
3872 int val2 = 0;
3873 int cmd = op & FUTEX_CMD_MASK;
3874
3875 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3876 cmd == FUTEX_WAIT_BITSET ||
3877 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3878 if (get_old_timespec32(&ts, utime))
3879 return -EFAULT;
3880 if (!timespec64_valid(&ts))
3881 return -EINVAL;
3882
3883 t = timespec64_to_ktime(ts);
3884 if (cmd == FUTEX_WAIT)
3885 t = ktime_add_safe(ktime_get(), t);
3886 tp = &t;
3887 }
3888 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3889 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3890 val2 = (int) (unsigned long) utime;
3891
3892 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3893}
3894#endif /* CONFIG_COMPAT_32BIT_TIME */
3895
3896static void __init futex_detect_cmpxchg(void)
3897{
3898#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3899 u32 curval;
3900
3901 /*
3902 * This will fail and we want it. Some arch implementations do
3903 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3904 * functionality. We want to know that before we call in any
3905 * of the complex code paths. Also we want to prevent
3906 * registration of robust lists in that case. NULL is
3907 * guaranteed to fault and we get -EFAULT on functional
3908 * implementation, the non-functional ones will return
3909 * -ENOSYS.
3910 */
3911 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3912 futex_cmpxchg_enabled = 1;
3913#endif
3914}
3915
3916static int __init futex_init(void)
3917{
3918 unsigned int futex_shift;
3919 unsigned long i;
3920
3921#if CONFIG_BASE_SMALL
3922 futex_hashsize = 16;
3923#else
3924 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3925#endif
3926
3927 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3928 futex_hashsize, 0,
3929 futex_hashsize < 256 ? HASH_SMALL : 0,
3930 &futex_shift, NULL,
3931 futex_hashsize, futex_hashsize);
3932 futex_hashsize = 1UL << futex_shift;
3933
3934 futex_detect_cmpxchg();
3935
3936 for (i = 0; i < futex_hashsize; i++) {
3937 atomic_set(&futex_queues[i].waiters, 0);
3938 plist_head_init(&futex_queues[i].chain);
3939 spin_lock_init(&futex_queues[i].lock);
3940 }
3941
3942 return 0;
3943}
3944core_initcall(futex_init);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Fast Userspace Mutexes (which I call "Futexes!").
4 * (C) Rusty Russell, IBM 2002
5 *
6 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8 *
9 * Removed page pinning, fix privately mapped COW pages and other cleanups
10 * (C) Copyright 2003, 2004 Jamie Lokier
11 *
12 * Robust futex support started by Ingo Molnar
13 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15 *
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
17 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19 *
20 * PRIVATE futexes by Eric Dumazet
21 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22 *
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24 * Copyright (C) IBM Corporation, 2009
25 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
26 *
27 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28 * enough at me, Linus for the original (flawed) idea, Matthew
29 * Kirkwood for proof-of-concept implementation.
30 *
31 * "The futexes are also cursed."
32 * "But they come in a choice of three flavours!"
33 */
34#include <linux/compat.h>
35#include <linux/jhash.h>
36#include <linux/pagemap.h>
37#include <linux/syscalls.h>
38#include <linux/freezer.h>
39#include <linux/memblock.h>
40#include <linux/fault-inject.h>
41#include <linux/time_namespace.h>
42
43#include <asm/futex.h>
44
45#include "locking/rtmutex_common.h"
46
47/*
48 * READ this before attempting to hack on futexes!
49 *
50 * Basic futex operation and ordering guarantees
51 * =============================================
52 *
53 * The waiter reads the futex value in user space and calls
54 * futex_wait(). This function computes the hash bucket and acquires
55 * the hash bucket lock. After that it reads the futex user space value
56 * again and verifies that the data has not changed. If it has not changed
57 * it enqueues itself into the hash bucket, releases the hash bucket lock
58 * and schedules.
59 *
60 * The waker side modifies the user space value of the futex and calls
61 * futex_wake(). This function computes the hash bucket and acquires the
62 * hash bucket lock. Then it looks for waiters on that futex in the hash
63 * bucket and wakes them.
64 *
65 * In futex wake up scenarios where no tasks are blocked on a futex, taking
66 * the hb spinlock can be avoided and simply return. In order for this
67 * optimization to work, ordering guarantees must exist so that the waiter
68 * being added to the list is acknowledged when the list is concurrently being
69 * checked by the waker, avoiding scenarios like the following:
70 *
71 * CPU 0 CPU 1
72 * val = *futex;
73 * sys_futex(WAIT, futex, val);
74 * futex_wait(futex, val);
75 * uval = *futex;
76 * *futex = newval;
77 * sys_futex(WAKE, futex);
78 * futex_wake(futex);
79 * if (queue_empty())
80 * return;
81 * if (uval == val)
82 * lock(hash_bucket(futex));
83 * queue();
84 * unlock(hash_bucket(futex));
85 * schedule();
86 *
87 * This would cause the waiter on CPU 0 to wait forever because it
88 * missed the transition of the user space value from val to newval
89 * and the waker did not find the waiter in the hash bucket queue.
90 *
91 * The correct serialization ensures that a waiter either observes
92 * the changed user space value before blocking or is woken by a
93 * concurrent waker:
94 *
95 * CPU 0 CPU 1
96 * val = *futex;
97 * sys_futex(WAIT, futex, val);
98 * futex_wait(futex, val);
99 *
100 * waiters++; (a)
101 * smp_mb(); (A) <-- paired with -.
102 * |
103 * lock(hash_bucket(futex)); |
104 * |
105 * uval = *futex; |
106 * | *futex = newval;
107 * | sys_futex(WAKE, futex);
108 * | futex_wake(futex);
109 * |
110 * `--------> smp_mb(); (B)
111 * if (uval == val)
112 * queue();
113 * unlock(hash_bucket(futex));
114 * schedule(); if (waiters)
115 * lock(hash_bucket(futex));
116 * else wake_waiters(futex);
117 * waiters--; (b) unlock(hash_bucket(futex));
118 *
119 * Where (A) orders the waiters increment and the futex value read through
120 * atomic operations (see hb_waiters_inc) and where (B) orders the write
121 * to futex and the waiters read (see hb_waiters_pending()).
122 *
123 * This yields the following case (where X:=waiters, Y:=futex):
124 *
125 * X = Y = 0
126 *
127 * w[X]=1 w[Y]=1
128 * MB MB
129 * r[Y]=y r[X]=x
130 *
131 * Which guarantees that x==0 && y==0 is impossible; which translates back into
132 * the guarantee that we cannot both miss the futex variable change and the
133 * enqueue.
134 *
135 * Note that a new waiter is accounted for in (a) even when it is possible that
136 * the wait call can return error, in which case we backtrack from it in (b).
137 * Refer to the comment in queue_lock().
138 *
139 * Similarly, in order to account for waiters being requeued on another
140 * address we always increment the waiters for the destination bucket before
141 * acquiring the lock. It then decrements them again after releasing it -
142 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
143 * will do the additional required waiter count housekeeping. This is done for
144 * double_lock_hb() and double_unlock_hb(), respectively.
145 */
146
147#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
148#define futex_cmpxchg_enabled 1
149#else
150static int __read_mostly futex_cmpxchg_enabled;
151#endif
152
153/*
154 * Futex flags used to encode options to functions and preserve them across
155 * restarts.
156 */
157#ifdef CONFIG_MMU
158# define FLAGS_SHARED 0x01
159#else
160/*
161 * NOMMU does not have per process address space. Let the compiler optimize
162 * code away.
163 */
164# define FLAGS_SHARED 0x00
165#endif
166#define FLAGS_CLOCKRT 0x02
167#define FLAGS_HAS_TIMEOUT 0x04
168
169/*
170 * Priority Inheritance state:
171 */
172struct futex_pi_state {
173 /*
174 * list of 'owned' pi_state instances - these have to be
175 * cleaned up in do_exit() if the task exits prematurely:
176 */
177 struct list_head list;
178
179 /*
180 * The PI object:
181 */
182 struct rt_mutex pi_mutex;
183
184 struct task_struct *owner;
185 refcount_t refcount;
186
187 union futex_key key;
188} __randomize_layout;
189
190/**
191 * struct futex_q - The hashed futex queue entry, one per waiting task
192 * @list: priority-sorted list of tasks waiting on this futex
193 * @task: the task waiting on the futex
194 * @lock_ptr: the hash bucket lock
195 * @key: the key the futex is hashed on
196 * @pi_state: optional priority inheritance state
197 * @rt_waiter: rt_waiter storage for use with requeue_pi
198 * @requeue_pi_key: the requeue_pi target futex key
199 * @bitset: bitset for the optional bitmasked wakeup
200 *
201 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
202 * we can wake only the relevant ones (hashed queues may be shared).
203 *
204 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
205 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
206 * The order of wakeup is always to make the first condition true, then
207 * the second.
208 *
209 * PI futexes are typically woken before they are removed from the hash list via
210 * the rt_mutex code. See unqueue_me_pi().
211 */
212struct futex_q {
213 struct plist_node list;
214
215 struct task_struct *task;
216 spinlock_t *lock_ptr;
217 union futex_key key;
218 struct futex_pi_state *pi_state;
219 struct rt_mutex_waiter *rt_waiter;
220 union futex_key *requeue_pi_key;
221 u32 bitset;
222} __randomize_layout;
223
224static const struct futex_q futex_q_init = {
225 /* list gets initialized in queue_me()*/
226 .key = FUTEX_KEY_INIT,
227 .bitset = FUTEX_BITSET_MATCH_ANY
228};
229
230/*
231 * Hash buckets are shared by all the futex_keys that hash to the same
232 * location. Each key may have multiple futex_q structures, one for each task
233 * waiting on a futex.
234 */
235struct futex_hash_bucket {
236 atomic_t waiters;
237 spinlock_t lock;
238 struct plist_head chain;
239} ____cacheline_aligned_in_smp;
240
241/*
242 * The base of the bucket array and its size are always used together
243 * (after initialization only in hash_futex()), so ensure that they
244 * reside in the same cacheline.
245 */
246static struct {
247 struct futex_hash_bucket *queues;
248 unsigned long hashsize;
249} __futex_data __read_mostly __aligned(2*sizeof(long));
250#define futex_queues (__futex_data.queues)
251#define futex_hashsize (__futex_data.hashsize)
252
253
254/*
255 * Fault injections for futexes.
256 */
257#ifdef CONFIG_FAIL_FUTEX
258
259static struct {
260 struct fault_attr attr;
261
262 bool ignore_private;
263} fail_futex = {
264 .attr = FAULT_ATTR_INITIALIZER,
265 .ignore_private = false,
266};
267
268static int __init setup_fail_futex(char *str)
269{
270 return setup_fault_attr(&fail_futex.attr, str);
271}
272__setup("fail_futex=", setup_fail_futex);
273
274static bool should_fail_futex(bool fshared)
275{
276 if (fail_futex.ignore_private && !fshared)
277 return false;
278
279 return should_fail(&fail_futex.attr, 1);
280}
281
282#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
283
284static int __init fail_futex_debugfs(void)
285{
286 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
287 struct dentry *dir;
288
289 dir = fault_create_debugfs_attr("fail_futex", NULL,
290 &fail_futex.attr);
291 if (IS_ERR(dir))
292 return PTR_ERR(dir);
293
294 debugfs_create_bool("ignore-private", mode, dir,
295 &fail_futex.ignore_private);
296 return 0;
297}
298
299late_initcall(fail_futex_debugfs);
300
301#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
302
303#else
304static inline bool should_fail_futex(bool fshared)
305{
306 return false;
307}
308#endif /* CONFIG_FAIL_FUTEX */
309
310#ifdef CONFIG_COMPAT
311static void compat_exit_robust_list(struct task_struct *curr);
312#endif
313
314/*
315 * Reflects a new waiter being added to the waitqueue.
316 */
317static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
318{
319#ifdef CONFIG_SMP
320 atomic_inc(&hb->waiters);
321 /*
322 * Full barrier (A), see the ordering comment above.
323 */
324 smp_mb__after_atomic();
325#endif
326}
327
328/*
329 * Reflects a waiter being removed from the waitqueue by wakeup
330 * paths.
331 */
332static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
333{
334#ifdef CONFIG_SMP
335 atomic_dec(&hb->waiters);
336#endif
337}
338
339static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
340{
341#ifdef CONFIG_SMP
342 /*
343 * Full barrier (B), see the ordering comment above.
344 */
345 smp_mb();
346 return atomic_read(&hb->waiters);
347#else
348 return 1;
349#endif
350}
351
352/**
353 * hash_futex - Return the hash bucket in the global hash
354 * @key: Pointer to the futex key for which the hash is calculated
355 *
356 * We hash on the keys returned from get_futex_key (see below) and return the
357 * corresponding hash bucket in the global hash.
358 */
359static struct futex_hash_bucket *hash_futex(union futex_key *key)
360{
361 u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
362 key->both.offset);
363
364 return &futex_queues[hash & (futex_hashsize - 1)];
365}
366
367
368/**
369 * match_futex - Check whether two futex keys are equal
370 * @key1: Pointer to key1
371 * @key2: Pointer to key2
372 *
373 * Return 1 if two futex_keys are equal, 0 otherwise.
374 */
375static inline int match_futex(union futex_key *key1, union futex_key *key2)
376{
377 return (key1 && key2
378 && key1->both.word == key2->both.word
379 && key1->both.ptr == key2->both.ptr
380 && key1->both.offset == key2->both.offset);
381}
382
383enum futex_access {
384 FUTEX_READ,
385 FUTEX_WRITE
386};
387
388/**
389 * futex_setup_timer - set up the sleeping hrtimer.
390 * @time: ptr to the given timeout value
391 * @timeout: the hrtimer_sleeper structure to be set up
392 * @flags: futex flags
393 * @range_ns: optional range in ns
394 *
395 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
396 * value given
397 */
398static inline struct hrtimer_sleeper *
399futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
400 int flags, u64 range_ns)
401{
402 if (!time)
403 return NULL;
404
405 hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
406 CLOCK_REALTIME : CLOCK_MONOTONIC,
407 HRTIMER_MODE_ABS);
408 /*
409 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
410 * effectively the same as calling hrtimer_set_expires().
411 */
412 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
413
414 return timeout;
415}
416
417/*
418 * Generate a machine wide unique identifier for this inode.
419 *
420 * This relies on u64 not wrapping in the life-time of the machine; which with
421 * 1ns resolution means almost 585 years.
422 *
423 * This further relies on the fact that a well formed program will not unmap
424 * the file while it has a (shared) futex waiting on it. This mapping will have
425 * a file reference which pins the mount and inode.
426 *
427 * If for some reason an inode gets evicted and read back in again, it will get
428 * a new sequence number and will _NOT_ match, even though it is the exact same
429 * file.
430 *
431 * It is important that match_futex() will never have a false-positive, esp.
432 * for PI futexes that can mess up the state. The above argues that false-negatives
433 * are only possible for malformed programs.
434 */
435static u64 get_inode_sequence_number(struct inode *inode)
436{
437 static atomic64_t i_seq;
438 u64 old;
439
440 /* Does the inode already have a sequence number? */
441 old = atomic64_read(&inode->i_sequence);
442 if (likely(old))
443 return old;
444
445 for (;;) {
446 u64 new = atomic64_add_return(1, &i_seq);
447 if (WARN_ON_ONCE(!new))
448 continue;
449
450 old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
451 if (old)
452 return old;
453 return new;
454 }
455}
456
457/**
458 * get_futex_key() - Get parameters which are the keys for a futex
459 * @uaddr: virtual address of the futex
460 * @fshared: false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED
461 * @key: address where result is stored.
462 * @rw: mapping needs to be read/write (values: FUTEX_READ,
463 * FUTEX_WRITE)
464 *
465 * Return: a negative error code or 0
466 *
467 * The key words are stored in @key on success.
468 *
469 * For shared mappings (when @fshared), the key is:
470 *
471 * ( inode->i_sequence, page->index, offset_within_page )
472 *
473 * [ also see get_inode_sequence_number() ]
474 *
475 * For private mappings (or when !@fshared), the key is:
476 *
477 * ( current->mm, address, 0 )
478 *
479 * This allows (cross process, where applicable) identification of the futex
480 * without keeping the page pinned for the duration of the FUTEX_WAIT.
481 *
482 * lock_page() might sleep, the caller should not hold a spinlock.
483 */
484static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
485 enum futex_access rw)
486{
487 unsigned long address = (unsigned long)uaddr;
488 struct mm_struct *mm = current->mm;
489 struct page *page, *tail;
490 struct address_space *mapping;
491 int err, ro = 0;
492
493 /*
494 * The futex address must be "naturally" aligned.
495 */
496 key->both.offset = address % PAGE_SIZE;
497 if (unlikely((address % sizeof(u32)) != 0))
498 return -EINVAL;
499 address -= key->both.offset;
500
501 if (unlikely(!access_ok(uaddr, sizeof(u32))))
502 return -EFAULT;
503
504 if (unlikely(should_fail_futex(fshared)))
505 return -EFAULT;
506
507 /*
508 * PROCESS_PRIVATE futexes are fast.
509 * As the mm cannot disappear under us and the 'key' only needs
510 * virtual address, we dont even have to find the underlying vma.
511 * Note : We do have to check 'uaddr' is a valid user address,
512 * but access_ok() should be faster than find_vma()
513 */
514 if (!fshared) {
515 key->private.mm = mm;
516 key->private.address = address;
517 return 0;
518 }
519
520again:
521 /* Ignore any VERIFY_READ mapping (futex common case) */
522 if (unlikely(should_fail_futex(true)))
523 return -EFAULT;
524
525 err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
526 /*
527 * If write access is not required (eg. FUTEX_WAIT), try
528 * and get read-only access.
529 */
530 if (err == -EFAULT && rw == FUTEX_READ) {
531 err = get_user_pages_fast(address, 1, 0, &page);
532 ro = 1;
533 }
534 if (err < 0)
535 return err;
536 else
537 err = 0;
538
539 /*
540 * The treatment of mapping from this point on is critical. The page
541 * lock protects many things but in this context the page lock
542 * stabilizes mapping, prevents inode freeing in the shared
543 * file-backed region case and guards against movement to swap cache.
544 *
545 * Strictly speaking the page lock is not needed in all cases being
546 * considered here and page lock forces unnecessarily serialization
547 * From this point on, mapping will be re-verified if necessary and
548 * page lock will be acquired only if it is unavoidable
549 *
550 * Mapping checks require the head page for any compound page so the
551 * head page and mapping is looked up now. For anonymous pages, it
552 * does not matter if the page splits in the future as the key is
553 * based on the address. For filesystem-backed pages, the tail is
554 * required as the index of the page determines the key. For
555 * base pages, there is no tail page and tail == page.
556 */
557 tail = page;
558 page = compound_head(page);
559 mapping = READ_ONCE(page->mapping);
560
561 /*
562 * If page->mapping is NULL, then it cannot be a PageAnon
563 * page; but it might be the ZERO_PAGE or in the gate area or
564 * in a special mapping (all cases which we are happy to fail);
565 * or it may have been a good file page when get_user_pages_fast
566 * found it, but truncated or holepunched or subjected to
567 * invalidate_complete_page2 before we got the page lock (also
568 * cases which we are happy to fail). And we hold a reference,
569 * so refcount care in invalidate_complete_page's remove_mapping
570 * prevents drop_caches from setting mapping to NULL beneath us.
571 *
572 * The case we do have to guard against is when memory pressure made
573 * shmem_writepage move it from filecache to swapcache beneath us:
574 * an unlikely race, but we do need to retry for page->mapping.
575 */
576 if (unlikely(!mapping)) {
577 int shmem_swizzled;
578
579 /*
580 * Page lock is required to identify which special case above
581 * applies. If this is really a shmem page then the page lock
582 * will prevent unexpected transitions.
583 */
584 lock_page(page);
585 shmem_swizzled = PageSwapCache(page) || page->mapping;
586 unlock_page(page);
587 put_page(page);
588
589 if (shmem_swizzled)
590 goto again;
591
592 return -EFAULT;
593 }
594
595 /*
596 * Private mappings are handled in a simple way.
597 *
598 * If the futex key is stored on an anonymous page, then the associated
599 * object is the mm which is implicitly pinned by the calling process.
600 *
601 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
602 * it's a read-only handle, it's expected that futexes attach to
603 * the object not the particular process.
604 */
605 if (PageAnon(page)) {
606 /*
607 * A RO anonymous page will never change and thus doesn't make
608 * sense for futex operations.
609 */
610 if (unlikely(should_fail_futex(true)) || ro) {
611 err = -EFAULT;
612 goto out;
613 }
614
615 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
616 key->private.mm = mm;
617 key->private.address = address;
618
619 } else {
620 struct inode *inode;
621
622 /*
623 * The associated futex object in this case is the inode and
624 * the page->mapping must be traversed. Ordinarily this should
625 * be stabilised under page lock but it's not strictly
626 * necessary in this case as we just want to pin the inode, not
627 * update the radix tree or anything like that.
628 *
629 * The RCU read lock is taken as the inode is finally freed
630 * under RCU. If the mapping still matches expectations then the
631 * mapping->host can be safely accessed as being a valid inode.
632 */
633 rcu_read_lock();
634
635 if (READ_ONCE(page->mapping) != mapping) {
636 rcu_read_unlock();
637 put_page(page);
638
639 goto again;
640 }
641
642 inode = READ_ONCE(mapping->host);
643 if (!inode) {
644 rcu_read_unlock();
645 put_page(page);
646
647 goto again;
648 }
649
650 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
651 key->shared.i_seq = get_inode_sequence_number(inode);
652 key->shared.pgoff = page_to_pgoff(tail);
653 rcu_read_unlock();
654 }
655
656out:
657 put_page(page);
658 return err;
659}
660
661/**
662 * fault_in_user_writeable() - Fault in user address and verify RW access
663 * @uaddr: pointer to faulting user space address
664 *
665 * Slow path to fixup the fault we just took in the atomic write
666 * access to @uaddr.
667 *
668 * We have no generic implementation of a non-destructive write to the
669 * user address. We know that we faulted in the atomic pagefault
670 * disabled section so we can as well avoid the #PF overhead by
671 * calling get_user_pages() right away.
672 */
673static int fault_in_user_writeable(u32 __user *uaddr)
674{
675 struct mm_struct *mm = current->mm;
676 int ret;
677
678 mmap_read_lock(mm);
679 ret = fixup_user_fault(mm, (unsigned long)uaddr,
680 FAULT_FLAG_WRITE, NULL);
681 mmap_read_unlock(mm);
682
683 return ret < 0 ? ret : 0;
684}
685
686/**
687 * futex_top_waiter() - Return the highest priority waiter on a futex
688 * @hb: the hash bucket the futex_q's reside in
689 * @key: the futex key (to distinguish it from other futex futex_q's)
690 *
691 * Must be called with the hb lock held.
692 */
693static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
694 union futex_key *key)
695{
696 struct futex_q *this;
697
698 plist_for_each_entry(this, &hb->chain, list) {
699 if (match_futex(&this->key, key))
700 return this;
701 }
702 return NULL;
703}
704
705static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
706 u32 uval, u32 newval)
707{
708 int ret;
709
710 pagefault_disable();
711 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
712 pagefault_enable();
713
714 return ret;
715}
716
717static int get_futex_value_locked(u32 *dest, u32 __user *from)
718{
719 int ret;
720
721 pagefault_disable();
722 ret = __get_user(*dest, from);
723 pagefault_enable();
724
725 return ret ? -EFAULT : 0;
726}
727
728
729/*
730 * PI code:
731 */
732static int refill_pi_state_cache(void)
733{
734 struct futex_pi_state *pi_state;
735
736 if (likely(current->pi_state_cache))
737 return 0;
738
739 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
740
741 if (!pi_state)
742 return -ENOMEM;
743
744 INIT_LIST_HEAD(&pi_state->list);
745 /* pi_mutex gets initialized later */
746 pi_state->owner = NULL;
747 refcount_set(&pi_state->refcount, 1);
748 pi_state->key = FUTEX_KEY_INIT;
749
750 current->pi_state_cache = pi_state;
751
752 return 0;
753}
754
755static struct futex_pi_state *alloc_pi_state(void)
756{
757 struct futex_pi_state *pi_state = current->pi_state_cache;
758
759 WARN_ON(!pi_state);
760 current->pi_state_cache = NULL;
761
762 return pi_state;
763}
764
765static void pi_state_update_owner(struct futex_pi_state *pi_state,
766 struct task_struct *new_owner)
767{
768 struct task_struct *old_owner = pi_state->owner;
769
770 lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
771
772 if (old_owner) {
773 raw_spin_lock(&old_owner->pi_lock);
774 WARN_ON(list_empty(&pi_state->list));
775 list_del_init(&pi_state->list);
776 raw_spin_unlock(&old_owner->pi_lock);
777 }
778
779 if (new_owner) {
780 raw_spin_lock(&new_owner->pi_lock);
781 WARN_ON(!list_empty(&pi_state->list));
782 list_add(&pi_state->list, &new_owner->pi_state_list);
783 pi_state->owner = new_owner;
784 raw_spin_unlock(&new_owner->pi_lock);
785 }
786}
787
788static void get_pi_state(struct futex_pi_state *pi_state)
789{
790 WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
791}
792
793/*
794 * Drops a reference to the pi_state object and frees or caches it
795 * when the last reference is gone.
796 */
797static void put_pi_state(struct futex_pi_state *pi_state)
798{
799 if (!pi_state)
800 return;
801
802 if (!refcount_dec_and_test(&pi_state->refcount))
803 return;
804
805 /*
806 * If pi_state->owner is NULL, the owner is most probably dying
807 * and has cleaned up the pi_state already
808 */
809 if (pi_state->owner) {
810 unsigned long flags;
811
812 raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
813 pi_state_update_owner(pi_state, NULL);
814 rt_mutex_proxy_unlock(&pi_state->pi_mutex);
815 raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
816 }
817
818 if (current->pi_state_cache) {
819 kfree(pi_state);
820 } else {
821 /*
822 * pi_state->list is already empty.
823 * clear pi_state->owner.
824 * refcount is at 0 - put it back to 1.
825 */
826 pi_state->owner = NULL;
827 refcount_set(&pi_state->refcount, 1);
828 current->pi_state_cache = pi_state;
829 }
830}
831
832#ifdef CONFIG_FUTEX_PI
833
834/*
835 * This task is holding PI mutexes at exit time => bad.
836 * Kernel cleans up PI-state, but userspace is likely hosed.
837 * (Robust-futex cleanup is separate and might save the day for userspace.)
838 */
839static void exit_pi_state_list(struct task_struct *curr)
840{
841 struct list_head *next, *head = &curr->pi_state_list;
842 struct futex_pi_state *pi_state;
843 struct futex_hash_bucket *hb;
844 union futex_key key = FUTEX_KEY_INIT;
845
846 if (!futex_cmpxchg_enabled)
847 return;
848 /*
849 * We are a ZOMBIE and nobody can enqueue itself on
850 * pi_state_list anymore, but we have to be careful
851 * versus waiters unqueueing themselves:
852 */
853 raw_spin_lock_irq(&curr->pi_lock);
854 while (!list_empty(head)) {
855 next = head->next;
856 pi_state = list_entry(next, struct futex_pi_state, list);
857 key = pi_state->key;
858 hb = hash_futex(&key);
859
860 /*
861 * We can race against put_pi_state() removing itself from the
862 * list (a waiter going away). put_pi_state() will first
863 * decrement the reference count and then modify the list, so
864 * its possible to see the list entry but fail this reference
865 * acquire.
866 *
867 * In that case; drop the locks to let put_pi_state() make
868 * progress and retry the loop.
869 */
870 if (!refcount_inc_not_zero(&pi_state->refcount)) {
871 raw_spin_unlock_irq(&curr->pi_lock);
872 cpu_relax();
873 raw_spin_lock_irq(&curr->pi_lock);
874 continue;
875 }
876 raw_spin_unlock_irq(&curr->pi_lock);
877
878 spin_lock(&hb->lock);
879 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
880 raw_spin_lock(&curr->pi_lock);
881 /*
882 * We dropped the pi-lock, so re-check whether this
883 * task still owns the PI-state:
884 */
885 if (head->next != next) {
886 /* retain curr->pi_lock for the loop invariant */
887 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
888 spin_unlock(&hb->lock);
889 put_pi_state(pi_state);
890 continue;
891 }
892
893 WARN_ON(pi_state->owner != curr);
894 WARN_ON(list_empty(&pi_state->list));
895 list_del_init(&pi_state->list);
896 pi_state->owner = NULL;
897
898 raw_spin_unlock(&curr->pi_lock);
899 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
900 spin_unlock(&hb->lock);
901
902 rt_mutex_futex_unlock(&pi_state->pi_mutex);
903 put_pi_state(pi_state);
904
905 raw_spin_lock_irq(&curr->pi_lock);
906 }
907 raw_spin_unlock_irq(&curr->pi_lock);
908}
909#else
910static inline void exit_pi_state_list(struct task_struct *curr) { }
911#endif
912
913/*
914 * We need to check the following states:
915 *
916 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
917 *
918 * [1] NULL | --- | --- | 0 | 0/1 | Valid
919 * [2] NULL | --- | --- | >0 | 0/1 | Valid
920 *
921 * [3] Found | NULL | -- | Any | 0/1 | Invalid
922 *
923 * [4] Found | Found | NULL | 0 | 1 | Valid
924 * [5] Found | Found | NULL | >0 | 1 | Invalid
925 *
926 * [6] Found | Found | task | 0 | 1 | Valid
927 *
928 * [7] Found | Found | NULL | Any | 0 | Invalid
929 *
930 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
931 * [9] Found | Found | task | 0 | 0 | Invalid
932 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
933 *
934 * [1] Indicates that the kernel can acquire the futex atomically. We
935 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
936 *
937 * [2] Valid, if TID does not belong to a kernel thread. If no matching
938 * thread is found then it indicates that the owner TID has died.
939 *
940 * [3] Invalid. The waiter is queued on a non PI futex
941 *
942 * [4] Valid state after exit_robust_list(), which sets the user space
943 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
944 *
945 * [5] The user space value got manipulated between exit_robust_list()
946 * and exit_pi_state_list()
947 *
948 * [6] Valid state after exit_pi_state_list() which sets the new owner in
949 * the pi_state but cannot access the user space value.
950 *
951 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
952 *
953 * [8] Owner and user space value match
954 *
955 * [9] There is no transient state which sets the user space TID to 0
956 * except exit_robust_list(), but this is indicated by the
957 * FUTEX_OWNER_DIED bit. See [4]
958 *
959 * [10] There is no transient state which leaves owner and user space
960 * TID out of sync. Except one error case where the kernel is denied
961 * write access to the user address, see fixup_pi_state_owner().
962 *
963 *
964 * Serialization and lifetime rules:
965 *
966 * hb->lock:
967 *
968 * hb -> futex_q, relation
969 * futex_q -> pi_state, relation
970 *
971 * (cannot be raw because hb can contain arbitrary amount
972 * of futex_q's)
973 *
974 * pi_mutex->wait_lock:
975 *
976 * {uval, pi_state}
977 *
978 * (and pi_mutex 'obviously')
979 *
980 * p->pi_lock:
981 *
982 * p->pi_state_list -> pi_state->list, relation
983 * pi_mutex->owner -> pi_state->owner, relation
984 *
985 * pi_state->refcount:
986 *
987 * pi_state lifetime
988 *
989 *
990 * Lock order:
991 *
992 * hb->lock
993 * pi_mutex->wait_lock
994 * p->pi_lock
995 *
996 */
997
998/*
999 * Validate that the existing waiter has a pi_state and sanity check
1000 * the pi_state against the user space value. If correct, attach to
1001 * it.
1002 */
1003static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
1004 struct futex_pi_state *pi_state,
1005 struct futex_pi_state **ps)
1006{
1007 pid_t pid = uval & FUTEX_TID_MASK;
1008 u32 uval2;
1009 int ret;
1010
1011 /*
1012 * Userspace might have messed up non-PI and PI futexes [3]
1013 */
1014 if (unlikely(!pi_state))
1015 return -EINVAL;
1016
1017 /*
1018 * We get here with hb->lock held, and having found a
1019 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
1020 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
1021 * which in turn means that futex_lock_pi() still has a reference on
1022 * our pi_state.
1023 *
1024 * The waiter holding a reference on @pi_state also protects against
1025 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
1026 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
1027 * free pi_state before we can take a reference ourselves.
1028 */
1029 WARN_ON(!refcount_read(&pi_state->refcount));
1030
1031 /*
1032 * Now that we have a pi_state, we can acquire wait_lock
1033 * and do the state validation.
1034 */
1035 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1036
1037 /*
1038 * Since {uval, pi_state} is serialized by wait_lock, and our current
1039 * uval was read without holding it, it can have changed. Verify it
1040 * still is what we expect it to be, otherwise retry the entire
1041 * operation.
1042 */
1043 if (get_futex_value_locked(&uval2, uaddr))
1044 goto out_efault;
1045
1046 if (uval != uval2)
1047 goto out_eagain;
1048
1049 /*
1050 * Handle the owner died case:
1051 */
1052 if (uval & FUTEX_OWNER_DIED) {
1053 /*
1054 * exit_pi_state_list sets owner to NULL and wakes the
1055 * topmost waiter. The task which acquires the
1056 * pi_state->rt_mutex will fixup owner.
1057 */
1058 if (!pi_state->owner) {
1059 /*
1060 * No pi state owner, but the user space TID
1061 * is not 0. Inconsistent state. [5]
1062 */
1063 if (pid)
1064 goto out_einval;
1065 /*
1066 * Take a ref on the state and return success. [4]
1067 */
1068 goto out_attach;
1069 }
1070
1071 /*
1072 * If TID is 0, then either the dying owner has not
1073 * yet executed exit_pi_state_list() or some waiter
1074 * acquired the rtmutex in the pi state, but did not
1075 * yet fixup the TID in user space.
1076 *
1077 * Take a ref on the state and return success. [6]
1078 */
1079 if (!pid)
1080 goto out_attach;
1081 } else {
1082 /*
1083 * If the owner died bit is not set, then the pi_state
1084 * must have an owner. [7]
1085 */
1086 if (!pi_state->owner)
1087 goto out_einval;
1088 }
1089
1090 /*
1091 * Bail out if user space manipulated the futex value. If pi
1092 * state exists then the owner TID must be the same as the
1093 * user space TID. [9/10]
1094 */
1095 if (pid != task_pid_vnr(pi_state->owner))
1096 goto out_einval;
1097
1098out_attach:
1099 get_pi_state(pi_state);
1100 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1101 *ps = pi_state;
1102 return 0;
1103
1104out_einval:
1105 ret = -EINVAL;
1106 goto out_error;
1107
1108out_eagain:
1109 ret = -EAGAIN;
1110 goto out_error;
1111
1112out_efault:
1113 ret = -EFAULT;
1114 goto out_error;
1115
1116out_error:
1117 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1118 return ret;
1119}
1120
1121/**
1122 * wait_for_owner_exiting - Block until the owner has exited
1123 * @ret: owner's current futex lock status
1124 * @exiting: Pointer to the exiting task
1125 *
1126 * Caller must hold a refcount on @exiting.
1127 */
1128static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
1129{
1130 if (ret != -EBUSY) {
1131 WARN_ON_ONCE(exiting);
1132 return;
1133 }
1134
1135 if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
1136 return;
1137
1138 mutex_lock(&exiting->futex_exit_mutex);
1139 /*
1140 * No point in doing state checking here. If the waiter got here
1141 * while the task was in exec()->exec_futex_release() then it can
1142 * have any FUTEX_STATE_* value when the waiter has acquired the
1143 * mutex. OK, if running, EXITING or DEAD if it reached exit()
1144 * already. Highly unlikely and not a problem. Just one more round
1145 * through the futex maze.
1146 */
1147 mutex_unlock(&exiting->futex_exit_mutex);
1148
1149 put_task_struct(exiting);
1150}
1151
1152static int handle_exit_race(u32 __user *uaddr, u32 uval,
1153 struct task_struct *tsk)
1154{
1155 u32 uval2;
1156
1157 /*
1158 * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
1159 * caller that the alleged owner is busy.
1160 */
1161 if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
1162 return -EBUSY;
1163
1164 /*
1165 * Reread the user space value to handle the following situation:
1166 *
1167 * CPU0 CPU1
1168 *
1169 * sys_exit() sys_futex()
1170 * do_exit() futex_lock_pi()
1171 * futex_lock_pi_atomic()
1172 * exit_signals(tsk) No waiters:
1173 * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
1174 * mm_release(tsk) Set waiter bit
1175 * exit_robust_list(tsk) { *uaddr = 0x80000PID;
1176 * Set owner died attach_to_pi_owner() {
1177 * *uaddr = 0xC0000000; tsk = get_task(PID);
1178 * } if (!tsk->flags & PF_EXITING) {
1179 * ... attach();
1180 * tsk->futex_state = } else {
1181 * FUTEX_STATE_DEAD; if (tsk->futex_state !=
1182 * FUTEX_STATE_DEAD)
1183 * return -EAGAIN;
1184 * return -ESRCH; <--- FAIL
1185 * }
1186 *
1187 * Returning ESRCH unconditionally is wrong here because the
1188 * user space value has been changed by the exiting task.
1189 *
1190 * The same logic applies to the case where the exiting task is
1191 * already gone.
1192 */
1193 if (get_futex_value_locked(&uval2, uaddr))
1194 return -EFAULT;
1195
1196 /* If the user space value has changed, try again. */
1197 if (uval2 != uval)
1198 return -EAGAIN;
1199
1200 /*
1201 * The exiting task did not have a robust list, the robust list was
1202 * corrupted or the user space value in *uaddr is simply bogus.
1203 * Give up and tell user space.
1204 */
1205 return -ESRCH;
1206}
1207
1208/*
1209 * Lookup the task for the TID provided from user space and attach to
1210 * it after doing proper sanity checks.
1211 */
1212static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
1213 struct futex_pi_state **ps,
1214 struct task_struct **exiting)
1215{
1216 pid_t pid = uval & FUTEX_TID_MASK;
1217 struct futex_pi_state *pi_state;
1218 struct task_struct *p;
1219
1220 /*
1221 * We are the first waiter - try to look up the real owner and attach
1222 * the new pi_state to it, but bail out when TID = 0 [1]
1223 *
1224 * The !pid check is paranoid. None of the call sites should end up
1225 * with pid == 0, but better safe than sorry. Let the caller retry
1226 */
1227 if (!pid)
1228 return -EAGAIN;
1229 p = find_get_task_by_vpid(pid);
1230 if (!p)
1231 return handle_exit_race(uaddr, uval, NULL);
1232
1233 if (unlikely(p->flags & PF_KTHREAD)) {
1234 put_task_struct(p);
1235 return -EPERM;
1236 }
1237
1238 /*
1239 * We need to look at the task state to figure out, whether the
1240 * task is exiting. To protect against the change of the task state
1241 * in futex_exit_release(), we do this protected by p->pi_lock:
1242 */
1243 raw_spin_lock_irq(&p->pi_lock);
1244 if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
1245 /*
1246 * The task is on the way out. When the futex state is
1247 * FUTEX_STATE_DEAD, we know that the task has finished
1248 * the cleanup:
1249 */
1250 int ret = handle_exit_race(uaddr, uval, p);
1251
1252 raw_spin_unlock_irq(&p->pi_lock);
1253 /*
1254 * If the owner task is between FUTEX_STATE_EXITING and
1255 * FUTEX_STATE_DEAD then store the task pointer and keep
1256 * the reference on the task struct. The calling code will
1257 * drop all locks, wait for the task to reach
1258 * FUTEX_STATE_DEAD and then drop the refcount. This is
1259 * required to prevent a live lock when the current task
1260 * preempted the exiting task between the two states.
1261 */
1262 if (ret == -EBUSY)
1263 *exiting = p;
1264 else
1265 put_task_struct(p);
1266 return ret;
1267 }
1268
1269 /*
1270 * No existing pi state. First waiter. [2]
1271 *
1272 * This creates pi_state, we have hb->lock held, this means nothing can
1273 * observe this state, wait_lock is irrelevant.
1274 */
1275 pi_state = alloc_pi_state();
1276
1277 /*
1278 * Initialize the pi_mutex in locked state and make @p
1279 * the owner of it:
1280 */
1281 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1282
1283 /* Store the key for possible exit cleanups: */
1284 pi_state->key = *key;
1285
1286 WARN_ON(!list_empty(&pi_state->list));
1287 list_add(&pi_state->list, &p->pi_state_list);
1288 /*
1289 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
1290 * because there is no concurrency as the object is not published yet.
1291 */
1292 pi_state->owner = p;
1293 raw_spin_unlock_irq(&p->pi_lock);
1294
1295 put_task_struct(p);
1296
1297 *ps = pi_state;
1298
1299 return 0;
1300}
1301
1302static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1303 struct futex_hash_bucket *hb,
1304 union futex_key *key, struct futex_pi_state **ps,
1305 struct task_struct **exiting)
1306{
1307 struct futex_q *top_waiter = futex_top_waiter(hb, key);
1308
1309 /*
1310 * If there is a waiter on that futex, validate it and
1311 * attach to the pi_state when the validation succeeds.
1312 */
1313 if (top_waiter)
1314 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1315
1316 /*
1317 * We are the first waiter - try to look up the owner based on
1318 * @uval and attach to it.
1319 */
1320 return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
1321}
1322
1323static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1324{
1325 int err;
1326 u32 curval;
1327
1328 if (unlikely(should_fail_futex(true)))
1329 return -EFAULT;
1330
1331 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1332 if (unlikely(err))
1333 return err;
1334
1335 /* If user space value changed, let the caller retry */
1336 return curval != uval ? -EAGAIN : 0;
1337}
1338
1339/**
1340 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1341 * @uaddr: the pi futex user address
1342 * @hb: the pi futex hash bucket
1343 * @key: the futex key associated with uaddr and hb
1344 * @ps: the pi_state pointer where we store the result of the
1345 * lookup
1346 * @task: the task to perform the atomic lock work for. This will
1347 * be "current" except in the case of requeue pi.
1348 * @exiting: Pointer to store the task pointer of the owner task
1349 * which is in the middle of exiting
1350 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1351 *
1352 * Return:
1353 * - 0 - ready to wait;
1354 * - 1 - acquired the lock;
1355 * - <0 - error
1356 *
1357 * The hb->lock and futex_key refs shall be held by the caller.
1358 *
1359 * @exiting is only set when the return value is -EBUSY. If so, this holds
1360 * a refcount on the exiting task on return and the caller needs to drop it
1361 * after waiting for the exit to complete.
1362 */
1363static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1364 union futex_key *key,
1365 struct futex_pi_state **ps,
1366 struct task_struct *task,
1367 struct task_struct **exiting,
1368 int set_waiters)
1369{
1370 u32 uval, newval, vpid = task_pid_vnr(task);
1371 struct futex_q *top_waiter;
1372 int ret;
1373
1374 /*
1375 * Read the user space value first so we can validate a few
1376 * things before proceeding further.
1377 */
1378 if (get_futex_value_locked(&uval, uaddr))
1379 return -EFAULT;
1380
1381 if (unlikely(should_fail_futex(true)))
1382 return -EFAULT;
1383
1384 /*
1385 * Detect deadlocks.
1386 */
1387 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
1388 return -EDEADLK;
1389
1390 if ((unlikely(should_fail_futex(true))))
1391 return -EDEADLK;
1392
1393 /*
1394 * Lookup existing state first. If it exists, try to attach to
1395 * its pi_state.
1396 */
1397 top_waiter = futex_top_waiter(hb, key);
1398 if (top_waiter)
1399 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1400
1401 /*
1402 * No waiter and user TID is 0. We are here because the
1403 * waiters or the owner died bit is set or called from
1404 * requeue_cmp_pi or for whatever reason something took the
1405 * syscall.
1406 */
1407 if (!(uval & FUTEX_TID_MASK)) {
1408 /*
1409 * We take over the futex. No other waiters and the user space
1410 * TID is 0. We preserve the owner died bit.
1411 */
1412 newval = uval & FUTEX_OWNER_DIED;
1413 newval |= vpid;
1414
1415 /* The futex requeue_pi code can enforce the waiters bit */
1416 if (set_waiters)
1417 newval |= FUTEX_WAITERS;
1418
1419 ret = lock_pi_update_atomic(uaddr, uval, newval);
1420 /* If the take over worked, return 1 */
1421 return ret < 0 ? ret : 1;
1422 }
1423
1424 /*
1425 * First waiter. Set the waiters bit before attaching ourself to
1426 * the owner. If owner tries to unlock, it will be forced into
1427 * the kernel and blocked on hb->lock.
1428 */
1429 newval = uval | FUTEX_WAITERS;
1430 ret = lock_pi_update_atomic(uaddr, uval, newval);
1431 if (ret)
1432 return ret;
1433 /*
1434 * If the update of the user space value succeeded, we try to
1435 * attach to the owner. If that fails, no harm done, we only
1436 * set the FUTEX_WAITERS bit in the user space variable.
1437 */
1438 return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
1439}
1440
1441/**
1442 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1443 * @q: The futex_q to unqueue
1444 *
1445 * The q->lock_ptr must not be NULL and must be held by the caller.
1446 */
1447static void __unqueue_futex(struct futex_q *q)
1448{
1449 struct futex_hash_bucket *hb;
1450
1451 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
1452 return;
1453 lockdep_assert_held(q->lock_ptr);
1454
1455 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1456 plist_del(&q->list, &hb->chain);
1457 hb_waiters_dec(hb);
1458}
1459
1460/*
1461 * The hash bucket lock must be held when this is called.
1462 * Afterwards, the futex_q must not be accessed. Callers
1463 * must ensure to later call wake_up_q() for the actual
1464 * wakeups to occur.
1465 */
1466static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1467{
1468 struct task_struct *p = q->task;
1469
1470 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1471 return;
1472
1473 get_task_struct(p);
1474 __unqueue_futex(q);
1475 /*
1476 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
1477 * is written, without taking any locks. This is possible in the event
1478 * of a spurious wakeup, for example. A memory barrier is required here
1479 * to prevent the following store to lock_ptr from getting ahead of the
1480 * plist_del in __unqueue_futex().
1481 */
1482 smp_store_release(&q->lock_ptr, NULL);
1483
1484 /*
1485 * Queue the task for later wakeup for after we've released
1486 * the hb->lock.
1487 */
1488 wake_q_add_safe(wake_q, p);
1489}
1490
1491/*
1492 * Caller must hold a reference on @pi_state.
1493 */
1494static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
1495{
1496 u32 curval, newval;
1497 struct rt_mutex_waiter *top_waiter;
1498 struct task_struct *new_owner;
1499 bool postunlock = false;
1500 DEFINE_WAKE_Q(wake_q);
1501 int ret = 0;
1502
1503 top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
1504 if (WARN_ON_ONCE(!top_waiter)) {
1505 /*
1506 * As per the comment in futex_unlock_pi() this should not happen.
1507 *
1508 * When this happens, give up our locks and try again, giving
1509 * the futex_lock_pi() instance time to complete, either by
1510 * waiting on the rtmutex or removing itself from the futex
1511 * queue.
1512 */
1513 ret = -EAGAIN;
1514 goto out_unlock;
1515 }
1516
1517 new_owner = top_waiter->task;
1518
1519 /*
1520 * We pass it to the next owner. The WAITERS bit is always kept
1521 * enabled while there is PI state around. We cleanup the owner
1522 * died bit, because we are the owner.
1523 */
1524 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1525
1526 if (unlikely(should_fail_futex(true))) {
1527 ret = -EFAULT;
1528 goto out_unlock;
1529 }
1530
1531 ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1532 if (!ret && (curval != uval)) {
1533 /*
1534 * If a unconditional UNLOCK_PI operation (user space did not
1535 * try the TID->0 transition) raced with a waiter setting the
1536 * FUTEX_WAITERS flag between get_user() and locking the hash
1537 * bucket lock, retry the operation.
1538 */
1539 if ((FUTEX_TID_MASK & curval) == uval)
1540 ret = -EAGAIN;
1541 else
1542 ret = -EINVAL;
1543 }
1544
1545 if (!ret) {
1546 /*
1547 * This is a point of no return; once we modified the uval
1548 * there is no going back and subsequent operations must
1549 * not fail.
1550 */
1551 pi_state_update_owner(pi_state, new_owner);
1552 postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1553 }
1554
1555out_unlock:
1556 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1557
1558 if (postunlock)
1559 rt_mutex_postunlock(&wake_q);
1560
1561 return ret;
1562}
1563
1564/*
1565 * Express the locking dependencies for lockdep:
1566 */
1567static inline void
1568double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1569{
1570 if (hb1 <= hb2) {
1571 spin_lock(&hb1->lock);
1572 if (hb1 < hb2)
1573 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1574 } else { /* hb1 > hb2 */
1575 spin_lock(&hb2->lock);
1576 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1577 }
1578}
1579
1580static inline void
1581double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1582{
1583 spin_unlock(&hb1->lock);
1584 if (hb1 != hb2)
1585 spin_unlock(&hb2->lock);
1586}
1587
1588/*
1589 * Wake up waiters matching bitset queued on this futex (uaddr).
1590 */
1591static int
1592futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1593{
1594 struct futex_hash_bucket *hb;
1595 struct futex_q *this, *next;
1596 union futex_key key = FUTEX_KEY_INIT;
1597 int ret;
1598 DEFINE_WAKE_Q(wake_q);
1599
1600 if (!bitset)
1601 return -EINVAL;
1602
1603 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
1604 if (unlikely(ret != 0))
1605 return ret;
1606
1607 hb = hash_futex(&key);
1608
1609 /* Make sure we really have tasks to wakeup */
1610 if (!hb_waiters_pending(hb))
1611 return ret;
1612
1613 spin_lock(&hb->lock);
1614
1615 plist_for_each_entry_safe(this, next, &hb->chain, list) {
1616 if (match_futex (&this->key, &key)) {
1617 if (this->pi_state || this->rt_waiter) {
1618 ret = -EINVAL;
1619 break;
1620 }
1621
1622 /* Check if one of the bits is set in both bitsets */
1623 if (!(this->bitset & bitset))
1624 continue;
1625
1626 mark_wake_futex(&wake_q, this);
1627 if (++ret >= nr_wake)
1628 break;
1629 }
1630 }
1631
1632 spin_unlock(&hb->lock);
1633 wake_up_q(&wake_q);
1634 return ret;
1635}
1636
1637static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1638{
1639 unsigned int op = (encoded_op & 0x70000000) >> 28;
1640 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
1641 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1642 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1643 int oldval, ret;
1644
1645 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
1646 if (oparg < 0 || oparg > 31) {
1647 char comm[sizeof(current->comm)];
1648 /*
1649 * kill this print and return -EINVAL when userspace
1650 * is sane again
1651 */
1652 pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
1653 get_task_comm(comm, current), oparg);
1654 oparg &= 31;
1655 }
1656 oparg = 1 << oparg;
1657 }
1658
1659 pagefault_disable();
1660 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
1661 pagefault_enable();
1662 if (ret)
1663 return ret;
1664
1665 switch (cmp) {
1666 case FUTEX_OP_CMP_EQ:
1667 return oldval == cmparg;
1668 case FUTEX_OP_CMP_NE:
1669 return oldval != cmparg;
1670 case FUTEX_OP_CMP_LT:
1671 return oldval < cmparg;
1672 case FUTEX_OP_CMP_GE:
1673 return oldval >= cmparg;
1674 case FUTEX_OP_CMP_LE:
1675 return oldval <= cmparg;
1676 case FUTEX_OP_CMP_GT:
1677 return oldval > cmparg;
1678 default:
1679 return -ENOSYS;
1680 }
1681}
1682
1683/*
1684 * Wake up all waiters hashed on the physical page that is mapped
1685 * to this virtual address:
1686 */
1687static int
1688futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1689 int nr_wake, int nr_wake2, int op)
1690{
1691 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1692 struct futex_hash_bucket *hb1, *hb2;
1693 struct futex_q *this, *next;
1694 int ret, op_ret;
1695 DEFINE_WAKE_Q(wake_q);
1696
1697retry:
1698 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1699 if (unlikely(ret != 0))
1700 return ret;
1701 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
1702 if (unlikely(ret != 0))
1703 return ret;
1704
1705 hb1 = hash_futex(&key1);
1706 hb2 = hash_futex(&key2);
1707
1708retry_private:
1709 double_lock_hb(hb1, hb2);
1710 op_ret = futex_atomic_op_inuser(op, uaddr2);
1711 if (unlikely(op_ret < 0)) {
1712 double_unlock_hb(hb1, hb2);
1713
1714 if (!IS_ENABLED(CONFIG_MMU) ||
1715 unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
1716 /*
1717 * we don't get EFAULT from MMU faults if we don't have
1718 * an MMU, but we might get them from range checking
1719 */
1720 ret = op_ret;
1721 return ret;
1722 }
1723
1724 if (op_ret == -EFAULT) {
1725 ret = fault_in_user_writeable(uaddr2);
1726 if (ret)
1727 return ret;
1728 }
1729
1730 cond_resched();
1731 if (!(flags & FLAGS_SHARED))
1732 goto retry_private;
1733 goto retry;
1734 }
1735
1736 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1737 if (match_futex (&this->key, &key1)) {
1738 if (this->pi_state || this->rt_waiter) {
1739 ret = -EINVAL;
1740 goto out_unlock;
1741 }
1742 mark_wake_futex(&wake_q, this);
1743 if (++ret >= nr_wake)
1744 break;
1745 }
1746 }
1747
1748 if (op_ret > 0) {
1749 op_ret = 0;
1750 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1751 if (match_futex (&this->key, &key2)) {
1752 if (this->pi_state || this->rt_waiter) {
1753 ret = -EINVAL;
1754 goto out_unlock;
1755 }
1756 mark_wake_futex(&wake_q, this);
1757 if (++op_ret >= nr_wake2)
1758 break;
1759 }
1760 }
1761 ret += op_ret;
1762 }
1763
1764out_unlock:
1765 double_unlock_hb(hb1, hb2);
1766 wake_up_q(&wake_q);
1767 return ret;
1768}
1769
1770/**
1771 * requeue_futex() - Requeue a futex_q from one hb to another
1772 * @q: the futex_q to requeue
1773 * @hb1: the source hash_bucket
1774 * @hb2: the target hash_bucket
1775 * @key2: the new key for the requeued futex_q
1776 */
1777static inline
1778void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1779 struct futex_hash_bucket *hb2, union futex_key *key2)
1780{
1781
1782 /*
1783 * If key1 and key2 hash to the same bucket, no need to
1784 * requeue.
1785 */
1786 if (likely(&hb1->chain != &hb2->chain)) {
1787 plist_del(&q->list, &hb1->chain);
1788 hb_waiters_dec(hb1);
1789 hb_waiters_inc(hb2);
1790 plist_add(&q->list, &hb2->chain);
1791 q->lock_ptr = &hb2->lock;
1792 }
1793 q->key = *key2;
1794}
1795
1796/**
1797 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1798 * @q: the futex_q
1799 * @key: the key of the requeue target futex
1800 * @hb: the hash_bucket of the requeue target futex
1801 *
1802 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1803 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1804 * to the requeue target futex so the waiter can detect the wakeup on the right
1805 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1806 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1807 * to protect access to the pi_state to fixup the owner later. Must be called
1808 * with both q->lock_ptr and hb->lock held.
1809 */
1810static inline
1811void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1812 struct futex_hash_bucket *hb)
1813{
1814 q->key = *key;
1815
1816 __unqueue_futex(q);
1817
1818 WARN_ON(!q->rt_waiter);
1819 q->rt_waiter = NULL;
1820
1821 q->lock_ptr = &hb->lock;
1822
1823 wake_up_state(q->task, TASK_NORMAL);
1824}
1825
1826/**
1827 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1828 * @pifutex: the user address of the to futex
1829 * @hb1: the from futex hash bucket, must be locked by the caller
1830 * @hb2: the to futex hash bucket, must be locked by the caller
1831 * @key1: the from futex key
1832 * @key2: the to futex key
1833 * @ps: address to store the pi_state pointer
1834 * @exiting: Pointer to store the task pointer of the owner task
1835 * which is in the middle of exiting
1836 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1837 *
1838 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1839 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1840 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1841 * hb1 and hb2 must be held by the caller.
1842 *
1843 * @exiting is only set when the return value is -EBUSY. If so, this holds
1844 * a refcount on the exiting task on return and the caller needs to drop it
1845 * after waiting for the exit to complete.
1846 *
1847 * Return:
1848 * - 0 - failed to acquire the lock atomically;
1849 * - >0 - acquired the lock, return value is vpid of the top_waiter
1850 * - <0 - error
1851 */
1852static int
1853futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
1854 struct futex_hash_bucket *hb2, union futex_key *key1,
1855 union futex_key *key2, struct futex_pi_state **ps,
1856 struct task_struct **exiting, int set_waiters)
1857{
1858 struct futex_q *top_waiter = NULL;
1859 u32 curval;
1860 int ret, vpid;
1861
1862 if (get_futex_value_locked(&curval, pifutex))
1863 return -EFAULT;
1864
1865 if (unlikely(should_fail_futex(true)))
1866 return -EFAULT;
1867
1868 /*
1869 * Find the top_waiter and determine if there are additional waiters.
1870 * If the caller intends to requeue more than 1 waiter to pifutex,
1871 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1872 * as we have means to handle the possible fault. If not, don't set
1873 * the bit unnecessarily as it will force the subsequent unlock to enter
1874 * the kernel.
1875 */
1876 top_waiter = futex_top_waiter(hb1, key1);
1877
1878 /* There are no waiters, nothing for us to do. */
1879 if (!top_waiter)
1880 return 0;
1881
1882 /* Ensure we requeue to the expected futex. */
1883 if (!match_futex(top_waiter->requeue_pi_key, key2))
1884 return -EINVAL;
1885
1886 /*
1887 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1888 * the contended case or if set_waiters is 1. The pi_state is returned
1889 * in ps in contended cases.
1890 */
1891 vpid = task_pid_vnr(top_waiter->task);
1892 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1893 exiting, set_waiters);
1894 if (ret == 1) {
1895 requeue_pi_wake_futex(top_waiter, key2, hb2);
1896 return vpid;
1897 }
1898 return ret;
1899}
1900
1901/**
1902 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1903 * @uaddr1: source futex user address
1904 * @flags: futex flags (FLAGS_SHARED, etc.)
1905 * @uaddr2: target futex user address
1906 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1907 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1908 * @cmpval: @uaddr1 expected value (or %NULL)
1909 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1910 * pi futex (pi to pi requeue is not supported)
1911 *
1912 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1913 * uaddr2 atomically on behalf of the top waiter.
1914 *
1915 * Return:
1916 * - >=0 - on success, the number of tasks requeued or woken;
1917 * - <0 - on error
1918 */
1919static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1920 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1921 u32 *cmpval, int requeue_pi)
1922{
1923 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1924 int task_count = 0, ret;
1925 struct futex_pi_state *pi_state = NULL;
1926 struct futex_hash_bucket *hb1, *hb2;
1927 struct futex_q *this, *next;
1928 DEFINE_WAKE_Q(wake_q);
1929
1930 if (nr_wake < 0 || nr_requeue < 0)
1931 return -EINVAL;
1932
1933 /*
1934 * When PI not supported: return -ENOSYS if requeue_pi is true,
1935 * consequently the compiler knows requeue_pi is always false past
1936 * this point which will optimize away all the conditional code
1937 * further down.
1938 */
1939 if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
1940 return -ENOSYS;
1941
1942 if (requeue_pi) {
1943 /*
1944 * Requeue PI only works on two distinct uaddrs. This
1945 * check is only valid for private futexes. See below.
1946 */
1947 if (uaddr1 == uaddr2)
1948 return -EINVAL;
1949
1950 /*
1951 * requeue_pi requires a pi_state, try to allocate it now
1952 * without any locks in case it fails.
1953 */
1954 if (refill_pi_state_cache())
1955 return -ENOMEM;
1956 /*
1957 * requeue_pi must wake as many tasks as it can, up to nr_wake
1958 * + nr_requeue, since it acquires the rt_mutex prior to
1959 * returning to userspace, so as to not leave the rt_mutex with
1960 * waiters and no owner. However, second and third wake-ups
1961 * cannot be predicted as they involve race conditions with the
1962 * first wake and a fault while looking up the pi_state. Both
1963 * pthread_cond_signal() and pthread_cond_broadcast() should
1964 * use nr_wake=1.
1965 */
1966 if (nr_wake != 1)
1967 return -EINVAL;
1968 }
1969
1970retry:
1971 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1972 if (unlikely(ret != 0))
1973 return ret;
1974 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1975 requeue_pi ? FUTEX_WRITE : FUTEX_READ);
1976 if (unlikely(ret != 0))
1977 return ret;
1978
1979 /*
1980 * The check above which compares uaddrs is not sufficient for
1981 * shared futexes. We need to compare the keys:
1982 */
1983 if (requeue_pi && match_futex(&key1, &key2))
1984 return -EINVAL;
1985
1986 hb1 = hash_futex(&key1);
1987 hb2 = hash_futex(&key2);
1988
1989retry_private:
1990 hb_waiters_inc(hb2);
1991 double_lock_hb(hb1, hb2);
1992
1993 if (likely(cmpval != NULL)) {
1994 u32 curval;
1995
1996 ret = get_futex_value_locked(&curval, uaddr1);
1997
1998 if (unlikely(ret)) {
1999 double_unlock_hb(hb1, hb2);
2000 hb_waiters_dec(hb2);
2001
2002 ret = get_user(curval, uaddr1);
2003 if (ret)
2004 return ret;
2005
2006 if (!(flags & FLAGS_SHARED))
2007 goto retry_private;
2008
2009 goto retry;
2010 }
2011 if (curval != *cmpval) {
2012 ret = -EAGAIN;
2013 goto out_unlock;
2014 }
2015 }
2016
2017 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
2018 struct task_struct *exiting = NULL;
2019
2020 /*
2021 * Attempt to acquire uaddr2 and wake the top waiter. If we
2022 * intend to requeue waiters, force setting the FUTEX_WAITERS
2023 * bit. We force this here where we are able to easily handle
2024 * faults rather in the requeue loop below.
2025 */
2026 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
2027 &key2, &pi_state,
2028 &exiting, nr_requeue);
2029
2030 /*
2031 * At this point the top_waiter has either taken uaddr2 or is
2032 * waiting on it. If the former, then the pi_state will not
2033 * exist yet, look it up one more time to ensure we have a
2034 * reference to it. If the lock was taken, ret contains the
2035 * vpid of the top waiter task.
2036 * If the lock was not taken, we have pi_state and an initial
2037 * refcount on it. In case of an error we have nothing.
2038 */
2039 if (ret > 0) {
2040 WARN_ON(pi_state);
2041 task_count++;
2042 /*
2043 * If we acquired the lock, then the user space value
2044 * of uaddr2 should be vpid. It cannot be changed by
2045 * the top waiter as it is blocked on hb2 lock if it
2046 * tries to do so. If something fiddled with it behind
2047 * our back the pi state lookup might unearth it. So
2048 * we rather use the known value than rereading and
2049 * handing potential crap to lookup_pi_state.
2050 *
2051 * If that call succeeds then we have pi_state and an
2052 * initial refcount on it.
2053 */
2054 ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
2055 &pi_state, &exiting);
2056 }
2057
2058 switch (ret) {
2059 case 0:
2060 /* We hold a reference on the pi state. */
2061 break;
2062
2063 /* If the above failed, then pi_state is NULL */
2064 case -EFAULT:
2065 double_unlock_hb(hb1, hb2);
2066 hb_waiters_dec(hb2);
2067 ret = fault_in_user_writeable(uaddr2);
2068 if (!ret)
2069 goto retry;
2070 return ret;
2071 case -EBUSY:
2072 case -EAGAIN:
2073 /*
2074 * Two reasons for this:
2075 * - EBUSY: Owner is exiting and we just wait for the
2076 * exit to complete.
2077 * - EAGAIN: The user space value changed.
2078 */
2079 double_unlock_hb(hb1, hb2);
2080 hb_waiters_dec(hb2);
2081 /*
2082 * Handle the case where the owner is in the middle of
2083 * exiting. Wait for the exit to complete otherwise
2084 * this task might loop forever, aka. live lock.
2085 */
2086 wait_for_owner_exiting(ret, exiting);
2087 cond_resched();
2088 goto retry;
2089 default:
2090 goto out_unlock;
2091 }
2092 }
2093
2094 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
2095 if (task_count - nr_wake >= nr_requeue)
2096 break;
2097
2098 if (!match_futex(&this->key, &key1))
2099 continue;
2100
2101 /*
2102 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
2103 * be paired with each other and no other futex ops.
2104 *
2105 * We should never be requeueing a futex_q with a pi_state,
2106 * which is awaiting a futex_unlock_pi().
2107 */
2108 if ((requeue_pi && !this->rt_waiter) ||
2109 (!requeue_pi && this->rt_waiter) ||
2110 this->pi_state) {
2111 ret = -EINVAL;
2112 break;
2113 }
2114
2115 /*
2116 * Wake nr_wake waiters. For requeue_pi, if we acquired the
2117 * lock, we already woke the top_waiter. If not, it will be
2118 * woken by futex_unlock_pi().
2119 */
2120 if (++task_count <= nr_wake && !requeue_pi) {
2121 mark_wake_futex(&wake_q, this);
2122 continue;
2123 }
2124
2125 /* Ensure we requeue to the expected futex for requeue_pi. */
2126 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
2127 ret = -EINVAL;
2128 break;
2129 }
2130
2131 /*
2132 * Requeue nr_requeue waiters and possibly one more in the case
2133 * of requeue_pi if we couldn't acquire the lock atomically.
2134 */
2135 if (requeue_pi) {
2136 /*
2137 * Prepare the waiter to take the rt_mutex. Take a
2138 * refcount on the pi_state and store the pointer in
2139 * the futex_q object of the waiter.
2140 */
2141 get_pi_state(pi_state);
2142 this->pi_state = pi_state;
2143 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
2144 this->rt_waiter,
2145 this->task);
2146 if (ret == 1) {
2147 /*
2148 * We got the lock. We do neither drop the
2149 * refcount on pi_state nor clear
2150 * this->pi_state because the waiter needs the
2151 * pi_state for cleaning up the user space
2152 * value. It will drop the refcount after
2153 * doing so.
2154 */
2155 requeue_pi_wake_futex(this, &key2, hb2);
2156 continue;
2157 } else if (ret) {
2158 /*
2159 * rt_mutex_start_proxy_lock() detected a
2160 * potential deadlock when we tried to queue
2161 * that waiter. Drop the pi_state reference
2162 * which we took above and remove the pointer
2163 * to the state from the waiters futex_q
2164 * object.
2165 */
2166 this->pi_state = NULL;
2167 put_pi_state(pi_state);
2168 /*
2169 * We stop queueing more waiters and let user
2170 * space deal with the mess.
2171 */
2172 break;
2173 }
2174 }
2175 requeue_futex(this, hb1, hb2, &key2);
2176 }
2177
2178 /*
2179 * We took an extra initial reference to the pi_state either
2180 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2181 * need to drop it here again.
2182 */
2183 put_pi_state(pi_state);
2184
2185out_unlock:
2186 double_unlock_hb(hb1, hb2);
2187 wake_up_q(&wake_q);
2188 hb_waiters_dec(hb2);
2189 return ret ? ret : task_count;
2190}
2191
2192/* The key must be already stored in q->key. */
2193static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2194 __acquires(&hb->lock)
2195{
2196 struct futex_hash_bucket *hb;
2197
2198 hb = hash_futex(&q->key);
2199
2200 /*
2201 * Increment the counter before taking the lock so that
2202 * a potential waker won't miss a to-be-slept task that is
2203 * waiting for the spinlock. This is safe as all queue_lock()
2204 * users end up calling queue_me(). Similarly, for housekeeping,
2205 * decrement the counter at queue_unlock() when some error has
2206 * occurred and we don't end up adding the task to the list.
2207 */
2208 hb_waiters_inc(hb); /* implies smp_mb(); (A) */
2209
2210 q->lock_ptr = &hb->lock;
2211
2212 spin_lock(&hb->lock);
2213 return hb;
2214}
2215
2216static inline void
2217queue_unlock(struct futex_hash_bucket *hb)
2218 __releases(&hb->lock)
2219{
2220 spin_unlock(&hb->lock);
2221 hb_waiters_dec(hb);
2222}
2223
2224static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2225{
2226 int prio;
2227
2228 /*
2229 * The priority used to register this element is
2230 * - either the real thread-priority for the real-time threads
2231 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2232 * - or MAX_RT_PRIO for non-RT threads.
2233 * Thus, all RT-threads are woken first in priority order, and
2234 * the others are woken last, in FIFO order.
2235 */
2236 prio = min(current->normal_prio, MAX_RT_PRIO);
2237
2238 plist_node_init(&q->list, prio);
2239 plist_add(&q->list, &hb->chain);
2240 q->task = current;
2241}
2242
2243/**
2244 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2245 * @q: The futex_q to enqueue
2246 * @hb: The destination hash bucket
2247 *
2248 * The hb->lock must be held by the caller, and is released here. A call to
2249 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2250 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2251 * or nothing if the unqueue is done as part of the wake process and the unqueue
2252 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
2253 * an example).
2254 */
2255static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2256 __releases(&hb->lock)
2257{
2258 __queue_me(q, hb);
2259 spin_unlock(&hb->lock);
2260}
2261
2262/**
2263 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2264 * @q: The futex_q to unqueue
2265 *
2266 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2267 * be paired with exactly one earlier call to queue_me().
2268 *
2269 * Return:
2270 * - 1 - if the futex_q was still queued (and we removed unqueued it);
2271 * - 0 - if the futex_q was already removed by the waking thread
2272 */
2273static int unqueue_me(struct futex_q *q)
2274{
2275 spinlock_t *lock_ptr;
2276 int ret = 0;
2277
2278 /* In the common case we don't take the spinlock, which is nice. */
2279retry:
2280 /*
2281 * q->lock_ptr can change between this read and the following spin_lock.
2282 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2283 * optimizing lock_ptr out of the logic below.
2284 */
2285 lock_ptr = READ_ONCE(q->lock_ptr);
2286 if (lock_ptr != NULL) {
2287 spin_lock(lock_ptr);
2288 /*
2289 * q->lock_ptr can change between reading it and
2290 * spin_lock(), causing us to take the wrong lock. This
2291 * corrects the race condition.
2292 *
2293 * Reasoning goes like this: if we have the wrong lock,
2294 * q->lock_ptr must have changed (maybe several times)
2295 * between reading it and the spin_lock(). It can
2296 * change again after the spin_lock() but only if it was
2297 * already changed before the spin_lock(). It cannot,
2298 * however, change back to the original value. Therefore
2299 * we can detect whether we acquired the correct lock.
2300 */
2301 if (unlikely(lock_ptr != q->lock_ptr)) {
2302 spin_unlock(lock_ptr);
2303 goto retry;
2304 }
2305 __unqueue_futex(q);
2306
2307 BUG_ON(q->pi_state);
2308
2309 spin_unlock(lock_ptr);
2310 ret = 1;
2311 }
2312
2313 return ret;
2314}
2315
2316/*
2317 * PI futexes can not be requeued and must remove themselves from the
2318 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
2319 */
2320static void unqueue_me_pi(struct futex_q *q)
2321{
2322 __unqueue_futex(q);
2323
2324 BUG_ON(!q->pi_state);
2325 put_pi_state(q->pi_state);
2326 q->pi_state = NULL;
2327}
2328
2329static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2330 struct task_struct *argowner)
2331{
2332 struct futex_pi_state *pi_state = q->pi_state;
2333 struct task_struct *oldowner, *newowner;
2334 u32 uval, curval, newval, newtid;
2335 int err = 0;
2336
2337 oldowner = pi_state->owner;
2338
2339 /*
2340 * We are here because either:
2341 *
2342 * - we stole the lock and pi_state->owner needs updating to reflect
2343 * that (@argowner == current),
2344 *
2345 * or:
2346 *
2347 * - someone stole our lock and we need to fix things to point to the
2348 * new owner (@argowner == NULL).
2349 *
2350 * Either way, we have to replace the TID in the user space variable.
2351 * This must be atomic as we have to preserve the owner died bit here.
2352 *
2353 * Note: We write the user space value _before_ changing the pi_state
2354 * because we can fault here. Imagine swapped out pages or a fork
2355 * that marked all the anonymous memory readonly for cow.
2356 *
2357 * Modifying pi_state _before_ the user space value would leave the
2358 * pi_state in an inconsistent state when we fault here, because we
2359 * need to drop the locks to handle the fault. This might be observed
2360 * in the PID check in lookup_pi_state.
2361 */
2362retry:
2363 if (!argowner) {
2364 if (oldowner != current) {
2365 /*
2366 * We raced against a concurrent self; things are
2367 * already fixed up. Nothing to do.
2368 */
2369 return 0;
2370 }
2371
2372 if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2373 /* We got the lock. pi_state is correct. Tell caller. */
2374 return 1;
2375 }
2376
2377 /*
2378 * The trylock just failed, so either there is an owner or
2379 * there is a higher priority waiter than this one.
2380 */
2381 newowner = rt_mutex_owner(&pi_state->pi_mutex);
2382 /*
2383 * If the higher priority waiter has not yet taken over the
2384 * rtmutex then newowner is NULL. We can't return here with
2385 * that state because it's inconsistent vs. the user space
2386 * state. So drop the locks and try again. It's a valid
2387 * situation and not any different from the other retry
2388 * conditions.
2389 */
2390 if (unlikely(!newowner)) {
2391 err = -EAGAIN;
2392 goto handle_err;
2393 }
2394 } else {
2395 WARN_ON_ONCE(argowner != current);
2396 if (oldowner == current) {
2397 /*
2398 * We raced against a concurrent self; things are
2399 * already fixed up. Nothing to do.
2400 */
2401 return 1;
2402 }
2403 newowner = argowner;
2404 }
2405
2406 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2407 /* Owner died? */
2408 if (!pi_state->owner)
2409 newtid |= FUTEX_OWNER_DIED;
2410
2411 err = get_futex_value_locked(&uval, uaddr);
2412 if (err)
2413 goto handle_err;
2414
2415 for (;;) {
2416 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2417
2418 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
2419 if (err)
2420 goto handle_err;
2421
2422 if (curval == uval)
2423 break;
2424 uval = curval;
2425 }
2426
2427 /*
2428 * We fixed up user space. Now we need to fix the pi_state
2429 * itself.
2430 */
2431 pi_state_update_owner(pi_state, newowner);
2432
2433 return argowner == current;
2434
2435 /*
2436 * In order to reschedule or handle a page fault, we need to drop the
2437 * locks here. In the case of a fault, this gives the other task
2438 * (either the highest priority waiter itself or the task which stole
2439 * the rtmutex) the chance to try the fixup of the pi_state. So once we
2440 * are back from handling the fault we need to check the pi_state after
2441 * reacquiring the locks and before trying to do another fixup. When
2442 * the fixup has been done already we simply return.
2443 *
2444 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
2445 * drop hb->lock since the caller owns the hb -> futex_q relation.
2446 * Dropping the pi_mutex->wait_lock requires the state revalidate.
2447 */
2448handle_err:
2449 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2450 spin_unlock(q->lock_ptr);
2451
2452 switch (err) {
2453 case -EFAULT:
2454 err = fault_in_user_writeable(uaddr);
2455 break;
2456
2457 case -EAGAIN:
2458 cond_resched();
2459 err = 0;
2460 break;
2461
2462 default:
2463 WARN_ON_ONCE(1);
2464 break;
2465 }
2466
2467 spin_lock(q->lock_ptr);
2468 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2469
2470 /*
2471 * Check if someone else fixed it for us:
2472 */
2473 if (pi_state->owner != oldowner)
2474 return argowner == current;
2475
2476 /* Retry if err was -EAGAIN or the fault in succeeded */
2477 if (!err)
2478 goto retry;
2479
2480 /*
2481 * fault_in_user_writeable() failed so user state is immutable. At
2482 * best we can make the kernel state consistent but user state will
2483 * be most likely hosed and any subsequent unlock operation will be
2484 * rejected due to PI futex rule [10].
2485 *
2486 * Ensure that the rtmutex owner is also the pi_state owner despite
2487 * the user space value claiming something different. There is no
2488 * point in unlocking the rtmutex if current is the owner as it
2489 * would need to wait until the next waiter has taken the rtmutex
2490 * to guarantee consistent state. Keep it simple. Userspace asked
2491 * for this wreckaged state.
2492 *
2493 * The rtmutex has an owner - either current or some other
2494 * task. See the EAGAIN loop above.
2495 */
2496 pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
2497
2498 return err;
2499}
2500
2501static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2502 struct task_struct *argowner)
2503{
2504 struct futex_pi_state *pi_state = q->pi_state;
2505 int ret;
2506
2507 lockdep_assert_held(q->lock_ptr);
2508
2509 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2510 ret = __fixup_pi_state_owner(uaddr, q, argowner);
2511 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2512 return ret;
2513}
2514
2515static long futex_wait_restart(struct restart_block *restart);
2516
2517/**
2518 * fixup_owner() - Post lock pi_state and corner case management
2519 * @uaddr: user address of the futex
2520 * @q: futex_q (contains pi_state and access to the rt_mutex)
2521 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2522 *
2523 * After attempting to lock an rt_mutex, this function is called to cleanup
2524 * the pi_state owner as well as handle race conditions that may allow us to
2525 * acquire the lock. Must be called with the hb lock held.
2526 *
2527 * Return:
2528 * - 1 - success, lock taken;
2529 * - 0 - success, lock not taken;
2530 * - <0 - on error (-EFAULT)
2531 */
2532static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2533{
2534 if (locked) {
2535 /*
2536 * Got the lock. We might not be the anticipated owner if we
2537 * did a lock-steal - fix up the PI-state in that case:
2538 *
2539 * Speculative pi_state->owner read (we don't hold wait_lock);
2540 * since we own the lock pi_state->owner == current is the
2541 * stable state, anything else needs more attention.
2542 */
2543 if (q->pi_state->owner != current)
2544 return fixup_pi_state_owner(uaddr, q, current);
2545 return 1;
2546 }
2547
2548 /*
2549 * If we didn't get the lock; check if anybody stole it from us. In
2550 * that case, we need to fix up the uval to point to them instead of
2551 * us, otherwise bad things happen. [10]
2552 *
2553 * Another speculative read; pi_state->owner == current is unstable
2554 * but needs our attention.
2555 */
2556 if (q->pi_state->owner == current)
2557 return fixup_pi_state_owner(uaddr, q, NULL);
2558
2559 /*
2560 * Paranoia check. If we did not take the lock, then we should not be
2561 * the owner of the rt_mutex. Warn and establish consistent state.
2562 */
2563 if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
2564 return fixup_pi_state_owner(uaddr, q, current);
2565
2566 return 0;
2567}
2568
2569/**
2570 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2571 * @hb: the futex hash bucket, must be locked by the caller
2572 * @q: the futex_q to queue up on
2573 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2574 */
2575static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2576 struct hrtimer_sleeper *timeout)
2577{
2578 /*
2579 * The task state is guaranteed to be set before another task can
2580 * wake it. set_current_state() is implemented using smp_store_mb() and
2581 * queue_me() calls spin_unlock() upon completion, both serializing
2582 * access to the hash list and forcing another memory barrier.
2583 */
2584 set_current_state(TASK_INTERRUPTIBLE);
2585 queue_me(q, hb);
2586
2587 /* Arm the timer */
2588 if (timeout)
2589 hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
2590
2591 /*
2592 * If we have been removed from the hash list, then another task
2593 * has tried to wake us, and we can skip the call to schedule().
2594 */
2595 if (likely(!plist_node_empty(&q->list))) {
2596 /*
2597 * If the timer has already expired, current will already be
2598 * flagged for rescheduling. Only call schedule if there
2599 * is no timeout, or if it has yet to expire.
2600 */
2601 if (!timeout || timeout->task)
2602 freezable_schedule();
2603 }
2604 __set_current_state(TASK_RUNNING);
2605}
2606
2607/**
2608 * futex_wait_setup() - Prepare to wait on a futex
2609 * @uaddr: the futex userspace address
2610 * @val: the expected value
2611 * @flags: futex flags (FLAGS_SHARED, etc.)
2612 * @q: the associated futex_q
2613 * @hb: storage for hash_bucket pointer to be returned to caller
2614 *
2615 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2616 * compare it with the expected value. Handle atomic faults internally.
2617 * Return with the hb lock held and a q.key reference on success, and unlocked
2618 * with no q.key reference on failure.
2619 *
2620 * Return:
2621 * - 0 - uaddr contains val and hb has been locked;
2622 * - <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2623 */
2624static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2625 struct futex_q *q, struct futex_hash_bucket **hb)
2626{
2627 u32 uval;
2628 int ret;
2629
2630 /*
2631 * Access the page AFTER the hash-bucket is locked.
2632 * Order is important:
2633 *
2634 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2635 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2636 *
2637 * The basic logical guarantee of a futex is that it blocks ONLY
2638 * if cond(var) is known to be true at the time of blocking, for
2639 * any cond. If we locked the hash-bucket after testing *uaddr, that
2640 * would open a race condition where we could block indefinitely with
2641 * cond(var) false, which would violate the guarantee.
2642 *
2643 * On the other hand, we insert q and release the hash-bucket only
2644 * after testing *uaddr. This guarantees that futex_wait() will NOT
2645 * absorb a wakeup if *uaddr does not match the desired values
2646 * while the syscall executes.
2647 */
2648retry:
2649 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
2650 if (unlikely(ret != 0))
2651 return ret;
2652
2653retry_private:
2654 *hb = queue_lock(q);
2655
2656 ret = get_futex_value_locked(&uval, uaddr);
2657
2658 if (ret) {
2659 queue_unlock(*hb);
2660
2661 ret = get_user(uval, uaddr);
2662 if (ret)
2663 return ret;
2664
2665 if (!(flags & FLAGS_SHARED))
2666 goto retry_private;
2667
2668 goto retry;
2669 }
2670
2671 if (uval != val) {
2672 queue_unlock(*hb);
2673 ret = -EWOULDBLOCK;
2674 }
2675
2676 return ret;
2677}
2678
2679static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2680 ktime_t *abs_time, u32 bitset)
2681{
2682 struct hrtimer_sleeper timeout, *to;
2683 struct restart_block *restart;
2684 struct futex_hash_bucket *hb;
2685 struct futex_q q = futex_q_init;
2686 int ret;
2687
2688 if (!bitset)
2689 return -EINVAL;
2690 q.bitset = bitset;
2691
2692 to = futex_setup_timer(abs_time, &timeout, flags,
2693 current->timer_slack_ns);
2694retry:
2695 /*
2696 * Prepare to wait on uaddr. On success, holds hb lock and increments
2697 * q.key refs.
2698 */
2699 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2700 if (ret)
2701 goto out;
2702
2703 /* queue_me and wait for wakeup, timeout, or a signal. */
2704 futex_wait_queue_me(hb, &q, to);
2705
2706 /* If we were woken (and unqueued), we succeeded, whatever. */
2707 ret = 0;
2708 /* unqueue_me() drops q.key ref */
2709 if (!unqueue_me(&q))
2710 goto out;
2711 ret = -ETIMEDOUT;
2712 if (to && !to->task)
2713 goto out;
2714
2715 /*
2716 * We expect signal_pending(current), but we might be the
2717 * victim of a spurious wakeup as well.
2718 */
2719 if (!signal_pending(current))
2720 goto retry;
2721
2722 ret = -ERESTARTSYS;
2723 if (!abs_time)
2724 goto out;
2725
2726 restart = ¤t->restart_block;
2727 restart->futex.uaddr = uaddr;
2728 restart->futex.val = val;
2729 restart->futex.time = *abs_time;
2730 restart->futex.bitset = bitset;
2731 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2732
2733 ret = set_restart_fn(restart, futex_wait_restart);
2734
2735out:
2736 if (to) {
2737 hrtimer_cancel(&to->timer);
2738 destroy_hrtimer_on_stack(&to->timer);
2739 }
2740 return ret;
2741}
2742
2743
2744static long futex_wait_restart(struct restart_block *restart)
2745{
2746 u32 __user *uaddr = restart->futex.uaddr;
2747 ktime_t t, *tp = NULL;
2748
2749 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2750 t = restart->futex.time;
2751 tp = &t;
2752 }
2753 restart->fn = do_no_restart_syscall;
2754
2755 return (long)futex_wait(uaddr, restart->futex.flags,
2756 restart->futex.val, tp, restart->futex.bitset);
2757}
2758
2759
2760/*
2761 * Userspace tried a 0 -> TID atomic transition of the futex value
2762 * and failed. The kernel side here does the whole locking operation:
2763 * if there are waiters then it will block as a consequence of relying
2764 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2765 * a 0 value of the futex too.).
2766 *
2767 * Also serves as futex trylock_pi()'ing, and due semantics.
2768 */
2769static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2770 ktime_t *time, int trylock)
2771{
2772 struct hrtimer_sleeper timeout, *to;
2773 struct task_struct *exiting = NULL;
2774 struct rt_mutex_waiter rt_waiter;
2775 struct futex_hash_bucket *hb;
2776 struct futex_q q = futex_q_init;
2777 int res, ret;
2778
2779 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2780 return -ENOSYS;
2781
2782 if (refill_pi_state_cache())
2783 return -ENOMEM;
2784
2785 to = futex_setup_timer(time, &timeout, flags, 0);
2786
2787retry:
2788 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
2789 if (unlikely(ret != 0))
2790 goto out;
2791
2792retry_private:
2793 hb = queue_lock(&q);
2794
2795 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
2796 &exiting, 0);
2797 if (unlikely(ret)) {
2798 /*
2799 * Atomic work succeeded and we got the lock,
2800 * or failed. Either way, we do _not_ block.
2801 */
2802 switch (ret) {
2803 case 1:
2804 /* We got the lock. */
2805 ret = 0;
2806 goto out_unlock_put_key;
2807 case -EFAULT:
2808 goto uaddr_faulted;
2809 case -EBUSY:
2810 case -EAGAIN:
2811 /*
2812 * Two reasons for this:
2813 * - EBUSY: Task is exiting and we just wait for the
2814 * exit to complete.
2815 * - EAGAIN: The user space value changed.
2816 */
2817 queue_unlock(hb);
2818 /*
2819 * Handle the case where the owner is in the middle of
2820 * exiting. Wait for the exit to complete otherwise
2821 * this task might loop forever, aka. live lock.
2822 */
2823 wait_for_owner_exiting(ret, exiting);
2824 cond_resched();
2825 goto retry;
2826 default:
2827 goto out_unlock_put_key;
2828 }
2829 }
2830
2831 WARN_ON(!q.pi_state);
2832
2833 /*
2834 * Only actually queue now that the atomic ops are done:
2835 */
2836 __queue_me(&q, hb);
2837
2838 if (trylock) {
2839 ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
2840 /* Fixup the trylock return value: */
2841 ret = ret ? 0 : -EWOULDBLOCK;
2842 goto no_block;
2843 }
2844
2845 rt_mutex_init_waiter(&rt_waiter);
2846
2847 /*
2848 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
2849 * hold it while doing rt_mutex_start_proxy(), because then it will
2850 * include hb->lock in the blocking chain, even through we'll not in
2851 * fact hold it while blocking. This will lead it to report -EDEADLK
2852 * and BUG when futex_unlock_pi() interleaves with this.
2853 *
2854 * Therefore acquire wait_lock while holding hb->lock, but drop the
2855 * latter before calling __rt_mutex_start_proxy_lock(). This
2856 * interleaves with futex_unlock_pi() -- which does a similar lock
2857 * handoff -- such that the latter can observe the futex_q::pi_state
2858 * before __rt_mutex_start_proxy_lock() is done.
2859 */
2860 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
2861 spin_unlock(q.lock_ptr);
2862 /*
2863 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
2864 * such that futex_unlock_pi() is guaranteed to observe the waiter when
2865 * it sees the futex_q::pi_state.
2866 */
2867 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
2868 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
2869
2870 if (ret) {
2871 if (ret == 1)
2872 ret = 0;
2873 goto cleanup;
2874 }
2875
2876 if (unlikely(to))
2877 hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
2878
2879 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
2880
2881cleanup:
2882 spin_lock(q.lock_ptr);
2883 /*
2884 * If we failed to acquire the lock (deadlock/signal/timeout), we must
2885 * first acquire the hb->lock before removing the lock from the
2886 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
2887 * lists consistent.
2888 *
2889 * In particular; it is important that futex_unlock_pi() can not
2890 * observe this inconsistency.
2891 */
2892 if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
2893 ret = 0;
2894
2895no_block:
2896 /*
2897 * Fixup the pi_state owner and possibly acquire the lock if we
2898 * haven't already.
2899 */
2900 res = fixup_owner(uaddr, &q, !ret);
2901 /*
2902 * If fixup_owner() returned an error, propagate that. If it acquired
2903 * the lock, clear our -ETIMEDOUT or -EINTR.
2904 */
2905 if (res)
2906 ret = (res < 0) ? res : 0;
2907
2908 unqueue_me_pi(&q);
2909 spin_unlock(q.lock_ptr);
2910 goto out;
2911
2912out_unlock_put_key:
2913 queue_unlock(hb);
2914
2915out:
2916 if (to) {
2917 hrtimer_cancel(&to->timer);
2918 destroy_hrtimer_on_stack(&to->timer);
2919 }
2920 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2921
2922uaddr_faulted:
2923 queue_unlock(hb);
2924
2925 ret = fault_in_user_writeable(uaddr);
2926 if (ret)
2927 goto out;
2928
2929 if (!(flags & FLAGS_SHARED))
2930 goto retry_private;
2931
2932 goto retry;
2933}
2934
2935/*
2936 * Userspace attempted a TID -> 0 atomic transition, and failed.
2937 * This is the in-kernel slowpath: we look up the PI state (if any),
2938 * and do the rt-mutex unlock.
2939 */
2940static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2941{
2942 u32 curval, uval, vpid = task_pid_vnr(current);
2943 union futex_key key = FUTEX_KEY_INIT;
2944 struct futex_hash_bucket *hb;
2945 struct futex_q *top_waiter;
2946 int ret;
2947
2948 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2949 return -ENOSYS;
2950
2951retry:
2952 if (get_user(uval, uaddr))
2953 return -EFAULT;
2954 /*
2955 * We release only a lock we actually own:
2956 */
2957 if ((uval & FUTEX_TID_MASK) != vpid)
2958 return -EPERM;
2959
2960 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
2961 if (ret)
2962 return ret;
2963
2964 hb = hash_futex(&key);
2965 spin_lock(&hb->lock);
2966
2967 /*
2968 * Check waiters first. We do not trust user space values at
2969 * all and we at least want to know if user space fiddled
2970 * with the futex value instead of blindly unlocking.
2971 */
2972 top_waiter = futex_top_waiter(hb, &key);
2973 if (top_waiter) {
2974 struct futex_pi_state *pi_state = top_waiter->pi_state;
2975
2976 ret = -EINVAL;
2977 if (!pi_state)
2978 goto out_unlock;
2979
2980 /*
2981 * If current does not own the pi_state then the futex is
2982 * inconsistent and user space fiddled with the futex value.
2983 */
2984 if (pi_state->owner != current)
2985 goto out_unlock;
2986
2987 get_pi_state(pi_state);
2988 /*
2989 * By taking wait_lock while still holding hb->lock, we ensure
2990 * there is no point where we hold neither; and therefore
2991 * wake_futex_pi() must observe a state consistent with what we
2992 * observed.
2993 *
2994 * In particular; this forces __rt_mutex_start_proxy() to
2995 * complete such that we're guaranteed to observe the
2996 * rt_waiter. Also see the WARN in wake_futex_pi().
2997 */
2998 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2999 spin_unlock(&hb->lock);
3000
3001 /* drops pi_state->pi_mutex.wait_lock */
3002 ret = wake_futex_pi(uaddr, uval, pi_state);
3003
3004 put_pi_state(pi_state);
3005
3006 /*
3007 * Success, we're done! No tricky corner cases.
3008 */
3009 if (!ret)
3010 return ret;
3011 /*
3012 * The atomic access to the futex value generated a
3013 * pagefault, so retry the user-access and the wakeup:
3014 */
3015 if (ret == -EFAULT)
3016 goto pi_faulted;
3017 /*
3018 * A unconditional UNLOCK_PI op raced against a waiter
3019 * setting the FUTEX_WAITERS bit. Try again.
3020 */
3021 if (ret == -EAGAIN)
3022 goto pi_retry;
3023 /*
3024 * wake_futex_pi has detected invalid state. Tell user
3025 * space.
3026 */
3027 return ret;
3028 }
3029
3030 /*
3031 * We have no kernel internal state, i.e. no waiters in the
3032 * kernel. Waiters which are about to queue themselves are stuck
3033 * on hb->lock. So we can safely ignore them. We do neither
3034 * preserve the WAITERS bit not the OWNER_DIED one. We are the
3035 * owner.
3036 */
3037 if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
3038 spin_unlock(&hb->lock);
3039 switch (ret) {
3040 case -EFAULT:
3041 goto pi_faulted;
3042
3043 case -EAGAIN:
3044 goto pi_retry;
3045
3046 default:
3047 WARN_ON_ONCE(1);
3048 return ret;
3049 }
3050 }
3051
3052 /*
3053 * If uval has changed, let user space handle it.
3054 */
3055 ret = (curval == uval) ? 0 : -EAGAIN;
3056
3057out_unlock:
3058 spin_unlock(&hb->lock);
3059 return ret;
3060
3061pi_retry:
3062 cond_resched();
3063 goto retry;
3064
3065pi_faulted:
3066
3067 ret = fault_in_user_writeable(uaddr);
3068 if (!ret)
3069 goto retry;
3070
3071 return ret;
3072}
3073
3074/**
3075 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
3076 * @hb: the hash_bucket futex_q was original enqueued on
3077 * @q: the futex_q woken while waiting to be requeued
3078 * @key2: the futex_key of the requeue target futex
3079 * @timeout: the timeout associated with the wait (NULL if none)
3080 *
3081 * Detect if the task was woken on the initial futex as opposed to the requeue
3082 * target futex. If so, determine if it was a timeout or a signal that caused
3083 * the wakeup and return the appropriate error code to the caller. Must be
3084 * called with the hb lock held.
3085 *
3086 * Return:
3087 * - 0 = no early wakeup detected;
3088 * - <0 = -ETIMEDOUT or -ERESTARTNOINTR
3089 */
3090static inline
3091int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
3092 struct futex_q *q, union futex_key *key2,
3093 struct hrtimer_sleeper *timeout)
3094{
3095 int ret = 0;
3096
3097 /*
3098 * With the hb lock held, we avoid races while we process the wakeup.
3099 * We only need to hold hb (and not hb2) to ensure atomicity as the
3100 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
3101 * It can't be requeued from uaddr2 to something else since we don't
3102 * support a PI aware source futex for requeue.
3103 */
3104 if (!match_futex(&q->key, key2)) {
3105 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
3106 /*
3107 * We were woken prior to requeue by a timeout or a signal.
3108 * Unqueue the futex_q and determine which it was.
3109 */
3110 plist_del(&q->list, &hb->chain);
3111 hb_waiters_dec(hb);
3112
3113 /* Handle spurious wakeups gracefully */
3114 ret = -EWOULDBLOCK;
3115 if (timeout && !timeout->task)
3116 ret = -ETIMEDOUT;
3117 else if (signal_pending(current))
3118 ret = -ERESTARTNOINTR;
3119 }
3120 return ret;
3121}
3122
3123/**
3124 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
3125 * @uaddr: the futex we initially wait on (non-pi)
3126 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
3127 * the same type, no requeueing from private to shared, etc.
3128 * @val: the expected value of uaddr
3129 * @abs_time: absolute timeout
3130 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
3131 * @uaddr2: the pi futex we will take prior to returning to user-space
3132 *
3133 * The caller will wait on uaddr and will be requeued by futex_requeue() to
3134 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
3135 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
3136 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
3137 * without one, the pi logic would not know which task to boost/deboost, if
3138 * there was a need to.
3139 *
3140 * We call schedule in futex_wait_queue_me() when we enqueue and return there
3141 * via the following--
3142 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
3143 * 2) wakeup on uaddr2 after a requeue
3144 * 3) signal
3145 * 4) timeout
3146 *
3147 * If 3, cleanup and return -ERESTARTNOINTR.
3148 *
3149 * If 2, we may then block on trying to take the rt_mutex and return via:
3150 * 5) successful lock
3151 * 6) signal
3152 * 7) timeout
3153 * 8) other lock acquisition failure
3154 *
3155 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
3156 *
3157 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
3158 *
3159 * Return:
3160 * - 0 - On success;
3161 * - <0 - On error
3162 */
3163static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3164 u32 val, ktime_t *abs_time, u32 bitset,
3165 u32 __user *uaddr2)
3166{
3167 struct hrtimer_sleeper timeout, *to;
3168 struct rt_mutex_waiter rt_waiter;
3169 struct futex_hash_bucket *hb;
3170 union futex_key key2 = FUTEX_KEY_INIT;
3171 struct futex_q q = futex_q_init;
3172 int res, ret;
3173
3174 if (!IS_ENABLED(CONFIG_FUTEX_PI))
3175 return -ENOSYS;
3176
3177 if (uaddr == uaddr2)
3178 return -EINVAL;
3179
3180 if (!bitset)
3181 return -EINVAL;
3182
3183 to = futex_setup_timer(abs_time, &timeout, flags,
3184 current->timer_slack_ns);
3185
3186 /*
3187 * The waiter is allocated on our stack, manipulated by the requeue
3188 * code while we sleep on uaddr.
3189 */
3190 rt_mutex_init_waiter(&rt_waiter);
3191
3192 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
3193 if (unlikely(ret != 0))
3194 goto out;
3195
3196 q.bitset = bitset;
3197 q.rt_waiter = &rt_waiter;
3198 q.requeue_pi_key = &key2;
3199
3200 /*
3201 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
3202 * count.
3203 */
3204 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
3205 if (ret)
3206 goto out;
3207
3208 /*
3209 * The check above which compares uaddrs is not sufficient for
3210 * shared futexes. We need to compare the keys:
3211 */
3212 if (match_futex(&q.key, &key2)) {
3213 queue_unlock(hb);
3214 ret = -EINVAL;
3215 goto out;
3216 }
3217
3218 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
3219 futex_wait_queue_me(hb, &q, to);
3220
3221 spin_lock(&hb->lock);
3222 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
3223 spin_unlock(&hb->lock);
3224 if (ret)
3225 goto out;
3226
3227 /*
3228 * In order for us to be here, we know our q.key == key2, and since
3229 * we took the hb->lock above, we also know that futex_requeue() has
3230 * completed and we no longer have to concern ourselves with a wakeup
3231 * race with the atomic proxy lock acquisition by the requeue code. The
3232 * futex_requeue dropped our key1 reference and incremented our key2
3233 * reference count.
3234 */
3235
3236 /*
3237 * Check if the requeue code acquired the second futex for us and do
3238 * any pertinent fixup.
3239 */
3240 if (!q.rt_waiter) {
3241 if (q.pi_state && (q.pi_state->owner != current)) {
3242 spin_lock(q.lock_ptr);
3243 ret = fixup_owner(uaddr2, &q, true);
3244 /*
3245 * Drop the reference to the pi state which
3246 * the requeue_pi() code acquired for us.
3247 */
3248 put_pi_state(q.pi_state);
3249 spin_unlock(q.lock_ptr);
3250 /*
3251 * Adjust the return value. It's either -EFAULT or
3252 * success (1) but the caller expects 0 for success.
3253 */
3254 ret = ret < 0 ? ret : 0;
3255 }
3256 } else {
3257 struct rt_mutex *pi_mutex;
3258
3259 /*
3260 * We have been woken up by futex_unlock_pi(), a timeout, or a
3261 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
3262 * the pi_state.
3263 */
3264 WARN_ON(!q.pi_state);
3265 pi_mutex = &q.pi_state->pi_mutex;
3266 ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
3267
3268 spin_lock(q.lock_ptr);
3269 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
3270 ret = 0;
3271
3272 debug_rt_mutex_free_waiter(&rt_waiter);
3273 /*
3274 * Fixup the pi_state owner and possibly acquire the lock if we
3275 * haven't already.
3276 */
3277 res = fixup_owner(uaddr2, &q, !ret);
3278 /*
3279 * If fixup_owner() returned an error, propagate that. If it
3280 * acquired the lock, clear -ETIMEDOUT or -EINTR.
3281 */
3282 if (res)
3283 ret = (res < 0) ? res : 0;
3284
3285 unqueue_me_pi(&q);
3286 spin_unlock(q.lock_ptr);
3287 }
3288
3289 if (ret == -EINTR) {
3290 /*
3291 * We've already been requeued, but cannot restart by calling
3292 * futex_lock_pi() directly. We could restart this syscall, but
3293 * it would detect that the user space "val" changed and return
3294 * -EWOULDBLOCK. Save the overhead of the restart and return
3295 * -EWOULDBLOCK directly.
3296 */
3297 ret = -EWOULDBLOCK;
3298 }
3299
3300out:
3301 if (to) {
3302 hrtimer_cancel(&to->timer);
3303 destroy_hrtimer_on_stack(&to->timer);
3304 }
3305 return ret;
3306}
3307
3308/*
3309 * Support for robust futexes: the kernel cleans up held futexes at
3310 * thread exit time.
3311 *
3312 * Implementation: user-space maintains a per-thread list of locks it
3313 * is holding. Upon do_exit(), the kernel carefully walks this list,
3314 * and marks all locks that are owned by this thread with the
3315 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3316 * always manipulated with the lock held, so the list is private and
3317 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3318 * field, to allow the kernel to clean up if the thread dies after
3319 * acquiring the lock, but just before it could have added itself to
3320 * the list. There can only be one such pending lock.
3321 */
3322
3323/**
3324 * sys_set_robust_list() - Set the robust-futex list head of a task
3325 * @head: pointer to the list-head
3326 * @len: length of the list-head, as userspace expects
3327 */
3328SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3329 size_t, len)
3330{
3331 if (!futex_cmpxchg_enabled)
3332 return -ENOSYS;
3333 /*
3334 * The kernel knows only one size for now:
3335 */
3336 if (unlikely(len != sizeof(*head)))
3337 return -EINVAL;
3338
3339 current->robust_list = head;
3340
3341 return 0;
3342}
3343
3344/**
3345 * sys_get_robust_list() - Get the robust-futex list head of a task
3346 * @pid: pid of the process [zero for current task]
3347 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3348 * @len_ptr: pointer to a length field, the kernel fills in the header size
3349 */
3350SYSCALL_DEFINE3(get_robust_list, int, pid,
3351 struct robust_list_head __user * __user *, head_ptr,
3352 size_t __user *, len_ptr)
3353{
3354 struct robust_list_head __user *head;
3355 unsigned long ret;
3356 struct task_struct *p;
3357
3358 if (!futex_cmpxchg_enabled)
3359 return -ENOSYS;
3360
3361 rcu_read_lock();
3362
3363 ret = -ESRCH;
3364 if (!pid)
3365 p = current;
3366 else {
3367 p = find_task_by_vpid(pid);
3368 if (!p)
3369 goto err_unlock;
3370 }
3371
3372 ret = -EPERM;
3373 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3374 goto err_unlock;
3375
3376 head = p->robust_list;
3377 rcu_read_unlock();
3378
3379 if (put_user(sizeof(*head), len_ptr))
3380 return -EFAULT;
3381 return put_user(head, head_ptr);
3382
3383err_unlock:
3384 rcu_read_unlock();
3385
3386 return ret;
3387}
3388
3389/* Constants for the pending_op argument of handle_futex_death */
3390#define HANDLE_DEATH_PENDING true
3391#define HANDLE_DEATH_LIST false
3392
3393/*
3394 * Process a futex-list entry, check whether it's owned by the
3395 * dying task, and do notification if so:
3396 */
3397static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
3398 bool pi, bool pending_op)
3399{
3400 u32 uval, nval, mval;
3401 int err;
3402
3403 /* Futex address must be 32bit aligned */
3404 if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
3405 return -1;
3406
3407retry:
3408 if (get_user(uval, uaddr))
3409 return -1;
3410
3411 /*
3412 * Special case for regular (non PI) futexes. The unlock path in
3413 * user space has two race scenarios:
3414 *
3415 * 1. The unlock path releases the user space futex value and
3416 * before it can execute the futex() syscall to wake up
3417 * waiters it is killed.
3418 *
3419 * 2. A woken up waiter is killed before it can acquire the
3420 * futex in user space.
3421 *
3422 * In both cases the TID validation below prevents a wakeup of
3423 * potential waiters which can cause these waiters to block
3424 * forever.
3425 *
3426 * In both cases the following conditions are met:
3427 *
3428 * 1) task->robust_list->list_op_pending != NULL
3429 * @pending_op == true
3430 * 2) User space futex value == 0
3431 * 3) Regular futex: @pi == false
3432 *
3433 * If these conditions are met, it is safe to attempt waking up a
3434 * potential waiter without touching the user space futex value and
3435 * trying to set the OWNER_DIED bit. The user space futex value is
3436 * uncontended and the rest of the user space mutex state is
3437 * consistent, so a woken waiter will just take over the
3438 * uncontended futex. Setting the OWNER_DIED bit would create
3439 * inconsistent state and malfunction of the user space owner died
3440 * handling.
3441 */
3442 if (pending_op && !pi && !uval) {
3443 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3444 return 0;
3445 }
3446
3447 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
3448 return 0;
3449
3450 /*
3451 * Ok, this dying thread is truly holding a futex
3452 * of interest. Set the OWNER_DIED bit atomically
3453 * via cmpxchg, and if the value had FUTEX_WAITERS
3454 * set, wake up a waiter (if any). (We have to do a
3455 * futex_wake() even if OWNER_DIED is already set -
3456 * to handle the rare but possible case of recursive
3457 * thread-death.) The rest of the cleanup is done in
3458 * userspace.
3459 */
3460 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3461
3462 /*
3463 * We are not holding a lock here, but we want to have
3464 * the pagefault_disable/enable() protection because
3465 * we want to handle the fault gracefully. If the
3466 * access fails we try to fault in the futex with R/W
3467 * verification via get_user_pages. get_user() above
3468 * does not guarantee R/W access. If that fails we
3469 * give up and leave the futex locked.
3470 */
3471 if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
3472 switch (err) {
3473 case -EFAULT:
3474 if (fault_in_user_writeable(uaddr))
3475 return -1;
3476 goto retry;
3477
3478 case -EAGAIN:
3479 cond_resched();
3480 goto retry;
3481
3482 default:
3483 WARN_ON_ONCE(1);
3484 return err;
3485 }
3486 }
3487
3488 if (nval != uval)
3489 goto retry;
3490
3491 /*
3492 * Wake robust non-PI futexes here. The wakeup of
3493 * PI futexes happens in exit_pi_state():
3494 */
3495 if (!pi && (uval & FUTEX_WAITERS))
3496 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3497
3498 return 0;
3499}
3500
3501/*
3502 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3503 */
3504static inline int fetch_robust_entry(struct robust_list __user **entry,
3505 struct robust_list __user * __user *head,
3506 unsigned int *pi)
3507{
3508 unsigned long uentry;
3509
3510 if (get_user(uentry, (unsigned long __user *)head))
3511 return -EFAULT;
3512
3513 *entry = (void __user *)(uentry & ~1UL);
3514 *pi = uentry & 1;
3515
3516 return 0;
3517}
3518
3519/*
3520 * Walk curr->robust_list (very carefully, it's a userspace list!)
3521 * and mark any locks found there dead, and notify any waiters.
3522 *
3523 * We silently return on any sign of list-walking problem.
3524 */
3525static void exit_robust_list(struct task_struct *curr)
3526{
3527 struct robust_list_head __user *head = curr->robust_list;
3528 struct robust_list __user *entry, *next_entry, *pending;
3529 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3530 unsigned int next_pi;
3531 unsigned long futex_offset;
3532 int rc;
3533
3534 if (!futex_cmpxchg_enabled)
3535 return;
3536
3537 /*
3538 * Fetch the list head (which was registered earlier, via
3539 * sys_set_robust_list()):
3540 */
3541 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3542 return;
3543 /*
3544 * Fetch the relative futex offset:
3545 */
3546 if (get_user(futex_offset, &head->futex_offset))
3547 return;
3548 /*
3549 * Fetch any possibly pending lock-add first, and handle it
3550 * if it exists:
3551 */
3552 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3553 return;
3554
3555 next_entry = NULL; /* avoid warning with gcc */
3556 while (entry != &head->list) {
3557 /*
3558 * Fetch the next entry in the list before calling
3559 * handle_futex_death:
3560 */
3561 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3562 /*
3563 * A pending lock might already be on the list, so
3564 * don't process it twice:
3565 */
3566 if (entry != pending) {
3567 if (handle_futex_death((void __user *)entry + futex_offset,
3568 curr, pi, HANDLE_DEATH_LIST))
3569 return;
3570 }
3571 if (rc)
3572 return;
3573 entry = next_entry;
3574 pi = next_pi;
3575 /*
3576 * Avoid excessively long or circular lists:
3577 */
3578 if (!--limit)
3579 break;
3580
3581 cond_resched();
3582 }
3583
3584 if (pending) {
3585 handle_futex_death((void __user *)pending + futex_offset,
3586 curr, pip, HANDLE_DEATH_PENDING);
3587 }
3588}
3589
3590static void futex_cleanup(struct task_struct *tsk)
3591{
3592 if (unlikely(tsk->robust_list)) {
3593 exit_robust_list(tsk);
3594 tsk->robust_list = NULL;
3595 }
3596
3597#ifdef CONFIG_COMPAT
3598 if (unlikely(tsk->compat_robust_list)) {
3599 compat_exit_robust_list(tsk);
3600 tsk->compat_robust_list = NULL;
3601 }
3602#endif
3603
3604 if (unlikely(!list_empty(&tsk->pi_state_list)))
3605 exit_pi_state_list(tsk);
3606}
3607
3608/**
3609 * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
3610 * @tsk: task to set the state on
3611 *
3612 * Set the futex exit state of the task lockless. The futex waiter code
3613 * observes that state when a task is exiting and loops until the task has
3614 * actually finished the futex cleanup. The worst case for this is that the
3615 * waiter runs through the wait loop until the state becomes visible.
3616 *
3617 * This is called from the recursive fault handling path in do_exit().
3618 *
3619 * This is best effort. Either the futex exit code has run already or
3620 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
3621 * take it over. If not, the problem is pushed back to user space. If the
3622 * futex exit code did not run yet, then an already queued waiter might
3623 * block forever, but there is nothing which can be done about that.
3624 */
3625void futex_exit_recursive(struct task_struct *tsk)
3626{
3627 /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
3628 if (tsk->futex_state == FUTEX_STATE_EXITING)
3629 mutex_unlock(&tsk->futex_exit_mutex);
3630 tsk->futex_state = FUTEX_STATE_DEAD;
3631}
3632
3633static void futex_cleanup_begin(struct task_struct *tsk)
3634{
3635 /*
3636 * Prevent various race issues against a concurrent incoming waiter
3637 * including live locks by forcing the waiter to block on
3638 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
3639 * attach_to_pi_owner().
3640 */
3641 mutex_lock(&tsk->futex_exit_mutex);
3642
3643 /*
3644 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
3645 *
3646 * This ensures that all subsequent checks of tsk->futex_state in
3647 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
3648 * tsk->pi_lock held.
3649 *
3650 * It guarantees also that a pi_state which was queued right before
3651 * the state change under tsk->pi_lock by a concurrent waiter must
3652 * be observed in exit_pi_state_list().
3653 */
3654 raw_spin_lock_irq(&tsk->pi_lock);
3655 tsk->futex_state = FUTEX_STATE_EXITING;
3656 raw_spin_unlock_irq(&tsk->pi_lock);
3657}
3658
3659static void futex_cleanup_end(struct task_struct *tsk, int state)
3660{
3661 /*
3662 * Lockless store. The only side effect is that an observer might
3663 * take another loop until it becomes visible.
3664 */
3665 tsk->futex_state = state;
3666 /*
3667 * Drop the exit protection. This unblocks waiters which observed
3668 * FUTEX_STATE_EXITING to reevaluate the state.
3669 */
3670 mutex_unlock(&tsk->futex_exit_mutex);
3671}
3672
3673void futex_exec_release(struct task_struct *tsk)
3674{
3675 /*
3676 * The state handling is done for consistency, but in the case of
3677 * exec() there is no way to prevent further damage as the PID stays
3678 * the same. But for the unlikely and arguably buggy case that a
3679 * futex is held on exec(), this provides at least as much state
3680 * consistency protection which is possible.
3681 */
3682 futex_cleanup_begin(tsk);
3683 futex_cleanup(tsk);
3684 /*
3685 * Reset the state to FUTEX_STATE_OK. The task is alive and about
3686 * exec a new binary.
3687 */
3688 futex_cleanup_end(tsk, FUTEX_STATE_OK);
3689}
3690
3691void futex_exit_release(struct task_struct *tsk)
3692{
3693 futex_cleanup_begin(tsk);
3694 futex_cleanup(tsk);
3695 futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
3696}
3697
3698long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3699 u32 __user *uaddr2, u32 val2, u32 val3)
3700{
3701 int cmd = op & FUTEX_CMD_MASK;
3702 unsigned int flags = 0;
3703
3704 if (!(op & FUTEX_PRIVATE_FLAG))
3705 flags |= FLAGS_SHARED;
3706
3707 if (op & FUTEX_CLOCK_REALTIME) {
3708 flags |= FLAGS_CLOCKRT;
3709 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI &&
3710 cmd != FUTEX_LOCK_PI2)
3711 return -ENOSYS;
3712 }
3713
3714 switch (cmd) {
3715 case FUTEX_LOCK_PI:
3716 case FUTEX_LOCK_PI2:
3717 case FUTEX_UNLOCK_PI:
3718 case FUTEX_TRYLOCK_PI:
3719 case FUTEX_WAIT_REQUEUE_PI:
3720 case FUTEX_CMP_REQUEUE_PI:
3721 if (!futex_cmpxchg_enabled)
3722 return -ENOSYS;
3723 }
3724
3725 switch (cmd) {
3726 case FUTEX_WAIT:
3727 val3 = FUTEX_BITSET_MATCH_ANY;
3728 fallthrough;
3729 case FUTEX_WAIT_BITSET:
3730 return futex_wait(uaddr, flags, val, timeout, val3);
3731 case FUTEX_WAKE:
3732 val3 = FUTEX_BITSET_MATCH_ANY;
3733 fallthrough;
3734 case FUTEX_WAKE_BITSET:
3735 return futex_wake(uaddr, flags, val, val3);
3736 case FUTEX_REQUEUE:
3737 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3738 case FUTEX_CMP_REQUEUE:
3739 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3740 case FUTEX_WAKE_OP:
3741 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3742 case FUTEX_LOCK_PI:
3743 flags |= FLAGS_CLOCKRT;
3744 fallthrough;
3745 case FUTEX_LOCK_PI2:
3746 return futex_lock_pi(uaddr, flags, timeout, 0);
3747 case FUTEX_UNLOCK_PI:
3748 return futex_unlock_pi(uaddr, flags);
3749 case FUTEX_TRYLOCK_PI:
3750 return futex_lock_pi(uaddr, flags, NULL, 1);
3751 case FUTEX_WAIT_REQUEUE_PI:
3752 val3 = FUTEX_BITSET_MATCH_ANY;
3753 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3754 uaddr2);
3755 case FUTEX_CMP_REQUEUE_PI:
3756 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3757 }
3758 return -ENOSYS;
3759}
3760
3761static __always_inline bool futex_cmd_has_timeout(u32 cmd)
3762{
3763 switch (cmd) {
3764 case FUTEX_WAIT:
3765 case FUTEX_LOCK_PI:
3766 case FUTEX_LOCK_PI2:
3767 case FUTEX_WAIT_BITSET:
3768 case FUTEX_WAIT_REQUEUE_PI:
3769 return true;
3770 }
3771 return false;
3772}
3773
3774static __always_inline int
3775futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t)
3776{
3777 if (!timespec64_valid(ts))
3778 return -EINVAL;
3779
3780 *t = timespec64_to_ktime(*ts);
3781 if (cmd == FUTEX_WAIT)
3782 *t = ktime_add_safe(ktime_get(), *t);
3783 else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
3784 *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t);
3785 return 0;
3786}
3787
3788SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3789 const struct __kernel_timespec __user *, utime,
3790 u32 __user *, uaddr2, u32, val3)
3791{
3792 int ret, cmd = op & FUTEX_CMD_MASK;
3793 ktime_t t, *tp = NULL;
3794 struct timespec64 ts;
3795
3796 if (utime && futex_cmd_has_timeout(cmd)) {
3797 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3798 return -EFAULT;
3799 if (get_timespec64(&ts, utime))
3800 return -EFAULT;
3801 ret = futex_init_timeout(cmd, op, &ts, &t);
3802 if (ret)
3803 return ret;
3804 tp = &t;
3805 }
3806
3807 return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
3808}
3809
3810#ifdef CONFIG_COMPAT
3811/*
3812 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3813 */
3814static inline int
3815compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
3816 compat_uptr_t __user *head, unsigned int *pi)
3817{
3818 if (get_user(*uentry, head))
3819 return -EFAULT;
3820
3821 *entry = compat_ptr((*uentry) & ~1);
3822 *pi = (unsigned int)(*uentry) & 1;
3823
3824 return 0;
3825}
3826
3827static void __user *futex_uaddr(struct robust_list __user *entry,
3828 compat_long_t futex_offset)
3829{
3830 compat_uptr_t base = ptr_to_compat(entry);
3831 void __user *uaddr = compat_ptr(base + futex_offset);
3832
3833 return uaddr;
3834}
3835
3836/*
3837 * Walk curr->robust_list (very carefully, it's a userspace list!)
3838 * and mark any locks found there dead, and notify any waiters.
3839 *
3840 * We silently return on any sign of list-walking problem.
3841 */
3842static void compat_exit_robust_list(struct task_struct *curr)
3843{
3844 struct compat_robust_list_head __user *head = curr->compat_robust_list;
3845 struct robust_list __user *entry, *next_entry, *pending;
3846 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3847 unsigned int next_pi;
3848 compat_uptr_t uentry, next_uentry, upending;
3849 compat_long_t futex_offset;
3850 int rc;
3851
3852 if (!futex_cmpxchg_enabled)
3853 return;
3854
3855 /*
3856 * Fetch the list head (which was registered earlier, via
3857 * sys_set_robust_list()):
3858 */
3859 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
3860 return;
3861 /*
3862 * Fetch the relative futex offset:
3863 */
3864 if (get_user(futex_offset, &head->futex_offset))
3865 return;
3866 /*
3867 * Fetch any possibly pending lock-add first, and handle it
3868 * if it exists:
3869 */
3870 if (compat_fetch_robust_entry(&upending, &pending,
3871 &head->list_op_pending, &pip))
3872 return;
3873
3874 next_entry = NULL; /* avoid warning with gcc */
3875 while (entry != (struct robust_list __user *) &head->list) {
3876 /*
3877 * Fetch the next entry in the list before calling
3878 * handle_futex_death:
3879 */
3880 rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
3881 (compat_uptr_t __user *)&entry->next, &next_pi);
3882 /*
3883 * A pending lock might already be on the list, so
3884 * dont process it twice:
3885 */
3886 if (entry != pending) {
3887 void __user *uaddr = futex_uaddr(entry, futex_offset);
3888
3889 if (handle_futex_death(uaddr, curr, pi,
3890 HANDLE_DEATH_LIST))
3891 return;
3892 }
3893 if (rc)
3894 return;
3895 uentry = next_uentry;
3896 entry = next_entry;
3897 pi = next_pi;
3898 /*
3899 * Avoid excessively long or circular lists:
3900 */
3901 if (!--limit)
3902 break;
3903
3904 cond_resched();
3905 }
3906 if (pending) {
3907 void __user *uaddr = futex_uaddr(pending, futex_offset);
3908
3909 handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
3910 }
3911}
3912
3913COMPAT_SYSCALL_DEFINE2(set_robust_list,
3914 struct compat_robust_list_head __user *, head,
3915 compat_size_t, len)
3916{
3917 if (!futex_cmpxchg_enabled)
3918 return -ENOSYS;
3919
3920 if (unlikely(len != sizeof(*head)))
3921 return -EINVAL;
3922
3923 current->compat_robust_list = head;
3924
3925 return 0;
3926}
3927
3928COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
3929 compat_uptr_t __user *, head_ptr,
3930 compat_size_t __user *, len_ptr)
3931{
3932 struct compat_robust_list_head __user *head;
3933 unsigned long ret;
3934 struct task_struct *p;
3935
3936 if (!futex_cmpxchg_enabled)
3937 return -ENOSYS;
3938
3939 rcu_read_lock();
3940
3941 ret = -ESRCH;
3942 if (!pid)
3943 p = current;
3944 else {
3945 p = find_task_by_vpid(pid);
3946 if (!p)
3947 goto err_unlock;
3948 }
3949
3950 ret = -EPERM;
3951 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3952 goto err_unlock;
3953
3954 head = p->compat_robust_list;
3955 rcu_read_unlock();
3956
3957 if (put_user(sizeof(*head), len_ptr))
3958 return -EFAULT;
3959 return put_user(ptr_to_compat(head), head_ptr);
3960
3961err_unlock:
3962 rcu_read_unlock();
3963
3964 return ret;
3965}
3966#endif /* CONFIG_COMPAT */
3967
3968#ifdef CONFIG_COMPAT_32BIT_TIME
3969SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
3970 const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
3971 u32, val3)
3972{
3973 int ret, cmd = op & FUTEX_CMD_MASK;
3974 ktime_t t, *tp = NULL;
3975 struct timespec64 ts;
3976
3977 if (utime && futex_cmd_has_timeout(cmd)) {
3978 if (get_old_timespec32(&ts, utime))
3979 return -EFAULT;
3980 ret = futex_init_timeout(cmd, op, &ts, &t);
3981 if (ret)
3982 return ret;
3983 tp = &t;
3984 }
3985
3986 return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
3987}
3988#endif /* CONFIG_COMPAT_32BIT_TIME */
3989
3990static void __init futex_detect_cmpxchg(void)
3991{
3992#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3993 u32 curval;
3994
3995 /*
3996 * This will fail and we want it. Some arch implementations do
3997 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3998 * functionality. We want to know that before we call in any
3999 * of the complex code paths. Also we want to prevent
4000 * registration of robust lists in that case. NULL is
4001 * guaranteed to fault and we get -EFAULT on functional
4002 * implementation, the non-functional ones will return
4003 * -ENOSYS.
4004 */
4005 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
4006 futex_cmpxchg_enabled = 1;
4007#endif
4008}
4009
4010static int __init futex_init(void)
4011{
4012 unsigned int futex_shift;
4013 unsigned long i;
4014
4015#if CONFIG_BASE_SMALL
4016 futex_hashsize = 16;
4017#else
4018 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
4019#endif
4020
4021 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
4022 futex_hashsize, 0,
4023 futex_hashsize < 256 ? HASH_SMALL : 0,
4024 &futex_shift, NULL,
4025 futex_hashsize, futex_hashsize);
4026 futex_hashsize = 1UL << futex_shift;
4027
4028 futex_detect_cmpxchg();
4029
4030 for (i = 0; i < futex_hashsize; i++) {
4031 atomic_set(&futex_queues[i].waiters, 0);
4032 plist_head_init(&futex_queues[i].chain);
4033 spin_lock_init(&futex_queues[i].lock);
4034 }
4035
4036 return 0;
4037}
4038core_initcall(futex_init);