// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + #readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: a writer can attempt to steal the lock at this count
 *		by adding ACTIVE_WRITE_BIAS with a cmpxchg and checking the
 *		old count.
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 *	 checking that the count becomes more than 0 for successful lock
 *	 acquisition, i.e. the case where there are only readers or nobody
 *	 has the lock (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write
 *	 and checking that the count becomes ACTIVE_WRITE_BIAS for successful
 *	 lock acquisition (i.e. nobody else has the lock or is attempting it).
 *	 If unsuccessful, in rwsem_down_write_failed(), we'll check to see if
 *	 there are only waiters but none active (5th case above), and attempt
 *	 to steal the lock.
 */
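
/*
 * Worked example of the bias arithmetic above (illustrative only,
 * using the 32-bit values from the table: ACTIVE_BIAS = 0x00000001,
 * WAITING_BIAS = 0xffff0000, ACTIVE_WRITE_BIAS = 0xffff0001):
 *
 *	two active readers, empty queue:  2*ACTIVE_BIAS            = 0x00000002
 *	one active writer, empty queue:   ACTIVE_WRITE_BIAS        = 0xffff0001
 *	one active reader, one waiter:    WAITING_BIAS+ACTIVE_BIAS = 0xffff0001
 *
 * Note that the last two states share a value; this ambiguity is one
 * reason the slowpaths below take wait_lock and inspect the wait list
 * instead of decoding the count alone.
 */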

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
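
/*
 * Minimal usage sketch (illustrative; my_rwsem and the callers are
 * made-up names). Code normally reaches __init_rwsem() through the
 * init_rwsem() wrapper or the DECLARE_RWSEM() static initializer
 * rather than calling it directly:
 *
 *	static DECLARE_RWSEM(my_rwsem);
 *
 *	void my_reader(void)
 *	{
 *		down_read(&my_rwsem);
 *		... read shared state ...
 *		up_read(&my_rwsem);
 *	}
 *
 *	void my_writer(void)
 *	{
 *		down_write(&my_rwsem);
 *		... modify shared state ...
 *		up_write(&my_rwsem);
 *	}
 */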

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that
 * can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup; the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having their
 *   task field zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/*
			 * If the count is still less than RWSEM_WAITING_BIAS
			 * after removing the adjustment, it is assumed that
			 * a writer has stolen the lock. We have to undo our
			 * reader grant.
			 */
			if (atomic_long_add_return(-adjustment, &sem->count) <
			    RWSEM_WAITING_BIAS)
				return;

			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
		/*
		 * It is not really necessary to set it to reader-owned here,
		 * but it gives the spinners an early indication that the
		 * readers now have the lock.
		 */
		rwsem_set_reader_owned(sem);
	}

	/*
	 * Grant read locks to all of the readers at the front of the queue.
	 * We know that woken will be at least 1, as we accounted for the
	 * first reader above. Note we increment the 'active part' of the
	 * count by the number of readers before waking any processes up.
	 */
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		struct task_struct *tsk;

		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
		tsk = waiter->task;

		wake_q_add(wake_q, tsk);
		list_del(&waiter->list);
		/*
		 * Ensure that the last operation is setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
	}

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
}
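
/*
 * Worked example of the accounting above (illustrative): suppose
 * up_write() finds three readers queued and we are called with
 * wake_type == RWSEM_WAKE_ANY. The reader-grant fetch_add puts one
 * RWSEM_ACTIVE_READ_BIAS into the count, the loop then sets woken = 3,
 * so the final adjustment adds two more readers' worth of bias and
 * subtracts RWSEM_WAITING_BIAS for the now-empty list. Net result:
 * three active read locks and no waiting bias, i.e. the 0x0000000X
 * row of the table at the top of this file.
 */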

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);
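
/*
 * For context, a minimal sketch of the down_read() fastpath that falls
 * back to the slowpath above on contention. This mirrors the
 * asm-generic flavour; the function name is made up and real fastpaths
 * may be arch-specific:
 */
static inline void rwsem_down_read_sketch(struct rw_semaphore *sem)
{
	/* a positive result means only readers (or nobody) hold the sem */
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}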

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
							== RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
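
/*
 * Illustrative transition for the cmpxchg above: if we are the only
 * waiter (list_is_singular()), success moves the count from
 * RWSEM_WAITING_BIAS (0xffff0000 in the 32-bit table) to
 * RWSEM_ACTIVE_WRITE_BIAS (0xffff0001); with other waiters still
 * queued it moves to 0xfffe0001 instead, preserving the waiting bias
 * on their behalf.
 */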

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = atomic_long_read(&sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = atomic_long_cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!owner || !is_rwsem_owner_spinnable(owner)) {
		ret = !owner;	/* !owner is spinnable */
		goto done;
	}

	/*
	 * To guard against the lock holder preemption issue, we skip
	 * spinning if the owner task is not running on a CPU or if its
	 * CPU has been preempted (as a vCPU).
	 */
	ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
done:
	rcu_read_unlock();
	return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!is_rwsem_owner_spinnable(owner))
		return false;

	rcu_read_lock();
	while (owner && (READ_ONCE(sem->owner) == owner)) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to freed memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/*
		 * Abort spinning when need_resched() is set, the owner is
		 * not running, or the owner's CPU is preempted.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning.
	 */
	return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock, as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have been preempted between
		 * the owner acquiring the lock and setting the owner field.
		 * If we're an RT task, that can live-lock, because we won't
		 * let the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}

	} else
		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);
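
/*
 * For context, a minimal sketch of the down_write() fastpath that
 * falls back to the slowpath above, again modelled on the asm-generic
 * flavour with a made-up name:
 */
static inline void rwsem_down_write_sketch(struct rw_semaphore *sem)
{
	/* only an uncontended sem yields exactly RWSEM_ACTIVE_WRITE_BIAS */
	if (unlikely(atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					&sem->count) != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}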

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * __rwsem_down_write_failed_common(sem)
	 *   rwsem_optimistic_spin(sem)
	 *     osq_unlock(sem->osq)
	 *   ...
	 *   atomic_long_add_return(&sem->count)
	 *
	 *      - VS -
	 *
	 *              __up_write()
	 *                if (atomic_long_sub_return_release(&sem->count) < 0)
	 *                  rwsem_wake(sem)
	 *                    osq_is_locked(&sem->osq)
	 *
	 * And __up_write() must observe !osq_is_locked() when it observes the
	 * atomic_long_add_return() in order to not miss a wakeup.
	 *
	 * This boils down to:
	 *
	 * [S.rel] X = 1                [RmW] r0 = (Y += 0)
	 *         MB                         RMB
	 * [RmW]   Y += 1               [L]   r1 = X
	 *
	 * exists (r0=1 /\ r1=0)
	 */
	smp_rmb();

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer           up_write/up_read caller
	 *    ---------------           -----------------------
	 * [S]   osq_unlock()           [L]   osq
	 *       MB                           RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);
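
/*
 * For context, a sketch of the __up_write() release path that ends up
 * in rwsem_wake() above (made-up name; the real path also handles
 * lockdep and lives in kernel/locking/rwsem.h plus the fastpath
 * headers):
 */
static inline void rwsem_up_write_sketch(struct rw_semaphore *sem)
{
	rwsem_clear_owner(sem);
	/* a negative result means waiters remain queued: go wake them */
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}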

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
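
/*
 * For context, a sketch of the downgrade_write() path that ends up in
 * rwsem_downgrade_wake() above (made-up name, modelled on the
 * asm-generic __downgrade_write()):
 */
static inline void rwsem_downgrade_sketch(struct rw_semaphore *sem)
{
	long tmp;

	rwsem_set_reader_owned(sem);
	/*
	 * -RWSEM_WAITING_BIAS is +0x00010000 in the 32-bit table: it turns
	 * ACTIVE_WRITE_BIAS into a single reader's ACTIVE_BIAS. A negative
	 * result means waiters are still queued and must be woken.
	 */
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}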