/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 *	 checking that the count becomes more than 0 for successful lock
 *	 acquisition, i.e. the case where there are only readers or nobody
 *	 has the lock (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else holds or is attempting the lock).  If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 *
 */
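
/*
 * Worked example (editorial illustration, using the 32-bit values above
 * with ACTIVE_BIAS == 0x00000001, WAITING_BIAS == 0xffff0000 and
 * ACTIVE_WRITE_BIAS == WAITING_BIAS + ACTIVE_BIAS == 0xffff0001):
 *
 *   two active readers, nobody queued:
 *	count = 2 * ACTIVE_BIAS			= 0x00000002
 *   the same two readers, plus a writer now queued on the wait list:
 *	count = 2 * ACTIVE_BIAS + WAITING_BIAS	= 0xffff0002
 *   both readers drop the lock, writer still queued but not yet woken:
 *	count = WAITING_BIAS			= 0xffff0000
 *   the queued writer steals the lock via cmpxchg:
 *	count = ACTIVE_WRITE_BIAS		= 0xffff0001
 */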

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/*
			 * If the count is still less than RWSEM_WAITING_BIAS
			 * after removing the adjustment, it is assumed that
			 * a writer has stolen the lock. We have to undo our
			 * reader grant.
			 */
			if (atomic_long_add_return(-adjustment, &sem->count) <
			    RWSEM_WAITING_BIAS)
				return;

			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
		/*
		 * It is not really necessary to set it to reader-owned here,
		 * but it gives the spinners an early indication that the
		 * readers now have the lock.
		 */
		rwsem_set_reader_owned(sem);
	}

	/*
	 * Grant read locks to all of the readers at the front of the queue.
	 * We know that woken will be at least 1 as we accounted for it
	 * above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 */
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		struct task_struct *tsk;

		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
		tsk = waiter->task;

		wake_q_add(wake_q, tsk);
		list_del(&waiter->list);
		/*
		 * Ensure that the last operation is setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
	}

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
}
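
/*
 * Editorial note, a worked example of the adjustment arithmetic above
 * (32-bit values, RWSEM_WAKE_ANY, three readers queued and nothing else):
 * the speculative grant already added ACTIVE_READ_BIAS (0x00000001), the
 * loop wakes woken = 3 readers and empties the list, so
 *
 *	adjustment = 3 * ACTIVE_READ_BIAS - ACTIVE_READ_BIAS	= 0x00000002
 *	adjustment -= WAITING_BIAS (i.e. - 0xffff0000)		= 0x00010002
 *
 * and the final atomic_long_add() leaves count changed by a net
 * 3 * ACTIVE_BIAS - WAITING_BIAS: three active readers, no more waiters.
 */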

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	__set_task_state(tsk, TASK_RUNNING);
	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
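
/*
 * Editorial note: a minimal sketch of the kind of fast path that hands off
 * to rwsem_down_read_failed(), in the style of the generic rwsem fast path.
 * The function name is illustrative only and the exact per-architecture
 * code may differ.
 */
static inline void example_down_read(struct rw_semaphore *sem)
{
	/*
	 * Add ACTIVE_READ_BIAS (i.e. 1); a result <= 0 means a writer holds
	 * or is waiting for the lock, so fall back to the slow path above.
	 */
	if (atomic_long_inc_return_acquire(&sem->count) <= 0)
		rwsem_down_read_failed(sem);
}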

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
							== RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
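
/*
 * Editorial note, the two transitions rwsem_try_write_lock() attempts,
 * in the 32-bit values of the guide at the top of this file:
 *
 *	only this writer queued:  0xffff0000 -> 0xffff0001 (ACTIVE_WRITE_BIAS)
 *	other waiters remain:	  0xffff0000 -> 0xfffe0001 (ACTIVE_WRITE_BIAS
 *							    + WAITING_BIAS)
 */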

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = atomic_long_read(&sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = atomic_long_cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!rwsem_owner_is_writer(owner)) {
		/*
		 * Don't spin if the rwsem is reader-owned.
		 */
		ret = !rwsem_owner_is_reader(owner);
		goto done;
	}

	/*
	 * Due to the lock holder preemption issue, we skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
	ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
done:
	rcu_read_unlock();
	return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!rwsem_owner_is_writer(owner))
		goto out;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to free()d memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/*
		 * Abort spinning when need_resched() is set, the owner is
		 * not running, or the owner's CPU is preempted.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();
out:
	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning.
	 */
	return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have been preempted between
		 * the owner acquiring the lock and setting the owner field.
		 * If we're an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			DEFINE_WAKE_Q(wake_q);

			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);
		}

	} else
		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);
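
/*
 * Editorial note: the corresponding illustrative write fast path. Only a
 * transition from 0 (unlocked) to ACTIVE_WRITE_BIAS means the lock was
 * taken uncontended; anything else drops into the slow path above. Again,
 * the name and exact form are illustrative, not the per-architecture code.
 */
static inline void example_down_write(struct rw_semaphore *sem)
{
	if (atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					   &sem->count) != RWSEM_ACTIVE_WRITE_BIAS)
		rwsem_down_write_failed(sem);
}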

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);
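
/*
 * Editorial note: an illustrative unlock path showing when rwsem_wake() is
 * reached, per the comment above ("up_read/up_write has decremented the
 * active part of count"). A negative result means the waiting part of the
 * count is set, i.e. somebody is queued. Illustration only.
 */
static inline void example_up_write(struct rw_semaphore *sem)
{
	if (atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
					   &sem->count) < 0)
		rwsem_wake(sem);
}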

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
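
/*
 * Editorial note: an illustrative downgrade_write() path matching the
 * comment above rwsem_downgrade_wake(). Adding -WAITING_BIAS converts
 * ACTIVE_WRITE_BIAS (= WAITING_BIAS + ACTIVE_BIAS) into a single reader
 * bias; a still-negative result means waiters remain queued, so the
 * readers at the front are woken. Illustration only, not the exact
 * per-architecture code.
 */
static inline void example_downgrade_write(struct rw_semaphore *sem)
{
	if (atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
					   &sem->count) < 0)
		rwsem_downgrade_wake(sem);
}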