/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken waiter blocks are removed from the list after their task has been
 *   zeroed
 * - a waiting writer at the head is woken (but not granted the lock) only
 *   when wake_type is RWSEM_WAKE_ANY
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it.  Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

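	/* One active-read bias may already have been applied above while
	 * probing for a stolen lock; subtracting the old adjustment keeps
	 * that first reader from being counted twice.
	 */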
	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
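		/* Reading ->task must complete before we clear it below:
		 * once waiter->task is NULL, the woken reader may return
		 * from rwsem_down_read_failed() and reuse the stack slot
		 * holding this waiter, so it must not be touched again.
		 */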
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

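	/* Detach all the woken readers from the queue in one go instead of
	 * doing a list_del() per waiter.
	 */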
	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}

/*
 * wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
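	/* Pin the task for the waker: __rwsem_do_wake() calls
	 * wake_up_process() after clearing waiter.task, by which point we
	 * may already have raced ahead; it drops this reference with
	 * put_task_struct().
	 */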
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
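		/* __rwsem_do_wake() clears waiter.task only after granting
		 * us the read lock, so a NULL task means we now hold it */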
		if (!waiter.task)
			break;
		schedule();
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * wait until we successfully acquire the write lock
 */
__visible
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
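	/* Unlike the read path, no task reference is taken: a writer waiter
	 * is only dereferenced by the waker under wait_lock, and it stays
	 * queued until we remove it ourselves under that same lock.
	 */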

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there were already threads queued before us and there are no
	 * active writers, the lock must be read owned; so we try to wake
	 * any read locks that were queued ahead of us. */
	if (count > RWSEM_WAITING_BIAS &&
	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

	/* wait until we successfully acquire the lock */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (true) {
		if (!(count & RWSEM_ACTIVE_MASK)) {
			/* Try acquiring the write lock. */
			count = RWSEM_ACTIVE_WRITE_BIAS;
			if (!list_is_singular(&sem->wait_list))
				count += RWSEM_WAITING_BIAS;

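			/* Cheap read first: only attempt the cmpxchg when
			 * count shows nothing but the waiting bias, i.e.
			 * when the exchange is actually likely to succeed.
			 */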
			if (sem->count == RWSEM_WAITING_BIAS &&
			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
							RWSEM_WAITING_BIAS)
				break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
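
/*
 * Note: this file only contains the contended slow paths.  The fast paths
 * (down_read(), down_write(), up_read(), up_write(), downgrade_write())
 * live in the arch or asm-generic rwsem headers and fall back here only
 * when the atomic count update detects contention.  A rough sketch of the
 * generic read fast path (not the exact code for every architecture):
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
 *			rwsem_down_read_failed(sem);
 *	}
 */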