v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore, but one that enforces
 * an alternate policy, namely:
 *   1) Supports lock wait timeouts
 *   2) Write waiter has priority
 *   3) Downgrading is not supported
 *
 * Implementation notes:
 *   1) Upper half of semaphore count is a wait count (differs from rwsem
 *	in that rwsem normalizes the upper half to the wait bias)
 *   2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>

#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
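
/*
 * Editorial illustration (standalone userspace sketch, not part of this
 * file): how the bias constants pack two counters into one long -- the low
 * half counts active holders, the high half counts waiters (negatively).
 * Assumes the 64-bit variant, an LP64 long, and arithmetic right shift.
 */
#include <stdio.h>

#define ACTIVE_MASK	0xffffffffL
#define ACTIVE_BIAS	1L
#define WAIT_BIAS	(-ACTIVE_MASK-1)	/* -2^32 */
#define READ_BIAS	ACTIVE_BIAS
#define WRITE_BIAS	(WAIT_BIAS + ACTIVE_BIAS)

static void show(const char *what, long count)
{
	printf("%-20s count=%#018lx active=%ld waiters=%ld\n", what,
	       (unsigned long)count, count & ACTIVE_MASK, -(count >> 32));
}

int main(void)
{
	long count = 0;			/* LDSEM_UNLOCKED */

	count += READ_BIAS;		/* a reader takes the lock */
	show("down_read", count);	/* active=1 waiters=0 */

	count += WRITE_BIAS;		/* a writer optimistically bids */
	count += -ACTIVE_BIAS;		/* contended: back out the active part */
	show("writer waiting", count);	/* active=1 waiters=1 */

	count += -READ_BIAS;		/* reader releases; active hits 0 */
	show("up_read", count);		/* count < 0: time to wake the writer */
	return 0;
}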

struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, LDSEM_UNLOCKED);
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/*
	 * Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = atomic_long_add_return(adjust, &sem->count);
	do {
		if (count > 0)
			break;
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_store_release(&waiter->task, NULL);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}
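
/*
 * Worked example (editorial illustration): with three waiting readers on a
 * 64-bit build, adjust = 3 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS)
 * = 3 * (1 + 2^32) = 3 + 3*2^32, so the single atomic_long_add_return()
 * above adds three to the active half and retires three wait units from
 * the (negative) upper half in one step.
 */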

static inline int writer_trylock(struct ld_semaphore *sem)
{
	/*
	 * Only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
			return 0;
	} while (1);
}
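
/*
 * Editorial illustration (standalone userspace sketch, not kernel code):
 * the optimistic add-then-back-out shape of writer_trylock() above,
 * rendered with C11 atomics.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define ACTIVE_MASK	0xffffffffL
#define ACTIVE_BIAS	1L

static bool try_write_lock(atomic_long *count)
{
	/* mirror of atomic_long_add_return(): fetch-add, then local add */
	long c = atomic_fetch_add(count, ACTIVE_BIAS) + ACTIVE_BIAS;

	for (;;) {
		if ((c & ACTIVE_MASK) == ACTIVE_BIAS)
			return true;	/* our bias took active 0 -> 1 */
		if (atomic_compare_exchange_weak(count, &c, c - ACTIVE_BIAS))
			return false;	/* backed our bias out */
		/* cmpxchg failure refreshed c; re-check */
	}
}

int main(void)
{
	atomic_long count = 0;

	return try_write_lock(&count) ? 0 : 1;	/* uncontended: succeeds */
}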

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/*
	 * Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if there are no waiters,
	 * and early-out if not
	 */
	do {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
			count += adjust;
			break;
		}
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = current;
	get_task_struct(current);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!smp_load_acquire(&waiter.task))
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_current_state(TASK_RUNNING);

	if (!timeout) {
		/*
		 * Lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, cleanup lock wait.
		 */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
			sem->wait_readers--;
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/*
	 * Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so.
	 */
	do {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = current;

	set_current_state(TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
	list_del(&waiter.list);

	/*
	 * In case of timeout, wake up every reader that gave the right of
	 * way to this writer. This prevents the readers from being split
	 * into two groups: one that holds the semaphore and another that
	 * sleeps (once there is no longer contention with a writer).
	 */
	if (!locked && list_empty(&sem->write_wait))
		__ldsem_wake_readers(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	__set_current_state(TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}


static int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
	if (count <= 0) {
		lock_contended(&sem->dep_map, _RET_IP_);
		if (!down_read_failed(sem, count, timeout)) {
			rwsem_release(&sem->dep_map, _RET_IP_);
			return 0;
		}
	}
	lock_acquired(&sem->dep_map, _RET_IP_);
	return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_contended(&sem->dep_map, _RET_IP_);
		if (!down_write_failed(sem, count, timeout)) {
			rwsem_release(&sem->dep_map, _RET_IP_);
			return 0;
		}
	}
	lock_acquired(&sem->dep_map, _RET_IP_);
	return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (count >= 0) {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
			rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
			lock_acquired(&sem->dep_map, _RET_IP_);
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
			rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
			lock_acquired(&sem->dep_map, _RET_IP_);
			return 1;
		}
	}
	return 0;
}
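
/*
 * Editorial illustration (hypothetical caller, not from this file): the
 * timeout is in jiffies (it is handed to schedule_timeout()) and a zero
 * return means the lock was not taken. The function name, 5-second
 * timeout, and -EBUSY mapping below are assumptions for the sketch.
 */
static int ldsem_read_section_example(struct ld_semaphore *sem)
{
	if (!ldsem_down_read(sem, 5 * HZ))	/* give up after ~5s */
		return -EBUSY;

	/* ... read-side critical section ... */

	ldsem_up_read(sem);
	return 0;
}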

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	rwsem_release(&sem->dep_map, _RET_IP_);

	count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	rwsem_release(&sem->dep_map, _RET_IP_);

	count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
	if (count < 0)
		ldsem_wake(sem);
}
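
/*
 * Worked example (editorial illustration): with one reader holding the
 * lock and one writer waiting, count = LDSEM_READ_BIAS + LDSEM_WAIT_BIAS
 * = 1 - 2^32 on 64-bit. ldsem_up_read() then leaves count = -2^32:
 * negative (a waiter remains) with a zero active half (we were the last
 * holder), so ldsem_wake() runs and hands the lock to the writer.
 * ldsem_up_write() subtracts both halves of LDSEM_WRITE_BIAS at once, so
 * any count still negative afterwards can only mean waiters remain.
 */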


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif
v4.6
 
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore, but one that enforces
 * an alternate policy, namely:
 *   1) Supports lock wait timeouts
 *   2) Write waiter has priority
 *   3) Downgrading is not supported
 *
 * Implementation notes:
 *   1) Upper half of semaphore count is a wait count (differs from rwsem
 *	in that rwsem normalizes the upper half to the wait bias)
 *   2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>


#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)		\
				lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)				\
				lock_release(&(l)->dep_map, n, i)
#define lockdep_acquire(l, s, t, i)		__acq(l, s, t, 0, 1, NULL, i)
#define lockdep_acquire_nest(l, s, t, n, i)	__acq(l, s, t, 0, 1, n, i)
#define lockdep_acquire_read(l, s, t, i)	__acq(l, s, t, 1, 1, NULL, i)
#define lockdep_release(l, n, i)		__rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i)		do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)	do { } while (0)
# define lockdep_acquire_read(l, s, t, i)	do { } while (0)
# define lockdep_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)		lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)		do { } while (0)
#endif


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)

struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};

static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

/*
 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
 * Returns 1 if count was successfully changed; @*old will have @new value.
 * Returns 0 if count was not changed; @*old will have most recent sem->count
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
	long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
	if (tmp == *old) {
		*old = new;
		return 1;
	} else {
		*old = tmp;
		return 0;
	}
}
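
/*
 * Editorial illustration (standalone userspace sketch, not kernel code):
 * the ldsem_cmpxchg() contract above, mirrored with C11 atomics. On
 * failure the expected value is refreshed, so callers can loop without
 * re-reading the count themselves.
 */
#include <stdatomic.h>

static int cmpxchg_update(long *old, long new_val, atomic_long *count)
{
	if (atomic_compare_exchange_strong(count, old, new_val)) {
		*old = new_val;		/* success: report the value we wrote */
		return 1;
	}
	return 0;			/* failure: *old already refreshed */
}

int main(void)
{
	atomic_long count = 0;
	long old = 0;

	return cmpxchg_update(&old, 1, &count) ? 0 : 1;	/* succeeds */
}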

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = LDSEM_UNLOCKED;
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/* Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = ldsem_atomic_update(adjust, sem);
	do {
		if (count > 0)
			break;
		if (ldsem_cmpxchg(&count, count - adjust, sem))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}

static inline int writer_trylock(struct ld_semaphore *sem)
{
	/* only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
			return 0;
	} while (1);
}

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if there are no waiters,
	 * and early-out if not */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = tsk;
	get_task_struct(tsk);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);

		if (!waiter.task)
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_task_state(tsk, TASK_RUNNING);

	if (!timeout) {
		/* lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, cleanup lock wait */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	struct task_struct *tsk = current;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/* Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so */
	do {
		if (ldsem_cmpxchg(&count, count + adjust, sem))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = tsk;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	__set_task_state(tsk, TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}


static int __ldsem_down_read_nested(struct ld_semaphore *sem,
					   int subclass, long timeout)
{
	long count;

	lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
	if (count <= 0) {
		lock_stat(sem, contended);
		if (!down_read_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
					    int subclass, long timeout)
{
	long count;

	lockdep_acquire(sem, subclass, 0, _RET_IP_);

	count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_stat(sem, contended);
		if (!down_write_failed(sem, count, timeout)) {
			lockdep_release(sem, 1, _RET_IP_);
			return 0;
		}
	}
	lock_stat(sem, acquired);
	return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while (count >= 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
			lockdep_acquire_read(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = sem->count;

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
			lockdep_acquire(sem, 0, 1, _RET_IP_);
			lock_stat(sem, acquired);
			return 1;
		}
	}
	return 0;
}

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	lockdep_release(sem, 1, _RET_IP_);

	count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
	if (count < 0)
		ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif