Linux Audio

Check our new training course

Loading...
v3.15
 
  1/*
  2 * R/W semaphores for ia64
  3 *
  4 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
  5 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
  6 * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
  7 *
  8 * Based on asm-i386/rwsem.h and other architecture implementation.
  9 *
 10 * The MSW of the count is the negated number of active writers and
 11 * waiting lockers, and the LSW is the total number of active locks.
 12 *
 13 * The lock count is initialized to 0 (no active and no waiting lockers).
 14 *
 15 * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
 16 * the case of an uncontended lock. Readers increment by 1 and see a positive
 17 * value when uncontended, negative if there are writers (and maybe) readers
 18 * waiting (in which case it goes to sleep).
 19 */
 20
 21#ifndef _ASM_IA64_RWSEM_H
 22#define _ASM_IA64_RWSEM_H
 23
 24#ifndef _LINUX_RWSEM_H
 25#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
 26#endif
 27
 28#include <asm/intrinsics.h>
 29
 30#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
 31#define RWSEM_ACTIVE_BIAS		(1L)
 32#define RWSEM_ACTIVE_MASK		(0xffffffffL)
 33#define RWSEM_WAITING_BIAS		(-0x100000000L)
 34#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 35#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 36
 37/*
 38 * lock for reading
 39 */
 
 
 
 
 
 
 
 
/*
 * Fast path for down_read(): atomically add one reader (8-byte
 * fetch-and-add with acquire semantics).  A negative count means a
 * writer is active or waiting (see the bias layout in the file header),
 * so fall through to the sleeping slow path.
 */
  40static inline void
  41__down_read (struct rw_semaphore *sem)
  42{
  43	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
  44
  45	if (result < 0)
  46		rwsem_down_read_failed(sem);
  47}
 48
 
 
 
 
 
 
 
 
 
 
 49/*
 50 * lock for writing
 51 */
  52static inline void
  53__down_write (struct rw_semaphore *sem)
  54{
  55	long old, new;
  56
	/*
	 * CAS loop with acquire semantics: add RWSEM_ACTIVE_WRITE_BIAS
	 * (WAITING_BIAS + ACTIVE_BIAS) to the count.  Retry until the
	 * count did not change under us.
	 */
  57	do {
  58		old = sem->count;
  59		new = old + RWSEM_ACTIVE_WRITE_BIAS;
  60	} while (cmpxchg_acq(&sem->count, old, new) != old);
  61
	/* Nonzero previous count => lock was held; sleep in the slow path. */
  62	if (old != 0)





  63		rwsem_down_write_failed(sem);
  64}
 65
 
 
 
 
 
 
 
 
 
 
 
 66/*
 67 * unlock after reading
 68 */
  69static inline void
  70__up_read (struct rw_semaphore *sem)
  71{
	/* Drop our reader bias with release semantics; fetchadd returns the
	 * value the count had *before* the decrement. */
  72	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
  73
	/*
	 * --result reconstructs the post-decrement count without another
	 * atomic read.  Negative => waiters are queued; if the active count
	 * (low 32 bits) just reached zero we were the last active locker
	 * and must wake the waiters.
	 */
  74	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
  75		rwsem_wake(sem);
  76}
 77
 78/*
 79 * unlock after writing
 80 */
  81static inline void
  82__up_write (struct rw_semaphore *sem)
  83{
  84	long old, new;
  85
	/* CAS loop with release semantics: remove our write bias. */
  86	do {
  87		old = sem->count;
  88		new = old - RWSEM_ACTIVE_WRITE_BIAS;
  89	} while (cmpxchg_rel(&sem->count, old, new) != old);
  90
	/* Waiters queued (new < 0) and no active lockers left => wake them. */
  91	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
  92		rwsem_wake(sem);
  93}
 94
 95/*
 96 * trylock for reading -- returns 1 if successful, 0 if contention
 97 */
	/*
	 * Keep attempting to add a reader as long as the count stays
	 * non-negative (no writer active or waiting).  Succeeds when the
	 * acquire-cmpxchg installs tmp+1 without interference; gives up
	 * (returns 0) as soon as a writer bias is observed.
	 */
  98static inline int
  99__down_read_trylock (struct rw_semaphore *sem)
 100{
 101	long tmp;
 102	while ((tmp = sem->count) >= 0) {
 103		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
 104			return 1;
 105		}
 106	}
 107	return 0;
 108}
109
110/*
111 * trylock for writing -- returns 1 if successful, 0 if contention
112 */
	/*
	 * Single acquire-cmpxchg: only succeeds if the count was exactly
	 * RWSEM_UNLOCKED_VALUE (no lockers, no waiters), in which case the
	 * write bias is installed atomically.
	 */
 113static inline int
 114__down_write_trylock (struct rw_semaphore *sem)
 115{
 116	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
 117			      RWSEM_ACTIVE_WRITE_BIAS);
 118	return tmp == RWSEM_UNLOCKED_VALUE;
 119}
120
121/*
122 * downgrade write lock to read lock
123 */
 124static inline void
 125__downgrade_write (struct rw_semaphore *sem)
 126{
 127	long old, new;
 128
	/*
	 * Subtracting WAITING_BIAS converts our write bias
	 * (WAITING_BIAS + ACTIVE_BIAS) into a plain reader bias
	 * (ACTIVE_BIAS), with release semantics.
	 */
 129	do {
 130		old = sem->count;
 131		new = old - RWSEM_WAITING_BIAS;
 132	} while (cmpxchg_rel(&sem->count, old, new) != old);
 133
	/* old < 0 => waiters were queued; wake the waiting readers. */
 134	if (old < 0)
 135		rwsem_downgrade_wake(sem);
 136}
137
138/*
139 * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
140 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
141 */
/* Atomically add delta to sem->count; _update additionally evaluates to
 * the post-add value (atomic64_add_return).  Kept as macros per the note
 * above about GCC 3.1 miscompiling the inline-function form. */
 142#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
 143#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
144
145#endif /* _ASM_IA64_RWSEM_H */
v4.17
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * R/W semaphores for ia64
  4 *
  5 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
  6 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
  7 * Copyright (C) 2005 Christoph Lameter <cl@linux.com>
  8 *
  9 * Based on asm-i386/rwsem.h and other architecture implementation.
 10 *
 11 * The MSW of the count is the negated number of active writers and
 12 * waiting lockers, and the LSW is the total number of active locks.
 13 *
 14 * The lock count is initialized to 0 (no active and no waiting lockers).
 15 *
 16 * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
 17 * the case of an uncontended lock. Readers increment by 1 and see a positive
 18 * value when uncontended, negative if there are writers (and maybe) readers
 19 * waiting (in which case it goes to sleep).
 20 */
 21
 22#ifndef _ASM_IA64_RWSEM_H
 23#define _ASM_IA64_RWSEM_H
 24
 25#ifndef _LINUX_RWSEM_H
 26#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
 27#endif
 28
 29#include <asm/intrinsics.h>
 30
 31#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
 32#define RWSEM_ACTIVE_BIAS		(1L)
 33#define RWSEM_ACTIVE_MASK		(0xffffffffL)
 34#define RWSEM_WAITING_BIAS		(-0x100000000L)
 35#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 36#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 37
 38/*
 39 * lock for reading
 40 */
/*
 * Common reader fast path shared by __down_read() and
 * __down_read_killable(): atomically add one reader with acquire
 * semantics.  Returns nonzero when the count was negative (a writer is
 * active or waiting) and the caller must take its slow path.
 */
  41static inline int
  42___down_read (struct rw_semaphore *sem)
  43{
  44	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
  45
  46	return (result < 0);
  47}
 48
  49static inline void
  50__down_read (struct rw_semaphore *sem)
  51{
	/* Fast path failed: sleep (uninterruptibly) until granted the lock. */
  52	if (___down_read(sem))


  53		rwsem_down_read_failed(sem);
  54}
 55
	/*
	 * Like __down_read() but the sleep may be interrupted by a fatal
	 * signal; returns 0 on success, -EINTR if killed while waiting.
	 */
  56static inline int
  57__down_read_killable (struct rw_semaphore *sem)
  58{
  59	if (___down_read(sem))
  60		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
  61			return -EINTR;
  62
  63	return 0;
  64}
 65
 66/*
 67 * lock for writing
 68 */
/*
 * Common writer fast path shared by __down_write() and
 * __down_write_killable(): acquire-CAS loop that adds
 * RWSEM_ACTIVE_WRITE_BIAS to the count.  Returns the previous count;
 * nonzero means the lock was contended and the caller must take its
 * slow path.
 */
  69static inline long
  70___down_write (struct rw_semaphore *sem)
  71{
  72	long old, new;
  73
  74	do {
  75		old = atomic_long_read(&sem->count);
  76		new = old + RWSEM_ACTIVE_WRITE_BIAS;
  77	} while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
  78
  79	return old;
  80}
 81
  82static inline void
  83__down_write (struct rw_semaphore *sem)
  84{
	/* Nonzero previous count => contended; sleep uninterruptibly. */
  85	if (___down_write(sem))
  86		rwsem_down_write_failed(sem);
  87}
 88
	/*
	 * Like __down_write() but the sleep may be interrupted by a fatal
	 * signal; returns 0 on success, -EINTR if killed while waiting.
	 */
  89static inline int
  90__down_write_killable (struct rw_semaphore *sem)
  91{
  92	if (___down_write(sem)) {
  93		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
  94			return -EINTR;
  95	}
  96
  97	return 0;
  98}
 99
100/*
101 * unlock after reading
102 */
 103static inline void
 104__up_read (struct rw_semaphore *sem)
 105{
	/* Drop our reader bias with release semantics; fetchadd returns the
	 * value the count had *before* the decrement. */
 106	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
 107
	/*
	 * --result reconstructs the post-decrement count.  Negative =>
	 * waiters are queued; if the active count (low 32 bits) just
	 * reached zero we were the last active locker: wake the waiters.
	 */
 108	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
 109		rwsem_wake(sem);
 110}
111
112/*
113 * unlock after writing
114 */
 115static inline void
 116__up_write (struct rw_semaphore *sem)
 117{
 118	long old, new;
 119
	/* Release-CAS loop: remove our write bias from the count. */
 120	do {
 121		old = atomic_long_read(&sem->count);
 122		new = old - RWSEM_ACTIVE_WRITE_BIAS;
 123	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
 124
	/* Waiters queued (new < 0) and no active lockers left => wake them. */
 125	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
 126		rwsem_wake(sem);
 127}
128
129/*
130 * trylock for reading -- returns 1 if successful, 0 if contention
131 */
	/*
	 * Keep attempting to add a reader as long as the count stays
	 * non-negative (no writer active or waiting).  Succeeds when the
	 * acquire-cmpxchg installs tmp+1 without interference; gives up
	 * (returns 0) as soon as a writer bias is observed.
	 */
 132static inline int
 133__down_read_trylock (struct rw_semaphore *sem)
 134{
 135	long tmp;
 136	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
 137		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
 138			return 1;
 139		}
 140	}
 141	return 0;
 142}
143
144/*
145 * trylock for writing -- returns 1 if successful, 0 if contention
146 */
	/*
	 * Single acquire-cmpxchg: only succeeds if the count was exactly
	 * RWSEM_UNLOCKED_VALUE (no lockers, no waiters), in which case the
	 * write bias is installed atomically.
	 */
 147static inline int
 148__down_write_trylock (struct rw_semaphore *sem)
 149{
 150	long tmp = atomic_long_cmpxchg_acquire(&sem->count,
 151			RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
 152	return tmp == RWSEM_UNLOCKED_VALUE;
 153}
154
155/*
156 * downgrade write lock to read lock
157 */
 158static inline void
 159__downgrade_write (struct rw_semaphore *sem)
 160{
 161	long old, new;
 162
	/*
	 * Subtracting WAITING_BIAS converts our write bias
	 * (WAITING_BIAS + ACTIVE_BIAS) into a plain reader bias
	 * (ACTIVE_BIAS), with release semantics.
	 */
 163	do {
 164		old = atomic_long_read(&sem->count);
 165		new = old - RWSEM_WAITING_BIAS;
 166	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
 167
	/* old < 0 => waiters were queued; wake the waiting readers. */
 168	if (old < 0)
 169		rwsem_downgrade_wake(sem);
 170}
 
 
 
 
 
 
 
171
172#endif /* _ASM_IA64_RWSEM_H */