/*
 * R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <cl@linux.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer adds RWSEM_ACTIVE_WRITE_BIAS, the count becomes
 * 0xffffffff00000001 for the case of an uncontended lock. Readers increment
 * the count by 1 and see a positive value when uncontended, or a negative
 * value if there are writers (and possibly waiting readers), in which case
 * the reader goes to sleep. A worked example of a few count values follows
 * the bias definitions below.
 */

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <asm/intrinsics.h>

#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS		(1L)
#define RWSEM_ACTIVE_MASK		(0xffffffffL)
#define RWSEM_WAITING_BIAS		(-0x100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

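/*
 * A few count values implied by the bias definitions above (illustrative
 * only, following the encoding described in the header comment):
 *
 *	0x0000000000000000	unlocked (RWSEM_UNLOCKED_VALUE)
 *	0x0000000000000001	one reader holds the lock, nobody waiting
 *	0x0000000000000003	three readers hold the lock, nobody waiting
 *	0xffffffff00000001	one writer holds the lock, nobody waiting
 *				(RWSEM_ACTIVE_WRITE_BIAS added to 0)
 *	0xffffffff00000000	nobody holds the lock, but lockers are queued
 *				(RWSEM_WAITING_BIAS)
 *
 * A negative count is what sends the fast paths below into the out-of-line
 * slowpaths (rwsem_down_read_failed() and friends).
 */
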
/*
 * lock for reading
 */
static inline void
__down_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);

	/* a negative count means a writer is active or lockers are queued */
	if (result < 0)
		rwsem_down_read_failed(sem);
}

/*
 * lock for writing
 */
static inline long
___down_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = atomic_long_read(&sem->count);
		new = old + RWSEM_ACTIVE_WRITE_BIAS;
	} while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);

	/* a non-zero old count means the lock was held or contended */
	return old;
}

static inline void
__down_write (struct rw_semaphore *sem)
{
	if (___down_write(sem))
		rwsem_down_write_failed(sem);
}

static inline int
__down_write_killable (struct rw_semaphore *sem)
{
	if (___down_write(sem))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;

	return 0;
}

/*
 * unlock after reading
 */
static inline void
__up_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);

	/*
	 * fetchadd returns the value before the decrement, so --result is the
	 * new count: wake waiters when the last active locker leaves while
	 * the count is still negative (i.e. somebody is queued).
	 */
	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
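/*
 * Worked example for the wake check below (illustrative only, assuming
 * queued lockers have added RWSEM_WAITING_BIAS to the count): a writer
 * holding the lock with other lockers queued sees
 * old == RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS, so releasing gives
 * new == RWSEM_WAITING_BIAS, which is negative with a zero active part
 * (new & RWSEM_ACTIVE_MASK) and rwsem_wake() is called.  For an uncontended
 * release, old == RWSEM_ACTIVE_WRITE_BIAS and new == 0, so no wakeup is
 * needed.
 */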
static inline void
__up_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = atomic_long_read(&sem->count);
		new = old - RWSEM_ACTIVE_WRITE_BIAS;
	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);

	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
	long tmp;
	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
			return 1;
		}
	}
	return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
	long tmp = atomic_long_cmpxchg_acquire(&sem->count,
			RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * downgrade write lock to read lock
 */
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
	long old, new;

	/*
	 * Removing RWSEM_WAITING_BIAS turns the write hold
	 * (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) into a plain read hold
	 * (RWSEM_ACTIVE_BIAS); any queued readers can then be woken.
	 */
	do {
		old = atomic_long_read(&sem->count);
		new = old - RWSEM_WAITING_BIAS;
	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);

	if (old < 0)
		rwsem_downgrade_wake(sem);
}

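/*
 * Usage sketch (illustrative only; the names example_sem, example_data,
 * example_reader() and example_writer() are hypothetical): kernel code does
 * not call the __down_*()/__up_*() helpers above directly, it goes through
 * the generic wrappers declared in <linux/rwsem.h>, which pair up as
 * follows:
 *
 *	static DECLARE_RWSEM(example_sem);
 *	static int example_data;
 *
 *	void example_reader(void)
 *	{
 *		down_read(&example_sem);	(shared critical section)
 *		(void)example_data;
 *		up_read(&example_sem);
 *	}
 *
 *	void example_writer(void)
 *	{
 *		down_write(&example_sem);	(exclusive critical section)
 *		example_data++;
 *		downgrade_write(&example_sem);	(keep only a read hold)
 *		(void)example_data;
 *		up_read(&example_sem);		(pairs with the downgrade)
 *	}
 */
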
#endif /* _ASM_IA64_RWSEM_H */