/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
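
/* Worked example (illustrative only): assuming L1_CACHE_BYTES == 64, an
 * atomic_t at address 0x1040 hashes to (0x1040 / 64) & 3 == 0x41 & 3 == 1,
 * so atomics sharing a cacheline always contend on the same lock, while
 * unrelated cachelines usually spread across the four locks.
 */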

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
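
/* A sketch of the reasoning: interrupts are disabled before taking the
 * hashed lock so that an IRQ handler doing an atomic op on the same
 * cacheline cannot spin forever against a half-finished op interrupted
 * on this CPU.
 */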


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
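
/* Illustrative semantics: if v->counter is 5, atomic_cmpxchg(v, 5, 7)
 * stores 7 and returns 5; had the counter been 6, nothing would be
 * stored and 6 would be returned.  atomic_xchg() stores unconditionally
 * and returns the previous value.
 */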

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
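
/* A sketch of how the generic layer is expected to use this helper
 * (roughly what <linux/atomic.h> provides; it is not defined here):
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 *
 * so callers see a boolean "did the add happen" while this helper
 * returns the old value.
 */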

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
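
/* For reference, ATOMIC_OPS(add, +=) above expands (modulo whitespace) to:
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		_atomic_spin_lock_irqsave(v, flags);
 *		v->counter += i;
 *		_atomic_spin_unlock_irqrestore(v, flags);
 *	}
 *
 * plus a matching atomic_add_return() that also returns the new value.
 */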

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }
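
/* Typical usage (hypothetical counter name, for illustration only):
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_widgets);
 *	if (atomic_dec_and_test(&nr_widgets))
 *		;	// counter just hit zero
 */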

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
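
/* These mirror the 32-bit ATOMIC_OP/ATOMIC_OP_RETURN machinery above,
 * with s64 operands; see the expansion sketch there.
 */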

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
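
/* Typical use (hypothetical object, for illustration): take a reference
 * only while at least one other reference still exists:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object already on its way out
 */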

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
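
/* Illustrative outcomes: with *v == 3 this stores 2 and returns 2; with
 * *v == 0 it stores nothing and returns -1, so a negative return means
 * "was not decremented".
 */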

#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */