#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
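
/*
 * atomic_read()/atomic_set() go through READ_ONCE()/WRITE_ONCE() so
 * the compiler emits exactly one access and cannot tear, refetch, or
 * cache the counter value.  A minimal usage sketch (illustrative
 * only, not part of this header):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	printk("%d\n", atomic_read(&nr_users));
 */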

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)
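
/*
 * ia64_atomic_add()/ia64_atomic_sub() are compare-and-exchange retry
 * loops: read the current value, compute the new one, and publish it
 * with an acquire-semantics cmpxchg.  If another CPU modified the
 * counter in between, the cmpxchg returns something other than "old"
 * and the loop retries.  Both helpers return the new value.
 */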

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})
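
/*
 * The constant checks above match the immediates accepted by the
 * ia64 fetchadd4/fetchadd8 instructions, which can only add one of
 * -16, -8, -4, -1, 1, 4, 8 or 16.  For those values a single
 * fetchadd suffices; anything else falls back to the cmpxchg loop.
 * atomic_sub_return() negates the constant so it can reuse the same
 * fetchadd fast path.
 */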

ATOMIC_OP(and, &)
ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
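
/*
 * The kernel's atomic_and()/atomic_or()/atomic_xor() interfaces
 * return void; the (void) cast makes discarding the new value
 * returned by the ia64_atomic_*() helpers explicit.
 */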

#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}
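
/*
 * The 64-bit ops mirror the 32-bit loop exactly; the 8-byte width of
 * atomic64_t simply makes ia64_cmpxchg() select the 8-byte
 * compare-and-exchange form.
 */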

ATOMIC64_OP(add, +)
ATOMIC64_OP(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)

#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
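
/*
 * atomic_cmpxchg() returns the value actually observed in the
 * counter: equal to "old" on success, the conflicting value
 * otherwise.  A minimal sketch of the usual update pattern
 * (compute() is illustrative, not a real helper):
 *
 *	int old = atomic_read(&v), seen;
 *
 *	while ((seen = atomic_cmpxchg(&v, old, compute(old))) != old)
 *		old = seen;
 */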

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
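
/*
 * __atomic_add_unless() adds "a" to the counter unless it currently
 * holds "u", and returns the value seen before any addition; callers
 * compare that against "u" to tell whether the add happened.  The
 * 64-bit variant below folds that comparison in and returns non-zero
 * iff it added.
 */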

static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
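
/*
 * atomic64_inc_not_zero() is the classic "take a reference only if
 * the object is still live" operation.  Hypothetical usage sketch:
 *
 *	if (!atomic64_inc_not_zero(&obj->refs))
 *		return NULL;	(object is already being torn down)
 */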

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
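
/*
 * atomic_dec_and_test() returns true only for the caller that drops
 * the count to zero, so exactly one CPU wins the right to clean up.
 * Hypothetical reference-counting sketch (free_obj() is illustrative):
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		free_obj(obj);
 */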

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */