/* NOTE: page-scrape artifact ("Loading...") removed; this file contains two concatenated revisions of asm-m32r atomic.h. */
1#ifndef _ASM_M32R_ATOMIC_H
2#define _ASM_M32R_ATOMIC_H
3
4/*
5 * linux/include/asm-m32r/atomic.h
6 *
7 * M32R version:
8 * Copyright (C) 2001, 2002 Hitoshi Yamamoto
9 * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
10 */
11
12#include <linux/types.h>
13#include <asm/assembler.h>
14#include <asm/cmpxchg.h>
15#include <asm/dcache_clear.h>
16
17/*
18 * Atomic operations that C can't guarantee us. Useful for
19 * resource counting etc..
20 */
21
#define ATOMIC_INIT(i) { (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 * The volatile cast forces a real load from memory on every call;
 * a naturally aligned word load is presumed atomic on m32r.
 */
#define atomic_read(v) (*(volatile int *)&(v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * A plain aligned word store is presumed atomic on m32r, so no
 * locking is required here.
 */
#define atomic_set(v,i) (((v)->counter) = (i))
40
/**
 * atomic_add_return - add integer to atomic variable and return it
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and return (@i + @v).
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int result;

        /*
         * IRQs are disabled so the load/add/store sequence below is
         * atomic on this CPU; M32R_LOCK/M32R_UNLOCK (<asm/assembler.h>)
         * presumably expand to the locked load/store forms needed for
         * SMP -- confirm against that header.
         */
        local_irq_save(flags);
        __asm__ __volatile__ (
                "# atomic_add_return \n\t"
                DCACHE_CLEAR("%0", "r4", "%1") /* errata workaround; no-op unless CONFIG_CHIP_M32700_TS1 */
                M32R_LOCK" %0, @%1; \n\t" /* result = v->counter */
                "add %0, %2; \n\t" /* result += i */
                M32R_UNLOCK" %0, @%1; \n\t" /* v->counter = result */
                : "=&r" (result) /* early-clobber: written before inputs are consumed */
                : "r" (&v->counter), "r" (i)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4" /* scratch register used by DCACHE_CLEAR */
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        return result;
}
71
/**
 * atomic_sub_return - subtract integer from atomic variable and return it
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and return (@v - @i).
 */
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int result;

        /* IRQs off: the locked load / sub / locked store below cannot be
         * interrupted on this CPU. */
        local_irq_save(flags);
        __asm__ __volatile__ (
                "# atomic_sub_return \n\t"
                DCACHE_CLEAR("%0", "r4", "%1") /* errata workaround; no-op unless CONFIG_CHIP_M32700_TS1 */
                M32R_LOCK" %0, @%1; \n\t" /* result = v->counter */
                "sub %0, %2; \n\t" /* result -= i */
                M32R_UNLOCK" %0, @%1; \n\t" /* v->counter = result */
                : "=&r" (result)
                : "r" (&v->counter), "r" (i)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4" /* scratch register used by DCACHE_CLEAR */
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        return result;
}
102
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 * Implemented on top of atomic_add_return; the (void) cast just
 * discards the returned value.
 */
#define atomic_add(i,v) ((void) atomic_add_return((i), (v)))

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
#define atomic_sub(i,v) ((void) atomic_sub_return((i), (v)))

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
131
/**
 * atomic_inc_return - increment atomic variable and return it
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the result.
 */
static __inline__ int atomic_inc_return(atomic_t *v)
{
        unsigned long flags;
        int result;

        /* IRQs off: locked load / addi / locked store is uninterruptible here. */
        local_irq_save(flags);
        __asm__ __volatile__ (
                "# atomic_inc_return \n\t"
                DCACHE_CLEAR("%0", "r4", "%1") /* errata workaround; no-op unless CONFIG_CHIP_M32700_TS1 */
                M32R_LOCK" %0, @%1; \n\t" /* result = v->counter */
                "addi %0, #1; \n\t" /* result += 1 */
                M32R_UNLOCK" %0, @%1; \n\t" /* v->counter = result */
                : "=&r" (result)
                : "r" (&v->counter)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4" /* scratch register used by DCACHE_CLEAR */
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        return result;
}
161
/**
 * atomic_dec_return - decrement atomic variable and return it
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the result.
 */
static __inline__ int atomic_dec_return(atomic_t *v)
{
        unsigned long flags;
        int result;

        /* IRQs off: the decrement is done as addi of -1 under the lock. */
        local_irq_save(flags);
        __asm__ __volatile__ (
                "# atomic_dec_return \n\t"
                DCACHE_CLEAR("%0", "r4", "%1") /* errata workaround; no-op unless CONFIG_CHIP_M32700_TS1 */
                M32R_LOCK" %0, @%1; \n\t" /* result = v->counter */
                "addi %0, #-1; \n\t" /* result -= 1 */
                M32R_UNLOCK" %0, @%1; \n\t" /* v->counter = result */
                : "=&r" (result)
                : "r" (&v->counter)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r4" /* scratch register used by DCACHE_CLEAR */
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);

        return result;
}
191
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) ((void)atomic_inc_return(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) ((void)atomic_dec_return(v))

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all
 * other cases.
 */
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)

/* Both forward to the generic cmpxchg/xchg (<asm/cmpxchg.h>) on the counter word. */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
241
242/**
243 * __atomic_add_unless - add unless the number is a given value
244 * @v: pointer of type atomic_t
245 * @a: the amount to add to v...
246 * @u: ...unless v is equal to u.
247 *
248 * Atomically adds @a to @v, so long as it was not @u.
249 * Returns the old value of @v.
250 */
251static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
252{
253 int c, old;
254 c = atomic_read(v);
255 for (;;) {
256 if (unlikely(c == (u)))
257 break;
258 old = atomic_cmpxchg((v), c, c + (a));
259 if (likely(old == c))
260 break;
261 c = old;
262 }
263 return c;
264}
265
266
/*
 * atomic_clear_mask - atomically clear the bits in @mask at @addr
 * (i.e. *addr &= ~mask), performed as a locked load/and/store with
 * interrupts disabled.  Note the scratch register here is r5, not r4.
 */
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
{
        unsigned long flags;
        unsigned long tmp;

        local_irq_save(flags);
        __asm__ __volatile__ (
                "# atomic_clear_mask \n\t"
                DCACHE_CLEAR("%0", "r5", "%1") /* errata workaround; no-op unless CONFIG_CHIP_M32700_TS1 */
                M32R_LOCK" %0, @%1; \n\t" /* tmp = *addr */
                "and %0, %2; \n\t" /* tmp &= ~mask */
                M32R_UNLOCK" %0, @%1; \n\t" /* *addr = tmp */
                : "=&r" (tmp)
                : "r" (addr), "r" (~mask) /* mask is inverted on the way in */
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r5" /* scratch register used by DCACHE_CLEAR */
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
288
/*
 * atomic_set_mask - atomically set the bits in @mask at @addr
 * (i.e. *addr |= mask), performed as a locked load/or/store with
 * interrupts disabled.  Scratch register is r5, matching
 * atomic_clear_mask above.
 */
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
{
        unsigned long flags;
        unsigned long tmp;

        local_irq_save(flags);
        __asm__ __volatile__ (
                "# atomic_set_mask \n\t"
                DCACHE_CLEAR("%0", "r5", "%1") /* errata workaround; no-op unless CONFIG_CHIP_M32700_TS1 */
                M32R_LOCK" %0, @%1; \n\t" /* tmp = *addr */
                "or %0, %2; \n\t" /* tmp |= mask */
                M32R_UNLOCK" %0, @%1; \n\t" /* *addr = tmp */
                : "=&r" (tmp)
                : "r" (addr), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r5" /* scratch register used by DCACHE_CLEAR */
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
310
/*
 * Atomic operations are already serializing on m32r, so these only
 * need to stop the compiler from reordering around the atomic op.
 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
316
317#endif /* _ASM_M32R_ATOMIC_H */
1#ifndef _ASM_M32R_ATOMIC_H
2#define _ASM_M32R_ATOMIC_H
3
4/*
5 * linux/include/asm-m32r/atomic.h
6 *
7 * M32R version:
8 * Copyright (C) 2001, 2002 Hitoshi Yamamoto
9 * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
10 */
11
12#include <linux/types.h>
13#include <asm/assembler.h>
14#include <asm/cmpxchg.h>
15#include <asm/dcache_clear.h>
16#include <asm/barrier.h>
17
18/*
19 * Atomic operations that C can't guarantee us. Useful for
20 * resource counting etc..
21 */
22
#define ATOMIC_INIT(i) { (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 * READ_ONCE forces a single, non-reordered load of the counter.
 */
#define atomic_read(v) READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * WRITE_ONCE forces a single, non-torn store of the counter.
 */
#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))

/*
 * On M32700 TS1 silicon the DCACHE_CLEAR workaround uses r4 as a
 * scratch register, so r4 must be added to the asm clobber lists.
 */
#ifdef CONFIG_CHIP_M32700_TS1
#define __ATOMIC_CLOBBER , "r4"
#else
#define __ATOMIC_CLOBBER
#endif
47
/*
 * ATOMIC_OP(op) - generate "static inline void atomic_<op>(int i, atomic_t *v)".
 * The operation is made atomic by disabling interrupts around a locked
 * load / <op> / locked store sequence; DCACHE_CLEAR is a chip-errata
 * workaround that is a no-op except on M32700 TS1 parts.
 */
#define ATOMIC_OP(op)                                                   \
static __inline__ int atomic_##op(int i, atomic_t *v)                   \
{                                                                       \
        unsigned long flags;                                            \
        int result;                                                     \
                                                                        \
        local_irq_save(flags);                                          \
        __asm__ __volatile__ (                                          \
                "# atomic_" #op " \n\t"                                 \
                DCACHE_CLEAR("%0", "r4", "%1")                          \
                M32R_LOCK" %0, @%1; \n\t"                               \
                #op " %0, %2; \n\t"                                     \
                M32R_UNLOCK" %0, @%1; \n\t"                             \
                : "=&r" (result)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory"                                              \
                __ATOMIC_CLOBBER                                        \
        );                                                              \
        local_irq_restore(flags);                                       \
}                                                                       \

/*
 * ATOMIC_OP_RETURN(op) - generate
 * "static inline int atomic_<op>_return(int i, atomic_t *v)" which
 * performs the op under IRQ-off + locked load/store and returns the
 * NEW value of the counter.
 */
#define ATOMIC_OP_RETURN(op)                                            \
static __inline__ int atomic_##op##_return(int i, atomic_t *v)          \
{                                                                       \
        unsigned long flags;                                            \
        int result;                                                     \
                                                                        \
        local_irq_save(flags);                                          \
        __asm__ __volatile__ (                                          \
                "# atomic_" #op "_return \n\t"                          \
                DCACHE_CLEAR("%0", "r4", "%1")                          \
                M32R_LOCK" %0, @%1; \n\t"                               \
                #op " %0, %2; \n\t"                                     \
                M32R_UNLOCK" %0, @%1; \n\t"                             \
                : "=&r" (result)                                        \
                : "r" (&v->counter), "r" (i)                            \
                : "memory"                                              \
                __ATOMIC_CLOBBER                                        \
        );                                                              \
        local_irq_restore(flags);                                       \
                                                                        \
        return result;                                                  \
}
91
/*
 * ATOMIC_FETCH_OP(op) - generate
 * "static inline int atomic_fetch_<op>(int i, atomic_t *v)" which
 * performs the op under IRQ-off + locked load/store and returns the
 * OLD value of the counter (saved into %0 via "mv" before the op is
 * applied to %1, the value that gets stored back).
 */
#define ATOMIC_FETCH_OP(op)                                             \
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)             \
{                                                                       \
        unsigned long flags;                                            \
        int result, val;                                                \
                                                                        \
        local_irq_save(flags);                                          \
        __asm__ __volatile__ (                                          \
                "# atomic_fetch_" #op " \n\t"                           \
                DCACHE_CLEAR("%0", "r4", "%2")                          \
                M32R_LOCK" %1, @%2; \n\t"                               \
                "mv %0, %1 \n\t"                                        \
                #op " %1, %3; \n\t"                                     \
                M32R_UNLOCK" %1, @%2; \n\t"                             \
                : "=&r" (result), "=&r" (val)                           \
                : "r" (&v->counter), "r" (i)                            \
                : "memory"                                              \
                __ATOMIC_CLOBBER                                        \
        );                                                              \
        local_irq_restore(flags);                                       \
                                                                        \
        return result;                                                  \
}
115
/*
 * Instantiate the atomic primitives.  add/sub get all three forms
 * (void atomic_<op>, atomic_<op>_return, atomic_fetch_<op>); the
 * bitwise ops get only the void and fetch_* forms, matching the
 * generic atomic API.
 */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

/* The generator macros are implementation details -- drop them. */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
132
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.  Built on atomic_sub_return (generated above).
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
143
/**
 * atomic_inc_return - increment atomic variable and return it
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the result.
 */
static __inline__ int atomic_inc_return(atomic_t *v)
{
        unsigned long flags;
        int result;

        /* IRQs off: locked load / addi / locked store is uninterruptible here. */
        local_irq_save(flags);
        __asm__ __volatile__ (
                "# atomic_inc_return \n\t"
                DCACHE_CLEAR("%0", "r4", "%1") /* errata workaround; no-op unless CONFIG_CHIP_M32700_TS1 */
                M32R_LOCK" %0, @%1; \n\t" /* result = v->counter */
                "addi %0, #1; \n\t" /* result += 1 */
                M32R_UNLOCK" %0, @%1; \n\t" /* v->counter = result */
                : "=&r" (result)
                : "r" (&v->counter)
                : "memory"
                __ATOMIC_CLOBBER /* adds r4 on M32700 TS1 parts */
        );
        local_irq_restore(flags);

        return result;
}
171
/**
 * atomic_dec_return - decrement atomic variable and return it
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the result.
 */
static __inline__ int atomic_dec_return(atomic_t *v)
{
        unsigned long flags;
        int result;

        /* IRQs off: the decrement is done as addi of -1 under the lock. */
        local_irq_save(flags);
        __asm__ __volatile__ (
                "# atomic_dec_return \n\t"
                DCACHE_CLEAR("%0", "r4", "%1") /* errata workaround; no-op unless CONFIG_CHIP_M32700_TS1 */
                M32R_LOCK" %0, @%1; \n\t" /* result = v->counter */
                "addi %0, #-1; \n\t" /* result -= 1 */
                M32R_UNLOCK" %0, @%1; \n\t" /* v->counter = result */
                : "=&r" (result)
                : "r" (&v->counter)
                : "memory"
                __ATOMIC_CLOBBER /* adds r4 on M32700 TS1 parts */
        );
        local_irq_restore(flags);

        return result;
}
199
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) ((void)atomic_inc_return(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) ((void)atomic_dec_return(v))

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all
 * other cases.
 */
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)

/* Both forward to the generic cmpxchg/xchg (<asm/cmpxchg.h>) on the counter word. */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
249
250/**
251 * __atomic_add_unless - add unless the number is a given value
252 * @v: pointer of type atomic_t
253 * @a: the amount to add to v...
254 * @u: ...unless v is equal to u.
255 *
256 * Atomically adds @a to @v, so long as it was not @u.
257 * Returns the old value of @v.
258 */
259static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
260{
261 int c, old;
262 c = atomic_read(v);
263 for (;;) {
264 if (unlikely(c == (u)))
265 break;
266 old = atomic_cmpxchg((v), c, c + (a));
267 if (likely(old == c))
268 break;
269 c = old;
270 }
271 return c;
272}
273
274#endif /* _ASM_M32R_ATOMIC_H */