/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <linux/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u);
}
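
/*
 * For orientation, a sketch of how the generic layer is expected to use
 * this routine (the actual wrapper lives in <linux/atomic.h>, not here):
 * the non-underscore atomic_add_unless() just compares the returned old
 * value against @u, roughly:
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */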

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}
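
/*
 * Illustrative interleaving (a sketch of the hazard described above): if
 * another cpu performed a plain store while this cpu was inside one of
 * the lock-based read/modify/write routines, the store could vanish:
 *
 *	cpu 0 (atomic_add)		cpu 1 (hypothetical raw store)
 *	  load v->counter
 *					  v->counter = n;	<-- lost
 *	  store v->counter + i
 *
 * Routing atomic_set() through _atomic_xchg() makes it take the same
 * per-word lock as the other operations, so the store cannot be lost.
 */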

/* A 64-bit atomic type */

typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb(); /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
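
/*
 * Example (a sketch with hypothetical names): atomic64_inc_not_zero()
 * supports the usual "take a reference only if the count is still
 * non-zero" pattern; if it returns zero the object is already being
 * torn down and must not be used:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */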

/*
 * We need a barrier before modifying the word, since the _atomic_xxx()
 * routines just tns the lock and then do a read/modify/write of the word.
 * But after the word is updated, the routine issues an "mf" before returning,
 * and since it's a function call, we don't even need a compiler barrier.
 */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_dec() do { } while (0)
#define smp_mb__after_atomic_inc() do { } while (0)
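
/*
 * Typical call site (a sketch with hypothetical names): the _before_
 * variants order earlier stores against a following atomic op that has
 * no return value, e.g.
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->refcnt);
 *
 * Because the lock-based routines already issue "mf" on the way out (see
 * the comment above), the _after_ variants can be empty here.
 */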

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
	(!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT 6
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)
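
/*
 * Worked example (assuming, purely for illustration, a 64 KB page, i.e.
 * PAGE_SHIFT = 16): ATOMIC_HASH_SHIFT = 16 - 3 = 13, so ATOMIC_HASH_SIZE
 * is 8192 locks.  At 4 bytes each that is 32 KB of locks, which fits on
 * the single page required above and matches the PAGE_SIZE / 8 upper
 * bound on useful entries.
 */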

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock. Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * discourage assembly coders from using this register by mistake, so
 * if it is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);
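
/*
 * For orientation, a sketch (an assumption on our part; the real
 * definitions live elsewhere in the arch code, not in this header) of how
 * the C wrappers declared near the top of this file can be built from
 * these helpers: pick the hashed lock for the word and forward to the
 * assembly routine, along the lines of:
 *
 *	int _atomic_xchg(atomic_t *v, int n)
 *	{
 *		return __atomic_xchg(&v->counter,
 *				     __atomic_hashed_lock(v), n).val;
 *	}
 */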

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */