Linux v6.2 (arch/loongarch/include/asm/atomic.h)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * Atomic operations.
  4 *
  5 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  6 */
  7#ifndef _ASM_ATOMIC_H
  8#define _ASM_ATOMIC_H
  9
 10#include <linux/types.h>
 11#include <asm/barrier.h>
 12#include <asm/cmpxchg.h>
 13
 14#if __SIZEOF_LONG__ == 4
 15#define __LL		"ll.w	"
 16#define __SC		"sc.w	"
 17#define __AMADD		"amadd.w	"
 18#define __AMAND_DB	"amand_db.w	"
 19#define __AMOR_DB	"amor_db.w	"
 20#define __AMXOR_DB	"amxor_db.w	"
 21#elif __SIZEOF_LONG__ == 8
 22#define __LL		"ll.d	"
 23#define __SC		"sc.d	"
 24#define __AMADD		"amadd.d	"
 25#define __AMAND_DB	"amand_db.d	"
 26#define __AMOR_DB	"amor_db.d	"
 27#define __AMXOR_DB	"amxor_db.d	"
 28#endif
 29
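The __LL/__SC (and __AM*) string macros above select the word-sized or doubleword-sized encodings of LoongArch's load-linked/store-conditional and atomic-memory instructions according to the size of long, so inline assembly built from them works on both 32-bit and 64-bit configurations. As a rough, hedged sketch (editor's illustration only; the function name is invented and this code is not part of the header), a generic LL/SC exchange loop built on them could look like this:

/* Editor's illustration only -- not in the kernel source. */
static inline long __example_llsc_xchg(long *p, long val)
{
	long old, tmp;

	__asm__ __volatile__(
	"1:	" __LL "%0, %2		\n"	/* old = *p, set the LL bit        */
	"	move	%1, %3		\n"	/* tmp = val                       */
	"	" __SC "%1, %2		\n"	/* store tmp if the LL bit held    */
	"	beqz	%1, 1b		\n"	/* %1 == 0 means the SC failed     */
	: "=&r" (old), "=&r" (tmp), "+ZC" (*p)
	: "r" (val)
	: "memory");

	return old;
}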
 30#define ATOMIC_INIT(i)	  { (i) }
 31
 32/*
 33 * arch_atomic_read - read atomic variable
 34 * @v: pointer of type atomic_t
 35 *
 36 * Atomically reads the value of @v.
 37 */
 38#define arch_atomic_read(v)	READ_ONCE((v)->counter)
 39
 40/*
 41 * arch_atomic_set - set atomic variable
 42 * @v: pointer of type atomic_t
 43 * @i: required value
 44 *
 45 * Atomically sets the value of @v to @i.
 46 */
 47#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
 48
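Both helpers compile to a single plain load or store wrapped in READ_ONCE()/WRITE_ONCE(), so they are atomic with respect to other CPUs but impose no ordering by themselves. A minimal usage sketch (editor's illustration; the names below are invented):

/* Editor's illustration only -- not in the kernel source. */
static atomic_t __example_count = ATOMIC_INIT(0);

static inline int __example_snapshot_and_clear(void)
{
	int old = arch_atomic_read(&__example_count);	/* one ordinary load  */

	arch_atomic_set(&__example_count, 0);		/* one ordinary store */
	return old;					/* note: the pair is not atomic as a whole */
}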
 49#define ATOMIC_OP(op, I, asm_op)					\
 50static inline void arch_atomic_##op(int i, atomic_t *v)			\
 51{									\
 52	__asm__ __volatile__(						\
 53	"am"#asm_op"_db.w" " $zero, %1, %0	\n"			\
 54	: "+ZB" (v->counter)						\
 55	: "r" (I)							\
 56	: "memory");							\
 57}
 58
 59#define ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
 60static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 61{									\
 62	int result;							\
 63									\
 64	__asm__ __volatile__(						\
 65	"am"#asm_op"_db.w" " %1, %2, %0		\n"			\
 66	: "+ZB" (v->counter), "=&r" (result)				\
 67	: "r" (I)							\
 68	: "memory");							\
 69									\
 70	return result c_op I;						\
 71}
 72
 73#define ATOMIC_FETCH_OP(op, I, asm_op)					\
 74static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 75{									\
 76	int result;							\
 77									\
 78	__asm__ __volatile__(						\
 79	"am"#asm_op"_db.w" " %1, %2, %0		\n"			\
 80	: "+ZB" (v->counter), "=&r" (result)				\
 81	: "r" (I)							\
 82	: "memory");							\
 83									\
 84	return result;							\
 85}
 86
 87#define ATOMIC_OPS(op, I, asm_op, c_op)					\
 88	ATOMIC_OP(op, I, asm_op)					\
 89	ATOMIC_OP_RETURN(op, I, asm_op, c_op)				\
 90	ATOMIC_FETCH_OP(op, I, asm_op)
 91
 92ATOMIC_OPS(add, i, add, +)
 93ATOMIC_OPS(sub, -i, add, +)
 94
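For reference, here is roughly what the ATOMIC_OPS(add, i, add, +) line above expands to after preprocessing (an editor's reconstruction, showing two of the three generated functions). The am*_db.* instructions return the old value of the memory word in the destination register, which is why the *_return variant adds the operand back before returning:

/* Editor's reconstruction of the macro expansion -- not literally in the file. */
static inline void arch_atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
	"amadd_db.w $zero, %1, %0	\n"	/* old value discarded into $zero */
	: "+ZB" (v->counter)
	: "r" (i)
	: "memory");
}

static inline int arch_atomic_add_return_relaxed(int i, atomic_t *v)
{
	int result;

	__asm__ __volatile__(
	"amadd_db.w %1, %2, %0	\n"		/* result = old value of v->counter */
	: "+ZB" (v->counter), "=&r" (result)
	: "r" (i)
	: "memory");

	return result + i;			/* c_op and I from the macro: "+" and "i" */
}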
 95#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
 96#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
 97#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
 98#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
 99
100#undef ATOMIC_OPS
101
102#define ATOMIC_OPS(op, I, asm_op)					\
103	ATOMIC_OP(op, I, asm_op)					\
104	ATOMIC_FETCH_OP(op, I, asm_op)
105
106ATOMIC_OPS(and, i, and)
107ATOMIC_OPS(or, i, or)
108ATOMIC_OPS(xor, i, xor)
109
110#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
111#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
112#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
113
114#undef ATOMIC_OPS
115#undef ATOMIC_FETCH_OP
116#undef ATOMIC_OP_RETURN
117#undef ATOMIC_OP
118
119static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
120{
121	int prev, rc;
122
123	__asm__ __volatile__ (
124		"0:	ll.w	%[p],  %[c]\n"
125		"	beq	%[p],  %[u], 1f\n"
126		"	add.w	%[rc], %[p], %[a]\n"
127		"	sc.w	%[rc], %[c]\n"
128		"	beqz	%[rc], 0b\n"
129		"	b	2f\n"
130		"1:\n"
131		__WEAK_LLSC_MB
132		"2:\n"
133		: [p]"=&r" (prev), [rc]"=&r" (rc),
134		  [c]"=ZB" (v->counter)
135		: [a]"r" (a), [u]"r" (u)
136		: "memory");
137
138	return prev;
139}
140#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
141
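The LL/SC loop above adds @a to the counter unless it currently equals @u, and always returns the value it observed; the __WEAK_LLSC_MB barrier covers the early-exit path, where no SC is executed. Callers normally reach this through the generic atomic_fetch_add_unless()/atomic_inc_not_zero() wrappers; a hedged usage sketch follows (editor's illustration; the function name is invented):

/* Editor's illustration only -- not in the kernel source. */
static inline bool __example_try_get(atomic_t *refcount)
{
	/* Add 1 unless the count is already 0; the old value is returned. */
	return arch_atomic_fetch_add_unless(refcount, 1, 0) != 0;
}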
142/*
143 * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
144 * @i: integer value to subtract
145 * @v: pointer of type atomic_t
146 *
147 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
148 * The function returns the old value of @v minus @i.
149 */
150static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
151{
152	int result;
153	int temp;
154
155	if (__builtin_constant_p(i)) {
156		__asm__ __volatile__(
157		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
158		"	addi.w	%0, %1, %3				\n"
159		"	move	%1, %0					\n"
160		"	bltz	%0, 2f					\n"
161		"	sc.w	%1, %2					\n"
162		"	beqz	%1, 1b					\n"
163		"2:							\n"
164		__WEAK_LLSC_MB
165		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
166		: "I" (-i));
167	} else {
168		__asm__ __volatile__(
169		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
170		"	sub.w	%0, %1, %3				\n"
171		"	move	%1, %0					\n"
172		"	bltz	%0, 2f					\n"
173		"	sc.w	%1, %2					\n"
174		"	beqz	%1, 1b					\n"
175		"2:							\n"
176		__WEAK_LLSC_MB
177		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
178		: "r" (i));
179	}
180
181	return result;
182}
183
184#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
185#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
186
187/*
188 * arch_atomic_dec_if_positive - decrement by 1 if old value positive
189 * @v: pointer of type atomic_t
190 */
191#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
192
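arch_atomic_cmpxchg() returns the value it found (the swap happens only when that equals the expected value), and arch_atomic_dec_if_positive() returns the decremented value, performing the store only when that result is not negative. Two hedged usage sketches (editor's illustration; both function names are invented):

/* Editor's illustration only -- not in the kernel source. */
static inline bool __example_claim_once(atomic_t *state)
{
	/* Transition 0 -> 1 exactly once; cmpxchg returns the value it saw. */
	return arch_atomic_cmpxchg(state, 0, 1) == 0;
}

static inline bool __example_try_down(atomic_t *sem)
{
	/* Decrement only if the result stays >= 0; a negative result means
	 * the count was already <= 0 and nothing was stored. */
	return arch_atomic_dec_if_positive(sem) >= 0;
}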
193#ifdef CONFIG_64BIT
194
195#define ATOMIC64_INIT(i)    { (i) }
196
197/*
198 * arch_atomic64_read - read atomic variable
199 * @v: pointer of type atomic64_t
200 * Atomically reads the value of @v.
201 */
202#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
203
204/*
205 * arch_atomic64_set - set atomic variable
206 * @v: pointer of type atomic64_t
207 * @i: required value
208 */
209#define arch_atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
210
211#define ATOMIC64_OP(op, I, asm_op)					\
212static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
213{									\
214	__asm__ __volatile__(						\
215	"am"#asm_op"_db.d " " $zero, %1, %0	\n"			\
216	: "+ZB" (v->counter)						\
217	: "r" (I)							\
218	: "memory");							\
219}
220
221#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op)					\
222static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)	\
223{										\
224	long result;								\
225	__asm__ __volatile__(							\
226	"am"#asm_op"_db.d " " %1, %2, %0		\n"			\
227	: "+ZB" (v->counter), "=&r" (result)					\
228	: "r" (I)								\
229	: "memory");								\
230										\
231	return result c_op I;							\
232}
233
234#define ATOMIC64_FETCH_OP(op, I, asm_op)					\
235static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v)	\
236{										\
237	long result;								\
238										\
239	__asm__ __volatile__(							\
240	"am"#asm_op"_db.d " " %1, %2, %0		\n"			\
241	: "+ZB" (v->counter), "=&r" (result)					\
242	: "r" (I)								\
243	: "memory");								\
244										\
245	return result;								\
246}
247
248#define ATOMIC64_OPS(op, I, asm_op, c_op)				      \
249	ATOMIC64_OP(op, I, asm_op)					      \
250	ATOMIC64_OP_RETURN(op, I, asm_op, c_op)				      \
251	ATOMIC64_FETCH_OP(op, I, asm_op)
252
253ATOMIC64_OPS(add, i, add, +)
254ATOMIC64_OPS(sub, -i, add, +)
255
256#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
257#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
258#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
259#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed
260
261#undef ATOMIC64_OPS
262
263#define ATOMIC64_OPS(op, I, asm_op)					      \
264	ATOMIC64_OP(op, I, asm_op)					      \
265	ATOMIC64_FETCH_OP(op, I, asm_op)
266
267ATOMIC64_OPS(and, i, and)
268ATOMIC64_OPS(or, i, or)
269ATOMIC64_OPS(xor, i, xor)
270
271#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
272#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
273#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
274
275#undef ATOMIC64_OPS
276#undef ATOMIC64_FETCH_OP
277#undef ATOMIC64_OP_RETURN
278#undef ATOMIC64_OP
279
280static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
281{
282	long prev, rc;
283
284	__asm__ __volatile__ (
285		"0:	ll.d	%[p],  %[c]\n"
286		"	beq	%[p],  %[u], 1f\n"
287		"	add.d	%[rc], %[p], %[a]\n"
288		"	sc.d	%[rc], %[c]\n"
289		"	beqz	%[rc], 0b\n"
290		"	b	2f\n"
291		"1:\n"
292		__WEAK_LLSC_MB
293		"2:\n"
294		: [p]"=&r" (prev), [rc]"=&r" (rc),
295		  [c] "=ZB" (v->counter)
296		: [a]"r" (a), [u]"r" (u)
297		: "memory");
298
299	return prev;
300}
301#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
302
303/*
304 * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
305 * @i: integer value to subtract
306 * @v: pointer of type atomic64_t
307 *
308 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
309 * The function returns the old value of @v minus @i.
310 */
311static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
312{
313	long result;
314	long temp;
315
316	if (__builtin_constant_p(i)) {
317		__asm__ __volatile__(
318		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
319		"	addi.d	%0, %1, %3				\n"
320		"	move	%1, %0					\n"
321		"	bltz	%0, 2f					\n"
322		"	sc.d	%1, %2					\n"
323		"	beqz	%1, 1b					\n"
324		"2:							\n"
325		__WEAK_LLSC_MB
326		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
327		: "I" (-i));
328	} else {
329		__asm__ __volatile__(
330		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
331		"	sub.d	%0, %1, %3				\n"
332		"	move	%1, %0					\n"
333		"	bltz	%0, 2f					\n"
334		"	sc.d	%1, %2					\n"
335		"	beqz	%1, 1b					\n"
336		"2:							\n"
337		__WEAK_LLSC_MB
338		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
339		: "r" (i));
340	}
341
342	return result;
343}
344
345#define arch_atomic64_cmpxchg(v, o, n) \
346	((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
347#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
348
349/*
350 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
351 * @v: pointer of type atomic64_t
352 */
353#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
354
355#endif /* CONFIG_64BIT */
356
357#endif /* _ASM_ATOMIC_H */
Linux v6.8 (arch/loongarch/include/asm/atomic.h)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * Atomic operations.
  4 *
  5 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  6 */
  7#ifndef _ASM_ATOMIC_H
  8#define _ASM_ATOMIC_H
  9
 10#include <linux/types.h>
 11#include <asm/barrier.h>
 12#include <asm/cmpxchg.h>
 13
 14#if __SIZEOF_LONG__ == 4
 15#define __LL		"ll.w	"
 16#define __SC		"sc.w	"
 17#define __AMADD		"amadd.w	"
 18#define __AMAND_DB	"amand_db.w	"
 19#define __AMOR_DB	"amor_db.w	"
 20#define __AMXOR_DB	"amxor_db.w	"
 21#elif __SIZEOF_LONG__ == 8
 22#define __LL		"ll.d	"
 23#define __SC		"sc.d	"
 24#define __AMADD		"amadd.d	"
 25#define __AMAND_DB	"amand_db.d	"
 26#define __AMOR_DB	"amor_db.d	"
 27#define __AMXOR_DB	"amxor_db.d	"
 28#endif
 29
 30#define ATOMIC_INIT(i)	  { (i) }
 31
 32#define arch_atomic_read(v)	READ_ONCE((v)->counter)
 33#define arch_atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
 34
 35#define ATOMIC_OP(op, I, asm_op)					\
 36static inline void arch_atomic_##op(int i, atomic_t *v)			\
 37{									\
 38	__asm__ __volatile__(						\
 39	"am"#asm_op".w" " $zero, %1, %0	\n"				\
 40	: "+ZB" (v->counter)						\
 41	: "r" (I)							\
 42	: "memory");							\
 43}
 44
 45#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix)		\
 46static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v)	\
 47{									\
 48	int result;							\
 49									\
 50	__asm__ __volatile__(						\
 51	"am"#asm_op#mb".w" " %1, %2, %0		\n"			\
 52	: "+ZB" (v->counter), "=&r" (result)				\
 53	: "r" (I)							\
 54	: "memory");							\
 55									\
 56	return result c_op I;						\
 57}
 58
 59#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix)			\
 60static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v)	\
 61{									\
 62	int result;							\
 63									\
 64	__asm__ __volatile__(						\
 65	"am"#asm_op#mb".w" " %1, %2, %0		\n"			\
 66	: "+ZB" (v->counter), "=&r" (result)				\
 67	: "r" (I)							\
 68	: "memory");							\
 69									\
 70	return result;							\
 71}
 72
 73#define ATOMIC_OPS(op, I, asm_op, c_op)					\
 74	ATOMIC_OP(op, I, asm_op)					\
 75	ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db,         )		\
 76	ATOMIC_OP_RETURN(op, I, asm_op, c_op,    , _relaxed)		\
 77	ATOMIC_FETCH_OP(op, I, asm_op, _db,         )			\
 78	ATOMIC_FETCH_OP(op, I, asm_op,    , _relaxed)
 79
 80ATOMIC_OPS(add, i, add, +)
 81ATOMIC_OPS(sub, -i, add, +)
 82
 83#define arch_atomic_add_return		arch_atomic_add_return
 84#define arch_atomic_add_return_acquire	arch_atomic_add_return
 85#define arch_atomic_add_return_release	arch_atomic_add_return
 86#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
 87#define arch_atomic_sub_return		arch_atomic_sub_return
 88#define arch_atomic_sub_return_acquire	arch_atomic_sub_return
 89#define arch_atomic_sub_return_release	arch_atomic_sub_return
 90#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
 91#define arch_atomic_fetch_add		arch_atomic_fetch_add
 92#define arch_atomic_fetch_add_acquire	arch_atomic_fetch_add
 93#define arch_atomic_fetch_add_release	arch_atomic_fetch_add
 94#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
 95#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
 96#define arch_atomic_fetch_sub_acquire	arch_atomic_fetch_sub
 97#define arch_atomic_fetch_sub_release	arch_atomic_fetch_sub
 98#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
 99
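This is the main difference from the v6.2 version of the file: instead of emitting only *_relaxed return/fetch variants and letting the generic atomic layer add barriers, the v6.8 macros take an extra mb/suffix pair and generate both a fully ordered variant built on the am*_db.* instructions and a relaxed variant built on the plain am*.* forms, with the acquire/release names aliased to the fully ordered one above. Roughly, as an editor's reconstruction (not literally in the file):

/* Editor's reconstruction of the v6.8 expansion -- not literally in the file. */
static inline int arch_atomic_add_return(int i, atomic_t *v)		/* mb = "_db" */
{
	int result;

	__asm__ __volatile__(
	"amadd_db.w %1, %2, %0	\n"		/* AM form with barrier hint */
	: "+ZB" (v->counter), "=&r" (result)
	: "r" (i)
	: "memory");

	return result + i;
}

static inline int arch_atomic_add_return_relaxed(int i, atomic_t *v)	/* mb = "" */
{
	int result;

	__asm__ __volatile__(
	"amadd.w %1, %2, %0	\n"		/* plain AM form, no ordering */
	: "+ZB" (v->counter), "=&r" (result)
	: "r" (i)
	: "memory");

	return result + i;
}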
100#undef ATOMIC_OPS
101
102#define ATOMIC_OPS(op, I, asm_op)					\
103	ATOMIC_OP(op, I, asm_op)					\
104	ATOMIC_FETCH_OP(op, I, asm_op, _db,         )			\
105	ATOMIC_FETCH_OP(op, I, asm_op,    , _relaxed)
106
107ATOMIC_OPS(and, i, and)
108ATOMIC_OPS(or, i, or)
109ATOMIC_OPS(xor, i, xor)
110
111#define arch_atomic_fetch_and		arch_atomic_fetch_and
112#define arch_atomic_fetch_and_acquire	arch_atomic_fetch_and
113#define arch_atomic_fetch_and_release	arch_atomic_fetch_and
114#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
115#define arch_atomic_fetch_or		arch_atomic_fetch_or
116#define arch_atomic_fetch_or_acquire	arch_atomic_fetch_or
117#define arch_atomic_fetch_or_release	arch_atomic_fetch_or
118#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
119#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
120#define arch_atomic_fetch_xor_acquire	arch_atomic_fetch_xor
121#define arch_atomic_fetch_xor_release	arch_atomic_fetch_xor
122#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
123
124#undef ATOMIC_OPS
125#undef ATOMIC_FETCH_OP
126#undef ATOMIC_OP_RETURN
127#undef ATOMIC_OP
128
129static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
130{
131	int prev, rc;
132
133	__asm__ __volatile__ (
134		"0:	ll.w	%[p],  %[c]\n"
135		"	beq	%[p],  %[u], 1f\n"
136		"	add.w	%[rc], %[p], %[a]\n"
137		"	sc.w	%[rc], %[c]\n"
138		"	beqz	%[rc], 0b\n"
139		"	b	2f\n"
140		"1:\n"
141		__WEAK_LLSC_MB
142		"2:\n"
143		: [p]"=&r" (prev), [rc]"=&r" (rc),
144		  [c]"=ZB" (v->counter)
145		: [a]"r" (a), [u]"r" (u)
146		: "memory");
147
148	return prev;
149}
150#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
151
152static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
153{
154	int result;
155	int temp;
156
157	if (__builtin_constant_p(i)) {
158		__asm__ __volatile__(
159		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
160		"	addi.w	%0, %1, %3				\n"
161		"	move	%1, %0					\n"
162		"	bltz	%0, 2f					\n"
163		"	sc.w	%1, %2					\n"
164		"	beqz	%1, 1b					\n"
165		"2:							\n"
166		__WEAK_LLSC_MB
167		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
168		: "I" (-i));
169	} else {
170		__asm__ __volatile__(
171		"1:	ll.w	%1, %2		# atomic_sub_if_positive\n"
172		"	sub.w	%0, %1, %3				\n"
173		"	move	%1, %0					\n"
174		"	bltz	%0, 2f					\n"
175		"	sc.w	%1, %2					\n"
176		"	beqz	%1, 1b					\n"
177		"2:							\n"
178		__WEAK_LLSC_MB
179		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
180		: "r" (i));
181	}
182
183	return result;
184}
185
186#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
187
188#ifdef CONFIG_64BIT
189
190#define ATOMIC64_INIT(i)    { (i) }
191
192#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
193#define arch_atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
194
195#define ATOMIC64_OP(op, I, asm_op)					\
196static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
197{									\
198	__asm__ __volatile__(						\
199	"am"#asm_op".d " " $zero, %1, %0	\n"			\
200	: "+ZB" (v->counter)						\
201	: "r" (I)							\
202	: "memory");							\
203}
204
205#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix)			\
206static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v)	\
207{										\
208	long result;								\
209	__asm__ __volatile__(							\
210	"am"#asm_op#mb".d " " %1, %2, %0		\n"			\
211	: "+ZB" (v->counter), "=&r" (result)					\
212	: "r" (I)								\
213	: "memory");								\
214										\
215	return result c_op I;							\
216}
217
218#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix)				\
219static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v)	\
220{										\
221	long result;								\
222										\
223	__asm__ __volatile__(							\
224	"am"#asm_op#mb".d " " %1, %2, %0		\n"			\
225	: "+ZB" (v->counter), "=&r" (result)					\
226	: "r" (I)								\
227	: "memory");								\
228										\
229	return result;								\
230}
231
232#define ATOMIC64_OPS(op, I, asm_op, c_op)				      \
233	ATOMIC64_OP(op, I, asm_op)					      \
234	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db,         )		      \
235	ATOMIC64_OP_RETURN(op, I, asm_op, c_op,    , _relaxed)		      \
236	ATOMIC64_FETCH_OP(op, I, asm_op, _db,         )			      \
237	ATOMIC64_FETCH_OP(op, I, asm_op,    , _relaxed)
238
239ATOMIC64_OPS(add, i, add, +)
240ATOMIC64_OPS(sub, -i, add, +)
241
242#define arch_atomic64_add_return		arch_atomic64_add_return
243#define arch_atomic64_add_return_acquire	arch_atomic64_add_return
244#define arch_atomic64_add_return_release	arch_atomic64_add_return
245#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
246#define arch_atomic64_sub_return		arch_atomic64_sub_return
247#define arch_atomic64_sub_return_acquire	arch_atomic64_sub_return
248#define arch_atomic64_sub_return_release	arch_atomic64_sub_return
249#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
250#define arch_atomic64_fetch_add			arch_atomic64_fetch_add
251#define arch_atomic64_fetch_add_acquire		arch_atomic64_fetch_add
252#define arch_atomic64_fetch_add_release		arch_atomic64_fetch_add
253#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
254#define arch_atomic64_fetch_sub			arch_atomic64_fetch_sub
255#define arch_atomic64_fetch_sub_acquire		arch_atomic64_fetch_sub
256#define arch_atomic64_fetch_sub_release		arch_atomic64_fetch_sub
257#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed
258
259#undef ATOMIC64_OPS
260
261#define ATOMIC64_OPS(op, I, asm_op)					      \
262	ATOMIC64_OP(op, I, asm_op)					      \
263	ATOMIC64_FETCH_OP(op, I, asm_op, _db,         )			      \
264	ATOMIC64_FETCH_OP(op, I, asm_op,    , _relaxed)
265
266ATOMIC64_OPS(and, i, and)
267ATOMIC64_OPS(or, i, or)
268ATOMIC64_OPS(xor, i, xor)
269
270#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
271#define arch_atomic64_fetch_and_acquire	arch_atomic64_fetch_and
272#define arch_atomic64_fetch_and_release	arch_atomic64_fetch_and
273#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
274#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
275#define arch_atomic64_fetch_or_acquire	arch_atomic64_fetch_or
276#define arch_atomic64_fetch_or_release	arch_atomic64_fetch_or
277#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
278#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
279#define arch_atomic64_fetch_xor_acquire	arch_atomic64_fetch_xor
280#define arch_atomic64_fetch_xor_release	arch_atomic64_fetch_xor
281#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
282
283#undef ATOMIC64_OPS
284#undef ATOMIC64_FETCH_OP
285#undef ATOMIC64_OP_RETURN
286#undef ATOMIC64_OP
287
288static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
289{
290	long prev, rc;
291
292	__asm__ __volatile__ (
293		"0:	ll.d	%[p],  %[c]\n"
294		"	beq	%[p],  %[u], 1f\n"
295		"	add.d	%[rc], %[p], %[a]\n"
296		"	sc.d	%[rc], %[c]\n"
297		"	beqz	%[rc], 0b\n"
298		"	b	2f\n"
299		"1:\n"
300		__WEAK_LLSC_MB
301		"2:\n"
302		: [p]"=&r" (prev), [rc]"=&r" (rc),
303		  [c] "=ZB" (v->counter)
304		: [a]"r" (a), [u]"r" (u)
305		: "memory");
306
307	return prev;
308}
309#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
310
311static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
312{
313	long result;
314	long temp;
315
316	if (__builtin_constant_p(i)) {
317		__asm__ __volatile__(
318		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
319		"	addi.d	%0, %1, %3				\n"
320		"	move	%1, %0					\n"
321		"	bltz	%0, 2f					\n"
322		"	sc.d	%1, %2					\n"
323		"	beqz	%1, 1b					\n"
324		"2:							\n"
325		__WEAK_LLSC_MB
326		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
327		: "I" (-i));
328	} else {
329		__asm__ __volatile__(
330		"1:	ll.d	%1, %2	# atomic64_sub_if_positive	\n"
331		"	sub.d	%0, %1, %3				\n"
332		"	move	%1, %0					\n"
333		"	bltz	%0, 2f					\n"
334		"	sc.d	%1, %2					\n"
335		"	beqz	%1, 1b					\n"
336		"2:							\n"
337		__WEAK_LLSC_MB
338		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
339		: "r" (i));
340	}
341
342	return result;
343}
344
345#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
346
347#endif /* CONFIG_64BIT */
348
349#endif /* _ASM_ATOMIC_H */