v5.14.15
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define arch_atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
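/*
 * Editor illustration (not part of the upstream header): ATOMIC_OP(add, +=, add)
 * above expands to roughly
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		...
 *	}
 *
 * i.e. a load-locked/store-conditional retry loop: SCOND only commits if
 * nothing else wrote the location since the LLOCK, and on failure (Z flag
 * clear) the BNZ loops back to retry.
 */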

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void arch_atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define arch_atomic_andnot		arch_atomic_andnot
#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot

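/*
 * Usage sketch (editor illustration, hypothetical helper, not upstream):
 * the ATOMIC_OPS() lines above instantiate arch_atomic_add(),
 * arch_atomic_sub_return(), arch_atomic_fetch_or(), etc., which
 * <linux/atomic.h> then wraps as the generic atomic_*() API.
 */
static inline int grab_slot(atomic_t *nr_users)
{
	/* fetch-add returns the counter value *before* the increment */
	return arch_atomic_fetch_add(1, nr_users);
}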
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store
	 * which borked atomic64 self-test
	 * In the inline asm version, memory clobber needed for exact same
	 * reason, to tell gcc about the store.
	 *
	 * This however is not needed for sibling atomic64_add() etc since both
	 * load/store are explicitly done in inline asm. As long as API is used
	 * for each access, gcc has no way to optimize away any load/store
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
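/*
 * Editor sketch (hypothetical, not upstream) of the "simple assignment"
 * alternative the comment above alludes to: a volatile access such as
 * WRITE_ONCE() keeps gcc from eliding the store, but only the explicit
 * STD above guarantees both 32-bit halves are written as one access.
 */
static inline void arch_atomic64_set_sketch(atomic64_t *v, s64 a)
{
	WRITE_ONCE(v->counter, a);	/* volatile store, never optimized away */
}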

#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)		        	\
static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v)	\
{									\
	s64 val;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)		        		\
static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v)	\
{									\
	s64 val, orig;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#define arch_atomic64_andnot		arch_atomic64_andnot
#define arch_atomic64_fetch_andnot	arch_atomic64_fetch_andnot

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
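/*
 * Usage sketch (editor illustration, hypothetical helper): since the
 * return value is old-minus-1 even when no decrement happened, a result
 * >= 0 means a "token" was actually consumed.
 */
static inline bool take_token64(atomic64_t *tokens)
{
	return arch_atomic64_dec_if_positive(tokens) >= 0;
}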

/**
 * arch_atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v
 */
static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
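/*
 * Usage sketch (editor illustration, hypothetical helper): this op is the
 * building block for the generic atomic64_inc_not_zero(); an old value
 * of 0 means the increment was refused.
 */
static inline bool tryget64(atomic64_t *refcnt)
{
	return arch_atomic64_fetch_add_unless(refcnt, 1, 0) != 0;
}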
#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif
v4.10.11
 
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)  READ_ONCE((v)->counter)
#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
 86	"						\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}
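/*
 * Editor note (inferred from the surrounding macros, not upstream text):
 * the CTOP "atomic op" instruction emitted via .word leaves the pre-op
 * value of the counter in r2, which is why atomic_##op##_return() above
 * recomputes the new value in C ("temp c_op i") while atomic_fetch_##op()
 * below returns r2 unmodified.
 */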

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
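/*
 * Usage sketch (editor illustration, hypothetical helper): this cmpxchg
 * loop is what atomic_add_unless()/atomic_inc_not_zero() just below are
 * built on; the old value is returned, so a result != u means the add
 * went through.
 */
static inline int tryget32(atomic_t *refcnt)
{
	return __atomic_add_unless(refcnt, 1, 0) != 0;
}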

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)


#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	aligned_u64 counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

static inline long long atomic64_read(const atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store
	 * which borked atomic64 self-test
	 * In the inline asm version, memory clobber needed for exact same
	 * reason, to tell gcc about the store.
	 *
	 * This however is not needed for sibling atomic64_add() etc since both
	 * load/store are explicitly done in inline asm. As long as API is used
	 * for each access, gcc has no way to optimize away any load/store
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long long val;						\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");						\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)		        	\
static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	unsigned long long val;						\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)		        		\
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{									\
	unsigned long long val, orig;					\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * if (v != u) { v += a; ret = 1} else {ret = 0}
 * Returns 1 iff @v was not @u (i.e. if add actually happened)
 */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	int op_done;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	mov	%1, 1		\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"	mov	%1, 0		\n"
	"2:				\n"
	"	add.f   %L0, %L0, %L3	\n"
	"	adc     %H0, %H0, %H3	\n"
	"	scondd  %0, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(val), "=&r" (op_done)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return op_done;
}
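/*
 * Editor note + usage sketch (hypothetical helper): unlike the
 * arch_atomic64_fetch_add_unless() shown in the v5.14.15 version above,
 * this variant returns 1/0 ("did the add happen"), not the old value;
 * atomic64_inc_not_zero() below relies on exactly that.
 */
static inline int ref_tryget64(atomic64_t *refcnt)
{
	return atomic64_add_unless(refcnt, 1LL, 0LL);	/* 1 on success */
}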

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif