v4.17
 
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define ATOMIC_INIT(i)	{ (i) }
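
/*
 * Usage sketch (illustrative only, not part of this header; identifiers
 * invented for the example): a counter may be initialized statically with
 * ATOMIC_INIT() or at runtime with atomic_set(), defined below:
 *
 *	static atomic_t active_users = ATOMIC_INIT(0);
 *	...
 *	atomic_set(&active_users, 0);
 *	pr_info("users: %d\n", atomic_read(&active_users));
 */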

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
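
/*
 * Expansion sketch (illustrative, not generated text): ATOMIC_OP(add, +=, add)
 * above stringizes "add" into the LLOCK/SCOND retry loop, yielding roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 *
 * If SCOND loses the exclusive reservation, BNZ loops back and retries.
 */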

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
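
/*
 * Hazard sketch (illustrative): without the lock, a plain store could land
 * in the middle of an emulated read-modify-write on another CPU:
 *
 *	CPU0 (emulated atomic_add)		CPU1 (hypothetical lockless set)
 *	lock; tmp = v->counter;			v->counter = 0;
 *	tmp += i; v->counter = tmp; unlock;	(store lost once CPU0 writes back)
 *
 * Taking atomic_ops_lock() in atomic_set() serializes against the RMW.
 */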

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
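
/*
 * Usage sketch (illustrative; identifiers invented for the example): the
 * ATOMIC_OPS() expansions above generate atomic_and()/atomic_or()/... and,
 * for the second variant, the value-returning atomic_fetch_*() forms:
 *
 *	atomic_t flags = ATOMIC_INIT(0);
 *
 *	atomic_or(BIT(2), &flags);			// set bit 2
 *	old = atomic_fetch_andnot(BIT(2), &flags);	// clear it, get prior value
 */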

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
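
/*
 * Usage sketch (illustrative; obj is invented for the example): take a
 * reference only while the count is non-zero, i.e. the object is still
 * live; this is the pattern behind atomic_inc_not_zero() below:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0) {
 *		// got a reference; old count was non-zero
 *	}
 */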

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)


#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	aligned_u64 counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }
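
/*
 * Declaration sketch (illustrative; the name is invented): aligned_u64
 * provides the 8-byte alignment LLOCKD/SCONDD require, so any atomic64_t
 * declared normally satisfies the alignment rule above:
 *
 *	static atomic64_t bytes_done = ATOMIC64_INIT(0);	// 8-byte aligned
 */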

static inline long long atomic64_read(const atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store
	 * which borked atomic64 self-test
	 * In the inline asm version, memory clobber needed for exact same
	 * reason, to tell gcc about the store.
	 *
	 * This however is not needed for sibling atomic64_add() etc since both
	 * load/store are explicitly done in inline asm. As long as API is used
	 * for each access, gcc has no way to optimize away any load/store
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long long val;						\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)		        	\
static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	unsigned long long val;						\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)		        		\
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{									\
	unsigned long long val, orig;					\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}
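
/*
 * Usage sketch (illustrative; v and mask are assumed to be declared
 * elsewhere): a compare-and-swap retry loop for a 64-bit update that has
 * no dedicated atomic64_*() helper:
 *
 *	long long old, new;
 *
 *	do {
 *		old = atomic64_read(&v);
 *		new = old | mask;
 *	} while (atomic64_cmpxchg(&v, old, new) != old);
 */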

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * if (v != u) { v += a; ret = 1} else {ret = 0}
 * Returns 1 iff @v was not @u (i.e. if add actually happened)
 */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	int op_done;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	mov	%1, 1		\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"	mov	%1, 0		\n"
	"2:				\n"
	"	add.f   %L0, %L0, %L3	\n"
	"	adc     %H0, %H0, %H3	\n"
	"	scondd  %0, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(val), "=&r" (op_done)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return op_done;
}
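
/*
 * Usage sketch (illustrative; obj is invented for the example):
 * atomic64_add_unless() returns whether the add happened, which is what
 * atomic64_inc_not_zero() below relies on:
 *
 *	if (atomic64_add_unless(&obj->refcnt, 1LL, 0LL)) {
 *		// refcount was non-zero and is now incremented
 *	}
 */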

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif
v5.4
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store
	 * which borked atomic64 self-test
	 * In the inline asm version, memory clobber needed for exact same
	 * reason, to tell gcc about the store.
	 *
	 * This however is not needed for sibling atomic64_add() etc since both
	 * load/store are explicitly done in inline asm. As long as API is used
	 * for each access, gcc has no way to optimize away any load/store
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)		        	\
static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	s64 val;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)		        		\
static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
{									\
	s64 val, orig;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
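
/*
 * Usage sketch (illustrative; budget is invented for the example): consume
 * from a counter without ever driving it below zero; a non-negative return
 * means one unit was actually taken:
 *
 *	while (atomic64_dec_if_positive(&budget) >= 0) {
 *		// consumed one unit
 *	}
 */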

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v
 */
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
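
/*
 * Usage sketch (illustrative; obj is invented for the example): unlike the
 * older atomic64_add_unless(), this returns the old value, so callers can
 * both test and use the prior count, e.g. an inc-not-zero reference grab:
 *
 *	if (atomic64_fetch_add_unless(&obj->refcnt, 1, 0) != 0) {
 *		// count was live and has been incremented
 *	}
 */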

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif