arch/m68k/include/asm/atomic.h

v6.13.7

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

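/*
 * A minimal usage sketch of the "resource counting" case mentioned
 * above (hypothetical, not part of this header).  Kernel code would
 * call the generic atomic_*() wrappers that are generated from these
 * arch_atomic_*() primitives:
 *
 *	static atomic_t example_users = ATOMIC_INIT(0);
 *
 *	static void example_get(void) { atomic_inc(&example_users); }
 *	static void example_put(void) { atomic_dec(&example_users); }
 *	static int example_busy(void) { return atomic_read(&example_users) != 0; }
 */
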
/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}									\

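/*
 * For reference, ATOMIC_OP(add, +=, add) expands (on classic parts,
 * where ASM_DI is "di") to:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
 *	}
 */
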
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return t;							\
}
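
/*
 * The casl sequence above is a load/modify/compare-and-swap retry loop.
 * As a rough C sketch (using GCC's __atomic builtins purely for
 * illustration, not how the kernel spells it):
 *
 *	int old = arch_atomic_read(v), new;
 *	do {
 *		new = old;
 *		new c_op i;	// e.g. new += i for the "add" case
 *	} while (!__atomic_compare_exchange_n(&v->counter, &old, new,
 *					      false, __ATOMIC_RELAXED,
 *					      __ATOMIC_RELAXED));
 *	return new;	// the *_return variants return the new value,
 *			// the fetch_* variants below return the old one
 */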

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return tmp;							\
}

#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */
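
/*
 * Without CONFIG_RMW_INSNS the macros fall back to masking interrupts,
 * which is sufficient on these uniprocessor parts.  For reference,
 * ATOMIC_FETCH_OP(and, &=, and) expands to:
 *
 *	static inline int arch_atomic_fetch_and(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *		int t;
 *
 *		local_irq_save(flags);
 *		t = v->counter;
 *		v->counter &= i;
 *		local_irq_restore(flags);
 *
 *		return t;
 *	}
 */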

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)
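
/* Note: "eor" is the m68k mnemonic for exclusive OR, hence the asm_op
 * spelling differs from the generic "xor" name above. */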

#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void arch_atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc

static inline void arch_atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec

static inline int arch_atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
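
/*
 * Typical (hypothetical) use of dec_and_test, via the generic
 * atomic_dec_and_test() wrapper built on this primitive: drop a
 * reference and free the object when the count reaches zero.
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcnt))
 *			kfree(o);
 *	}
 */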

static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

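/*
 * Note: "slt" sets the result byte on signed less-than, so
 * dec_and_test_lt() reports that the counter dropped below zero.  It
 * deliberately has no generic-API alias #define, so only m68k-internal
 * code can use it.
 */
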
static inline int arch_atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

#ifndef CONFIG_RMW_INSNS

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	if (prev == old)
		arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_xchg arch_atomic_xchg

#endif /* !CONFIG_RMW_INSNS */
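
/*
 * A hypothetical sketch of the usual cmpxchg retry idiom layered on the
 * primitive above (through the generic atomic_cmpxchg() wrapper):
 * increment v, but only while it stays below a limit.
 *
 *	static bool inc_below(atomic_t *v, int limit)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < limit) {
 *			int prev = atomic_cmpxchg(v, old, old + 1);
 *			if (prev == old)
 *				return true;	// published old + 1
 *			old = prev;		// lost a race; retry
 *		}
 *		return false;
 *	}
 */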

static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative
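
/*
 * The scc instructions used above ("seq", "smi") write 0xff to the
 * destination byte when the condition (equal to zero / minus) holds and
 * 0x00 otherwise, so "return c != 0" converts the condition code
 * directly into a C boolean.
 */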

#endif /* __ARCH_M68K_ATOMIC__ */

v3.1
 
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
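
/*
 * With CONFIG_RMW_INSNS, cmpxchg()/xchg() come from <asm/system.h>,
 * where on these parts they are likewise built around the 680x0 casl
 * instruction, so the two wrappers above stay lock-free.
 */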

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
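
/*
 * Hypothetical usage sketch: set_mask/clear_mask give interrupt-atomic
 * bit updates on a plain word, since the single andl/orl to memory
 * cannot be interrupted mid-instruction.  E.g. for a driver flag word:
 *
 *	static unsigned long example_flags;
 *
 *	atomic_set_mask(0x01, &example_flags);		// set bit 0
 *	atomic_clear_mask(0x01, &example_flags);	// clear bit 0
 */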

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
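
/*
 * Hypothetical sketch: __atomic_add_unless(v, 1, 0) is the building
 * block of the classic "take a reference only if the object is still
 * live" idiom (cf. the generic atomic_inc_not_zero()):
 *
 *	static int obj_get_live(atomic_t *refcnt)
 *	{
 *		return __atomic_add_unless(refcnt, 1, 0) != 0;
 *	}
 */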

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_M68K_ATOMIC__ */