/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

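/*
 * Illustrative sketch (not part of this header): the "resource counting"
 * case above, written as a tiny refcount.  foo_release() is a made-up
 * helper, and real callers would normally use the generic atomic_*
 * wrappers from <linux/atomic.h> rather than the arch_ entry points
 * defined below.
 *
 *	static atomic_t foo_refs = ATOMIC_INIT(1);
 *
 *	static void foo_get(void)
 *	{
 *		atomic_inc(&foo_refs);
 *	}
 *
 *	static void foo_put(void)
 *	{
 *		if (atomic_dec_and_test(&foo_refs))
 *			foo_release();
 *	}
 */
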
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

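/*
 * Plain READ_ONCE()/WRITE_ONCE() are sufficient here: there is no SMP (see
 * above) and interrupts are only taken on instruction boundaries, so a
 * single 32-bit move to or from v->counter cannot be seen half-done by any
 * other code on the CPU.  The macros merely stop the compiler from tearing
 * or caching the access.
 */
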
/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI "d"
#else
#define ASM_DI "di"
#endif

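/*
 * Illustrative only (actual compiler output varies): with the "di"
 * constraint gcc may use the immediate-to-memory form directly,
 *
 *	addl	#1,(%a0)
 *
 * while ColdFire, limited to "d", has to go through a data register:
 *
 *	movel	#1,%d0
 *	addl	%d0,(%a0)
 */
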
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
        __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
} \

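/*
 * As an example, ATOMIC_OP(add, +=, add) expands to roughly
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 *
 * i.e. a single read-modify-write "addl" on v->counter, which is atomic
 * with respect to interrupts because it is one instruction.  The c_op
 * argument is only used by the IRQ-masking fallbacks further down.
 */
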
#ifdef CONFIG_RMW_INSNS

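/*
 * With CONFIG_RMW_INSNS (CPUs that have "cas", i.e. 68020 and later) the
 * value-returning variants use a compare-and-swap retry loop: %1 gets a
 * copy of the old value, the operation is applied to it, and "casl" stores
 * it back only if v->counter still equals the old value held in %2;
 * otherwise casl reloads %2 with the current value and "jne" retries.
 * The *_return form hands back the new value (%1), fetch_* the old (%2).
 */
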
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
        int t, tmp; \
 \
        __asm__ __volatile__( \
                        "1: movel %2,%1\n" \
                        " " #asm_op "l %3,%1\n" \
                        " casl %2,%1,%0\n" \
                        " jne 1b" \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp) \
                        : "di" (i), "2" (arch_atomic_read(v))); \
        return t; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
        int t, tmp; \
 \
        __asm__ __volatile__( \
                        "1: movel %2,%1\n" \
                        " " #asm_op "l %3,%1\n" \
                        " casl %2,%1,%0\n" \
                        " jne 1b" \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp) \
                        : "di" (i), "2" (arch_atomic_read(v))); \
        return tmp; \
}

#else

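/*
 * Without cas (68000 family, ColdFire) the value-returning variants simply
 * mask interrupts around a plain C read-modify-write.  Since there is no
 * SMP (see the comment at the top of this file), disabling local interrupts
 * is enough to make the sequence atomic.
 */
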
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int t; \
 \
        local_irq_save(flags); \
        t = (v->counter c_op i); \
        local_irq_restore(flags); \
 \
        return t; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int t; \
 \
        local_irq_save(flags); \
        t = v->counter; \
        v->counter c_op i; \
        local_irq_restore(flags); \
 \
        return t; \
}

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_OP_RETURN(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)

#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

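/*
 * inc/dec use the addq/subq "quick" forms, which encode the immediate
 * (1..8) in the opcode itself, so they need no ASM_DI operand and work
 * on a memory destination on ColdFire as well.
 */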
static inline void arch_atomic_inc(atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc

static inline void arch_atomic_dec(atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec

static inline int arch_atomic_dec_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

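/*
 * Like arch_atomic_dec_and_test(), but true when the decremented value is
 * negative ("slt" tests the signed less-than condition after the subtract)
 * rather than zero.  An m68k-local helper, not part of the generic atomic
 * API.
 */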
static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
        char c;
        __asm__ __volatile__(
                "subql #1,%1; slt %0"
                : "=d" (c), "=m" (*v)
                : "m" (*v));
        return c != 0;
}

static inline int arch_atomic_inc_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

#ifndef CONFIG_RMW_INSNS

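/*
 * With CONFIG_RMW_INSNS these two are not defined here; the generic atomic
 * code then builds arch_atomic_cmpxchg()/arch_atomic_xchg() on top of
 * arch_cmpxchg() from <asm/cmpxchg.h>.  Without cas we once more fall back
 * to masking interrupts, which is enough on a UP machine.
 */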
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = arch_atomic_read(v);
        if (prev == old)
                arch_atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = arch_atomic_read(v);
        arch_atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}
#define arch_atomic_xchg arch_atomic_xchg

#endif /* !CONFIG_RMW_INSNS */

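/*
 * The two tests below reuse the condition codes left behind by the subl/addl
 * itself: "seq" sets the result byte if the outcome was zero, "smi" if the
 * N flag (negative result) is set, so no separate compare is needed.
 */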
static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("addl %2,%1; smi %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative

#endif /* __ARCH_M68K_ATOMIC__ */