/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

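/*
 * Example (illustrative sketch, not from this header): a bare event
 * counter. @nr_events and the helpers are hypothetical; ATOMIC_INIT()
 * is provided elsewhere (linux/types.h in recent kernels), and real
 * kernel code would normally use the atomic_*() wrappers rather than
 * calling the arch_*() operations directly.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	void record_event(void)
 *	{
 *		arch_atomic_inc(&nr_events);
 *	}
 *
 *	int events_seen(void)
 *	{
 *		return arch_atomic_read(&nr_events);
 *	}
 */
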
/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * as it is a non-inlined function that increases binary size and
	 * stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

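/*
 * Note on the "memory" clobbers above: they do not add CPU-level
 * ordering (the LOCK prefix already makes these operations fully
 * ordered on x86); instead they make each asm statement a compiler
 * barrier, so the compiler cannot cache or reorder other memory
 * accesses across the atomic operation.
 */
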
/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

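/*
 * Example (illustrative sketch; @obj and free_obj() are hypothetical):
 * drop @nr references at once and free the object on the last one.
 *
 *	if (arch_atomic_sub_and_test(nr, &obj->refs))
 *		free_obj(obj);
 */
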
/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

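/*
 * Example (illustrative sketch; struct obj and its kfree()-based
 * teardown are hypothetical): the classic reference-count "put" pattern.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (arch_atomic_dec_and_test(&o->refs))
 *			kfree(o);
 *	}
 */
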
/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

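/*
 * Example (illustrative sketch; @budget, @cost and throttle() are
 * hypothetical): charge @cost against a remaining budget and react
 * when it goes negative.
 *
 *	if (arch_atomic_add_negative(-cost, &budget))
 *		throttle();
 */
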
/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

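/*
 * Example (illustrative sketch; @next_ticket is hypothetical):
 * arch_atomic_add_return() yields the *new* value, so a unique,
 * ascending ticket can be drawn from a shared counter.
 *
 *	int ticket = arch_atomic_add_return(1, &next_ticket);
 */
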
/* Atomically adds @i to @v and returns the value @v held before the add. */
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

/* Atomically subtracts @i from @v and returns the previous value of @v. */
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

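/*
 * Unlike the *_return() operations above, the fetch_*() operations
 * return the value the counter held *before* the modification.
 * Illustrative sketch (@v and @old are hypothetical):
 *
 *	arch_atomic_set(&v, 5);
 *	old = arch_atomic_fetch_add(3, &v);	(old == 5, v == 8)
 */
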
/*
 * Atomically sets @v to @new if it currently holds @old;
 * returns the value @v held beforehand.
 */
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

/*
 * Like arch_atomic_cmpxchg(), but returns a success boolean and, on
 * failure, writes the current value of @v back into *@old.
 */
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

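/*
 * Example (illustrative sketch; @v and LIMIT are hypothetical): a
 * bounded increment built from a try_cmpxchg() loop. On failure,
 * arch_atomic_try_cmpxchg() refreshes @old with the current value,
 * so the loop need not re-read the counter itself.
 *
 *	int old = arch_atomic_read(&v);
 *
 *	do {
 *		if (old >= LIMIT)
 *			break;
 *	} while (!arch_atomic_try_cmpxchg(&v, &old, old + 1));
 */
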
/* Atomically exchanges @v for @new, returning the old value. */
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

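/*
 * Example (illustrative sketch; @pending is hypothetical): atomically
 * drain a counter, claiming whatever had accumulated so far.
 *
 *	int claimed = arch_atomic_xchg(&pending, 0);
 */
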
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

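/*
 * The fetch_and/fetch_or/fetch_xor operations below are built from
 * try_cmpxchg() loops rather than a single instruction: x86 has no
 * atomic logic instruction that also returns the old value (xadd
 * exists only for addition), so the old value is sampled and the
 * update retried until no other CPU has changed the counter in
 * between.
 */
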
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */