#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}
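
/*
 * Note: READ_ONCE() only guarantees a single, untorn load that the
 * compiler will not refetch or reorder at will; it emits no hardware
 * barrier. Callers that need ordering against other memory accesses
 * must pair this with smp_rmb()/smp_mb() or use the value-returning
 * atomics below.
 */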

/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}

/**
 * atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void atomic64_add(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}
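
/*
 * Implementation note: LOCK_PREFIX makes the read-modify-write of ADDQ
 * atomic with respect to other CPUs and is patched out at boot on
 * uniprocessor kernels (see <asm/alternative.h>). The "er" constraint
 * lets @i be passed either in a register or as a 32-bit sign-extended
 * immediate, which is all that ADDQ can encode.
 */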

/**
 * atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic64_sub(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
}
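
/*
 * GEN_BINARY_RMWcc() both performs the locked "subq" and turns the
 * requested condition code ("e", i.e. ZF) into the function's return
 * value. Conceptually it is close to the following sketch (the real
 * macro may instead use asm goto; "zero" is an illustrative name):
 *
 *	bool zero;
 *	asm volatile(LOCK_PREFIX "subq %2, %0; sete %1"
 *		     : "+m" (v->counter), "=qm" (zero)
 *		     : "er" (i) : "memory");
 *	return zero;
 */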

/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int atomic64_dec_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
}

/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int atomic64_inc_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
}

/**
 * atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int atomic64_add_negative(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
}

/**
 * atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline long atomic64_add_return(long i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
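
/*
 * XADD leaves the previous value of the memory operand in the source
 * register, so xadd() returns the counter's value from before the
 * addition; adding @i back on top yields the new value promised by
 * atomic64_add_return().
 */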

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	return atomic64_add_return(-i, v);
}

#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))

static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return cmpxchg(&v->counter, old, new);
}

static inline long atomic64_xchg(atomic64_t *v, long new)
{
	return xchg(&v->counter, new);
}

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was done, zero otherwise.
 */
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
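
/*
 * Typical use of atomic64_inc_not_zero(): take a reference only while an
 * object is still live. Illustrative sketch only; "struct obj" and
 * obj_tryget() are made-up names, not part of this header:
 *
 *	static bool obj_tryget(struct obj *o)
 *	{
 *		return atomic64_inc_not_zero(&o->refcnt);
 *	}
 *
 * Once the count has reached zero the increment is refused, so a stale
 * pointer can never resurrect the object.
 */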

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
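
/*
 * atomic64_dec_if_positive() is handy for consuming a token only when one
 * is available. Hypothetical sketch; take_token() and the token counter
 * are illustrative names, not part of this header:
 *
 *	static bool take_token(atomic64_t *tokens)
 *	{
 *		return atomic64_dec_if_positive(tokens) >= 0;
 *	}
 *
 * A negative return value means the counter was already at (or below)
 * zero and was left unchanged.
 */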

#define ATOMIC64_OP(op)							\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	asm volatile(LOCK_PREFIX #op"q %1,%0"				\
			: "+m" (v->counter)				\
			: "er" (i)					\
			: "memory");					\
}

ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

#undef ATOMIC64_OP
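
/*
 * The ATOMIC64_OP() expansions above generate atomic64_and(), atomic64_or()
 * and atomic64_xor(). They make only the read-modify-write atomic and do
 * not return the old value. Example use, clearing bits in a shared 64-bit
 * flag word (FLAG_BUSY and obj->flags are illustrative names):
 *
 *	atomic64_and(~FLAG_BUSY, &obj->flags);
 */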

#endif /* _ASM_X86_ATOMIC64_64_H */

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __READ_ONCE((v)->counter);
}

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__WRITE_ONCE(v->counter, i);
}
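
/*
 * The arch_atomic64_*() operations in this arch_-prefixed variant of the
 * API are intended to be consumed through the kernel's generic
 * instrumented atomic wrappers, which add KASAN/KCSAN checks and re-export
 * them as atomic64_*(). __READ_ONCE()/__WRITE_ONCE() are the
 * non-instrumented forms, so each access is instrumented exactly once, by
 * the wrapper.
 */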

static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
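
/*
 * Naming reminder: the fetch_ variants return the value the counter held
 * before the operation, while the _return variants return the new value.
 * Both are built on XADD here, e.g. arch_atomic64_fetch_add(1, v) yields
 * the pre-increment value and arch_atomic64_add_return(1, v) yields the
 * post-increment value.
 */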

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
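
/*
 * arch_atomic64_try_cmpxchg() returns true when *@old was found and
 * replaced by @new; on failure it returns false and writes the value it
 * actually observed back into *@old. That is what keeps the cmpxchg retry
 * loops below so compact: the re-read is folded into the failed attempt.
 */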

static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
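
/*
 * Unlike XADD, x86 has no instruction that both applies AND/OR/XOR to
 * memory and hands back the previous value, so the fetch_ forms of the
 * bitwise ops fall back to a try_cmpxchg() retry loop: start from the
 * current value and retry with whatever was observed on failure.
 */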

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */