/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <linux/atomic.h>
#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;
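
/*
 * local_t operations are atomic with respect to interrupts on the CPU that
 * owns the variable, but unlike the atomic_long_t family they are not
 * SMP-safe: each CPU may only update its own instance, so callers must
 * stay pinned to one CPU (interrupt context, preemption disabled, ...)
 * around the access. See Documentation/core-api/local_ops.rst.
 *
 * A minimal usage sketch; the per-CPU variable name is illustrative:
 *
 *	static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);
 *
 *	local_inc(this_cpu_ptr(&nr_events));
 */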

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))
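
/**
 * local_inc - increment local variable
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1.
 */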
static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}
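
/**
 * local_dec - decrement local variable
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1.
 */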
static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}
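
/**
 * local_add - add value to local variable
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l.
 */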
static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}
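
/**
 * local_sub - subtract value from local variable
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l.
 */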
static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_sub_and_test(long i, local_t *l)
{
	return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i);
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool local_dec_and_test(local_t *l)
{
	return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e);
}

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_inc_and_test(local_t *l)
{
	return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e);
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool local_add_negative(long i, local_t *l)
{
	return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i);
}

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns the result of the addition.
 */
static inline long local_add_return(long i, local_t *l)
{
	long __i = i;
	asm volatile(_ASM_XADD "%0, %1;"
		     : "+r" (i), "+m" (l->a.counter)
		     : : "memory");
	/* XADD left the old value of l->a.counter in i */
	return i + __i;
}
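
/**
 * local_sub_return - subtract and return
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns the result.
 */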
static inline long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)	(local_add_return(1, l))
#define local_dec_return(l)	(local_sub_return(1, l))
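
/**
 * local_cmpxchg - compare and exchange local variable
 * @l: pointer to type local_t
 * @old: expected value
 * @new: value to store if @l holds @old
 *
 * Atomically sets @l to @new if @l held @old. Returns the value @l held
 * before the operation, which equals @old on success.
 */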
static inline long local_cmpxchg(local_t *l, long old, long new)
{
	return cmpxchg_local(&l->a.counter, old, new);
}
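
/**
 * local_try_cmpxchg - try to compare and exchange local variable
 * @l: pointer to type local_t
 * @old: pointer to the expected value
 * @new: value to store if @l holds *@old
 *
 * Atomically sets @l to @new if @l held *@old. Returns true on success;
 * on failure, returns false and updates *@old to the value @l actually held.
 */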
static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
{
	return try_cmpxchg_local(&l->a.counter,
				 (typeof(l->a.counter) *) old, new);
}

/* Always has a lock prefix */
#define local_xchg(l, n)	(xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is already a given value
 * @l: pointer of type local_t
 * @a: the amount to add to @l...
 * @u: ...unless @l is equal to @u.
 *
 * Atomically adds @a to @l, if @l was not already @u.
 * Returns true if the addition was done.
 */
static __always_inline bool
local_add_unless(local_t *l, long a, long u)
{
	long c = local_read(l);

	do {
		if (unlikely(c == u))
			return false;
	} while (!local_try_cmpxchg(l, &c, c + a));

	return true;
}

#define local_inc_not_zero(l)	local_add_unless((l), 1, 0)

/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))

#endif /* _ASM_X86_LOCAL_H */