/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic C implementation of atomic counter operations. Do not include in
 * machine independent code.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * atomic_$op() - $op integer to atomic variable
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v. Does not strictly guarantee a memory barrier;
 * use smp_mb__{before,after}_atomic() where ordering is required.
 */

/*
 * atomic_$op_return() - $op integer to atomic variable and return the result
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v and returns the new value. Implies a full memory
 * barrier.
 */
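
/*
 * For example, ATOMIC_OP_RETURN(add, +) below generates atomic_add_return(i, v),
 * which atomically performs v->counter += i and returns the new value, while
 * ATOMIC_FETCH_OP(add, +) generates atomic_fetch_add(i, v), which performs the
 * same update but returns the value the counter held beforehand.
 */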

#ifdef CONFIG_SMP

/* we can build all atomic primitives from cmpxchg */

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
}
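
/*
 * As a concrete illustration of the loop above, ATOMIC_OP(and, &) expands
 * to roughly the following: the counter is re-read and the exchange retried
 * whenever another CPU modified it between the load and the cmpxchg, so the
 * whole read-modify-write is atomic:
 *
 *	static inline void atomic_and(int i, atomic_t *v)
 *	{
 *		int c, old;
 *
 *		c = v->counter;
 *		while ((old = cmpxchg(&v->counter, c, c & i)) != c)
 *			c = old;
 *	}
 */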

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
									\
	return c c_op i;						\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
		c = old;						\
									\
	return c;							\
}
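
/*
 * The two value-returning variants differ only in what they hand back. A
 * short worked example, assuming no concurrent updates:
 *
 *	// v->counter == 5
 *	atomic_add_return(3, v);  // returns 8 (the new value), counter == 8
 *	atomic_fetch_add(3, v);   // returns 8 (the old value), counter == 11
 *
 * i.e. _return yields the new value (c c_op i), fetch_ yields the old (c).
 */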

#else

#include <linux/irqflags.h>

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter = v->counter c_op i;					\
	raw_local_irq_restore(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	raw_local_irq_save(flags);					\
	ret = (v->counter = v->counter c_op i);				\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	raw_local_irq_save(flags);					\
	ret = v->counter;						\
	v->counter = v->counter c_op i;					\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#endif /* CONFIG_SMP */
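
/*
 * Note that the irq-disabled variants above are only sufficient on UP: with
 * a single CPU, the only way the read-modify-write can be torn is by an
 * interrupt arriving in the middle of it, and raw_local_irq_save() rules
 * that out. On SMP another CPU could still write v->counter concurrently,
 * which is why the CONFIG_SMP build uses the cmpxchg() loop instead.
 */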

#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
#endif

#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
#endif

#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
#endif

#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
#endif

#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
#endif

#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
#endif

#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
#endif

#ifndef atomic_and
ATOMIC_OP(and, &)
#endif

#ifndef atomic_or
ATOMIC_OP(or, |)
#endif

#ifndef atomic_xor
ATOMIC_OP(xor, ^)
#endif

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
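
/*
 * After the instantiations above the following operations exist (unless the
 * architecture already provided its own): atomic_add_return(),
 * atomic_sub_return(), atomic_fetch_{add,sub,and,or,xor}() and
 * atomic_{and,or,xor}(). atomic_add() and atomic_sub() are defined further
 * down in terms of their _return variants.
 */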

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }
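
/*
 * Typical usage (names are illustrative only): initialize statically with
 * ATOMIC_INIT(), then manipulate the counter with the operations below:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_add(1, &nr_users);
 *	pr_info("users: %d\n", atomic_read(&nr_users));
 */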

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	READ_ONCE((v)->counter)
#endif

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#include <linux/irqflags.h>

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
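
/*
 * A minimal sketch (not part of the original API in this header) of how
 * conditional operations can be layered on atomic_cmpxchg(): the
 * hypothetical helper below adds @a to @v unless @v equals @u and returns
 * the old value, in the style of the kernel's atomic_add_unless() family.
 */
static inline int example_atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {
		/* try to install c + a; fails if @v changed under us */
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)
			break;		/* exchange succeeded */
		c = old;		/* lost a race; retry with fresh value */
	}

	return c;
}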

#endif /* __ASM_GENERIC_ATOMIC_H */