/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic C implementation of atomic counter operations. Do not include in
 * machine independent code.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifdef CONFIG_SMP

/* we can build all atomic primitives from cmpxchg */

#define ATOMIC_OP(op, c_op) \
static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
	int c, old; \
	\
	c = v->counter; \
	while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
		c = old; \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
	int c, old; \
	\
	c = v->counter; \
	while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
		c = old; \
	\
	return c c_op i; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int c, old; \
	\
	c = v->counter; \
	while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
		c = old; \
	\
	return c; \
}
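
/*
 * For illustration only (not part of the original file): on SMP,
 * ATOMIC_OP_RETURN(add, +) expands to roughly the following cmpxchg retry
 * loop, which keeps re-reading the counter until an arch_cmpxchg() from the
 * observed value succeeds:
 *
 *	static inline int generic_atomic_add_return(int i, atomic_t *v)
 *	{
 *		int c, old;
 *
 *		c = v->counter;
 *		while ((old = arch_cmpxchg(&v->counter, c, c + i)) != c)
 *			c = old;
 *
 *		return c + i;
 *	}
 */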

#else

#include <linux/irqflags.h>

#define ATOMIC_OP(op, c_op) \
static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	\
	raw_local_irq_save(flags); \
	v->counter = v->counter c_op i; \
	raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int ret; \
	\
	raw_local_irq_save(flags); \
	ret = (v->counter = v->counter c_op i); \
	raw_local_irq_restore(flags); \
	\
	return ret; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int ret; \
	\
	raw_local_irq_save(flags); \
	ret = v->counter; \
	v->counter = v->counter c_op i; \
	raw_local_irq_restore(flags); \
	\
	return ret; \
}
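
/*
 * Illustration only (not part of the original file): on UP the operations only
 * have to be atomic with respect to interrupts on the local CPU, so e.g.
 * ATOMIC_OP(or, |) becomes a plain read-modify-write bracketed by
 * raw_local_irq_save()/raw_local_irq_restore():
 *
 *	static inline void generic_atomic_or(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		raw_local_irq_save(flags);
 *		v->counter = v->counter | i;
 *		raw_local_irq_restore(flags);
 *	}
 */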

#endif /* CONFIG_SMP */

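/*
 * Instantiate the templates above; whichever branch was selected, this
 * generates generic_atomic_{add,sub}_return(),
 * generic_atomic_fetch_{add,sub,and,or,xor}() and
 * generic_atomic_{add,sub,and,or,xor}().
 */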
ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)

ATOMIC_FETCH_OP(add, +)
ATOMIC_FETCH_OP(sub, -)
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_add_return	generic_atomic_add_return
#define arch_atomic_sub_return	generic_atomic_sub_return

#define arch_atomic_fetch_add	generic_atomic_fetch_add
#define arch_atomic_fetch_sub	generic_atomic_fetch_sub
#define arch_atomic_fetch_and	generic_atomic_fetch_and
#define arch_atomic_fetch_or	generic_atomic_fetch_or
#define arch_atomic_fetch_xor	generic_atomic_fetch_xor

#define arch_atomic_add		generic_atomic_add
#define arch_atomic_sub		generic_atomic_sub
#define arch_atomic_and		generic_atomic_and
#define arch_atomic_or		generic_atomic_or
#define arch_atomic_xor		generic_atomic_xor

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

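/*
 * Illustrative semantics only (not part of the original file); callers
 * normally reach these through the atomic_*() wrappers rather than calling
 * arch_atomic_*() directly:
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	arch_atomic_add(5, &v);			// v == 5
 *	arch_atomic_fetch_add(3, &v);		// returns 5, v == 8
 *	arch_atomic_sub_return(2, &v);		// returns 6, v == 6
 *	arch_atomic_read(&v);			// returns 6
 */
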
#endif /* __ASM_GENERIC_ATOMIC_H */
/*
 * Generic C implementation of atomic counter operations. Usable on
 * UP systems only. Do not include in machine independent code.
 *
 * Originally implemented for MN10300.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * atomic_$op() - $op integer to atomic variable
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v. Does not strictly guarantee a memory barrier;
 * use smp_mb__{before,after}_atomic() where ordering is required.
 */

/*
 * atomic_$op_return() - $op integer to atomic variable and return the result
 * @i: integer value to $op
 * @v: pointer to the atomic variable
 *
 * Atomically $ops @i to @v and returns the result. Does imply a full memory
 * barrier.
 */
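
/*
 * Illustrative only (the variable names are hypothetical): pairing a plain,
 * non-value-returning atomic op with the explicit barriers mentioned above
 * when ordering against surrounding memory accesses matters:
 *
 *	buf->ready = 1;
 *	smp_mb__before_atomic();
 *	atomic_inc(&pending);
 *	smp_mb__after_atomic();
 */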

#ifdef CONFIG_SMP

/* we can build all atomic primitives from cmpxchg */

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	int c, old; \
	\
	c = v->counter; \
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
		c = old; \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
	int c, old; \
	\
	c = v->counter; \
	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
		c = old; \
	\
	return c c_op i; \
}

#else

#include <linux/irqflags.h>

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	\
	raw_local_irq_save(flags); \
	v->counter = v->counter c_op i; \
	raw_local_irq_restore(flags); \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int ret; \
	\
	raw_local_irq_save(flags); \
	ret = (v->counter = v->counter c_op i); \
	raw_local_irq_restore(flags); \
	\
	return ret; \
}

#endif /* CONFIG_SMP */

#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
#endif

#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
#endif

#ifndef atomic_and
ATOMIC_OP(and, &)
#endif

#ifndef atomic_or
ATOMIC_OP(or, |)
#endif

#ifndef atomic_xor
ATOMIC_OP(xor, ^)
#endif

#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	READ_ONCE((v)->counter)
#endif

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
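
/*
 * Illustrative use only (the names here are made up): declaring, initialising
 * and reading a simple counter:
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 0);
 *	pr_info("events so far: %d\n", atomic_read(&nr_events));
 */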

#include <linux/irqflags.h>

static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
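
/*
 * Illustrative only (the struct and function names are hypothetical): the
 * classic reference-count release pattern built on atomic_dec_and_test():
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			kfree(f);
 *	}
 */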

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}
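
/*
 * Illustrative only (the field name is hypothetical): __atomic_add_unless()
 * returns the value it observed before any addition, so "take a reference
 * unless the count has already dropped to zero" can be written as:
 *
 *	bool got_ref = __atomic_add_unless(&obj->refs, 1, 0) != 0;
 */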

#endif /* __ASM_GENERIC_ATOMIC_H */