/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define arch_atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC
#include <asm/atomic-llsc.h>
#else
#include <asm/atomic-spinlock.h>
#endif

/*
 * 64-bit atomics
 */
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#else
#include <asm/atomic64-arcv2.h>
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_ARC_ATOMIC_H */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	add     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */
	: "r"(&v->counter), "ir"(i)
	: "cc");
}
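
/*
 * Illustrative sketch (not part of the original header): the LLOCK/SCOND
 * sequence above is a load-locked/store-conditional retry loop.  Written in
 * C with hypothetical __llock()/__scond() helpers (made-up names, not real
 * ARC intrinsics), atomic_add() behaves roughly like:
 */
#if 0
static inline void atomic_add_sketch(int i, atomic_t *v)
{
	unsigned int tmp;

	do {
		tmp = __llock(&v->counter);	/* load value, arm the link flag */
		tmp += i;
	} while (!__scond(tmp, &v->counter));	/* store succeeds only if still linked */
}
#endif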

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	sub     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");
}

/* add and also return the new value */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	add     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	sub     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");

	return temp;
}
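
/*
 * Illustrative usage sketch (not part of the original header): the *_return
 * variants are what dec-and-test style patterns are built on.  The function
 * names below are made up.
 */
#if 0
static int example_submit_one(atomic_t *pending)
{
	/* returns the new number of pending items, including this one */
	return atomic_add_return(1, pending);
}

static bool example_retire_one(atomic_t *pending)
{
	/* true when the last pending item has just been retired */
	return atomic_sub_return(1, pending) == 0;
}
#endif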

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bic     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(addr), "ir"(mask)
	: "cc");
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), requires the locking (see the illustrative sketch below).
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}
#endif
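
/*
 * Illustrative sketch (an assumed interleaving, not part of the original
 * header): if atomic_set() skipped the lock on SMP, a concurrent emulated
 * read-modify-write could overwrite its store:
 *
 *	CPU0: atomic_add(d, v)			CPU1: unlocked atomic_set(v, i)
 *	atomic_ops_lock(flags);
 *	tmp = v->counter;
 *						v->counter = i;
 *	v->counter = tmp + d;			<- CPU1's set is lost
 *	atomic_ops_unlock(flags);
 *
 * Taking atomic_ops_lock() in atomic_set() serializes it against such
 * sequences and avoids the lost update.
 */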

/*
 * Non-hardware-assisted atomic R-M-W
 * Locking here is irq-disabling only (UP) or a spinlock (SMP)
 */

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter += i;
	atomic_ops_unlock(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter -= i;
	atomic_ops_unlock(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned long temp;

	atomic_ops_lock(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned long temp;

	atomic_ops_lock(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	atomic_ops_unlock(flags);

	return temp;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	atomic_ops_lock(flags);
	*addr &= ~mask;
	atomic_ops_unlock(flags);
}

#endif /* !CONFIG_ARC_HAS_LLSC */
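
/*
 * Illustrative usage sketch (not part of the original header): the flag word
 * and bit below are made-up names, only to show the intent of
 * atomic_clear_mask() on a plain word of flag bits.
 */
#if 0
#define EXAMPLE_FLAG_BUSY	(1UL << 0)	/* hypothetical flag bit */

static unsigned long example_flags;		/* hypothetical flag word */

static void example_mark_idle(void)
{
	/* atomically clear the BUSY bit, leaving the other bits untouched */
	atomic_clear_mask(EXAMPLE_FLAG_BUSY, &example_flags);
}
#endif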

#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v.
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
	c;								\
})

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
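
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * "take a reference only while the object is still live" pattern built on
 * atomic_inc_not_zero().  The struct and function names are made up, and
 * atomic_add_unless() itself comes from the generic atomic headers.
 */
#if 0
struct example_obj {
	atomic_t refcnt;	/* hypothetical reference count */
};

static bool example_obj_get(struct example_obj *obj)
{
	/* succeeds (and bumps refcnt) only if refcnt was not already zero */
	return atomic_inc_not_zero(&obj->refcnt) != 0;
}
#endif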

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_ARC_ATOMIC_H */