/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/* Normal writes in our arch don't clear lock reservations */

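/*
 * Because an ordinary store does not clear a lock reservation on this
 * architecture (see the note above), arch_atomic_set() stores through the
 * load-locked/store-conditional protocol instead: it takes a reservation
 * with memw_locked and retries the conditional store until it succeeds, so
 * a concurrent locked read-modify-write sequence on the same word is forced
 * to retry and observe the newly stored value.
 */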
static inline void arch_atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

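/* Word reads on this architecture are assumed to be atomic. */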
#define arch_atomic_read(v)	READ_ONCE((v)->counter)

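/*
 * The macros below generate the arithmetic and bitwise atomics from one
 * memw_locked retry-loop template:
 *
 *   ATOMIC_OP(op)        - arch_atomic_##op(), returns nothing
 *   ATOMIC_OP_RETURN(op) - arch_atomic_##op##_return(), returns the new value
 *   ATOMIC_FETCH_OP(op)  - arch_atomic_fetch_##op(), returns the old value
 */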
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

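/*
 * Tell the generic atomic code which operations this architecture provides,
 * so fallback versions are not generated for them.
 */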
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

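/**
 * arch_atomic_fetch_add_unless - add to a value unless it equals a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: do not add if the current value equals @u
 *
 * Returns the old value.
 */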
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
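/*
 * Typical use (via the generic wrappers): atomic_inc_not_zero(v) is
 * atomic_add_unless(v, 1, 0), i.e. take a reference only while the count
 * is still non-zero.
 */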

#endif
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

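/* Static initializer, e.g.: static atomic_t refcnt = ATOMIC_INIT(1); */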
#define ATOMIC_INIT(i)		{ (i) }

/* Normal writes in our arch don't clear lock reservations */

static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_xchg - atomically exchange a value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))

/**
 * atomic_cmpxchg - atomically compare and exchange a value
 * @v: pointer to value to change
 * @old: expected old value to match
 * @new: new value to store
 *
 * The parameters are a pointer and two values passed in registers;
 * the return value is the old value actually found in memory.
 *
 * This is more complicated on architectures that lack a memw_locked-style
 * primitive.
 *
 * It is, in effect, the linchpin of the rest of the generically defined
 * routines.  Remember that Hexagon V2 had a bug with the .new predicate
 * set by memw_locked.
 *
 * "old" is the expected old value, __oldval is the actual old value.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add to a value unless it equals a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: do not add if the current value equals @u
 *
 * Returns the old value.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v)	(atomic_add_return(i, (v)) < 0)

#define atomic_inc_return(v)		(atomic_add_return(1, v))
#define atomic_dec_return(v)		(atomic_sub_return(1, v))

#endif