// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS 16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

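/*
 * Hash an atomic64_t's address to one of the NR_LOCKS spinlocks:
 * shift out the offset within a cacheline, fold in some higher
 * address bits, and mask the result down to a valid array index.
 */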
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

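/*
 * Even a plain read must take the lock: without 64-bit atomic
 * instructions, a 64-bit load can tear (each 32-bit half read
 * separately) and observe a half-updated value.
 */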
s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);

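/*
 * Template macros: ATOMIC64_OP generates the void operations
 * (e.g. generic_atomic64_add), ATOMIC64_OP_RETURN the variants
 * that return the new value, and ATOMIC64_FETCH_OP the variants
 * that return the old value. All take the hashed lock around a
 * plain C read-modify-write on v->counter.
 */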
#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

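/*
 * ATOMIC64_OPS(add, +=) below expands to generic_atomic64_add(),
 * generic_atomic64_add_return() and generic_atomic64_fetch_add();
 * likewise for sub.
 */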
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

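/*
 * The bitwise operations have no *_return variants in the atomic64
 * API, so redefine ATOMIC64_OPS without ATOMIC64_OP_RETURN.
 */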
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

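/*
 * Decrement v only if the result would stay non-negative; returns
 * the old value minus one either way, so a negative return means
 * the counter was left untouched.
 */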
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);

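/*
 * Classic compare-and-exchange: store n only if the counter still
 * equals o. Returns the old value, so success is (return == o).
 */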
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);

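/*
 * Unconditionally replace the counter with new and return the old
 * value.
 */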
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

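/*
 * Add a to v unless the counter equals u. Returns the old value,
 * so the caller can tell whether the add happened (return != u).
 */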
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
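/*
 * Usage sketch (assumption, not part of this file): an architecture
 * that lacks native 64-bit atomics selects CONFIG_GENERIC_ATOMIC64,
 * and <asm-generic/atomic64.h> then maps the arch_atomic64_*() ops
 * onto these functions, e.g.:
 *
 *	#define arch_atomic64_read	generic_atomic64_read
 *	#define arch_atomic64_xchg	generic_atomic64_xchg
 */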