// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>
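
/*
 * Architectures without native 64-bit atomic instructions select
 * CONFIG_GENERIC_ATOMIC64; <asm-generic/atomic64.h> then maps the
 * kernel's atomic64_*() API onto the generic_atomic64_*() functions
 * defined below.
 */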

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};
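
/*
 * Hash an atomic64_t's address to pick one of the locks above: drop
 * the offset-within-cacheline bits, XOR-fold the remaining address
 * bits, and mask the result down to a lock array index.
 */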
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
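
/*
 * The arithmetic and bitwise ops are generated from the templates
 * below.  Each instantiation produces a plain op and, where the API
 * provides them, op_return and fetch_op variants, all serialized by
 * the hashed spinlock.
 */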
#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)
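
/*
 * For example, ATOMIC64_OPS(add, +=) expands to the three exported
 * functions generic_atomic64_add(), generic_atomic64_add_return() and
 * generic_atomic64_fetch_add(); the first of them expands to:
 *
 *	void generic_atomic64_add(s64 a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		v->counter += a;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 *	EXPORT_SYMBOL(generic_atomic64_add);
 */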
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
/*
 * The atomic64 API has no op_return variants of the bitwise ops, so
 * regenerate ATOMIC64_OPS without the ATOMIC64_OP_RETURN template.
 */
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
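
/*
 * Decrement v only if the result would stay non-negative.  The
 * decremented value is computed and returned either way, so a
 * negative return value tells the caller that v was left unchanged.
 */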
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
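
/*
 * Store n in v only if the current value equals o.  The value
 * observed before any store is returned, so a return value equal to
 * o indicates the exchange succeeded.
 */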
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);

s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);
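
/*
 * Add a to v unless the current value equals u.  The value observed
 * before any addition is returned; callers compare it against u to
 * tell whether the addition took place.
 */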
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);