v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
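
To make the hashing concrete, here is a standalone userspace sketch of the slot computation above. DEMO_L1_CACHE_SHIFT and the sample addresses are assumptions for illustration; the kernel gets L1_CACHE_SHIFT from <linux/cache.h> and NR_LOCKS from the #define above.

#include <stdio.h>

#define DEMO_L1_CACHE_SHIFT	6	/* assume 64-byte cachelines */
#define DEMO_NR_LOCKS		16

static unsigned int demo_lock_index(unsigned long addr)
{
	addr >>= DEMO_L1_CACHE_SHIFT;	/* variables in one cacheline share a lock */
	addr ^= (addr >> 8) ^ (addr >> 16);	/* fold in higher address bits */
	return addr & (DEMO_NR_LOCKS - 1);	/* pick one of the 16 slots */
}

int main(void)
{
	/* Two hypothetical atomic64_t addresses 8 bytes apart: they sit in
	 * the same cacheline, so they hash to the same lock slot. */
	printf("%u %u\n", demo_lock_index(0x1000), demo_lock_index(0x1008));
	return 0;
}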

s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
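
Worth noting why even plain read and set take the lock: on a 32-bit machine a 64-bit access compiles to two 32-bit memory operations, so an unlocked reader could observe a torn value. A hypothetical interleaving, as a comment sketch:

/*
 * Hypothetical interleaving on a 32-bit CPU, without the lock:
 *
 *   v->counter is 0x00000000_00000000
 *   CPU0: generic_atomic64_set(v, 0x00000001_FFFFFFFF)
 *         -> stores low word 0xFFFFFFFF ... (interrupted here) ...
 *   CPU1: generic_atomic64_read(v)
 *         -> loads 0x00000000_FFFFFFFF, a value that was never written
 *   CPU0: -> stores high word 0x00000001
 *
 * Taking the same hashed lock in both paths makes the two 32-bit
 * halves appear as one indivisible 64-bit access.
 */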

#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
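
For readers tracing the macros: each ATOMIC64_OPS(op, c_op) invocation stamps out three functions from the templates above. Written out by hand, the ATOMIC64_OP instance produced by ATOMIC64_OPS(add, +=) is:

/* What ATOMIC64_OP(add, +=) expands to (EXPORT_SYMBOL omitted): */
void generic_atomic64_add(s64 a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;	/* "c_op" is += here */
	raw_spin_unlock_irqrestore(lock, flags);
}

The _return and fetch_ variants follow the same pattern, differing only in whether they return the new or the old value.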

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
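
A typical caller pattern for this primitive, sketched as a hypothetical refcount-style put (my_obj and my_obj_put are made-up names; assumes kernel context with <linux/atomic.h> and <linux/slab.h>). dec_if_positive returns the decremented value and never drives the counter below zero:

struct my_obj {				/* hypothetical example type */
	atomic64_t refs;
	/* ... payload ... */
};

static void my_obj_put(struct my_obj *obj)
{
	/* Returns old - 1; the store happens only when the result is
	 * >= 0, so a count already at 0 stays at 0 and we get -1. */
	if (atomic64_dec_if_positive(&obj->refs) == 0)
		kfree(obj);		/* last reference dropped */
}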

s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
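
Because cmpxchg returns the value it observed, callers can build other read-modify-write operations as retry loops on top of it. A sketch using the public atomic64_* names that these generic_* functions back on CONFIG_GENERIC_ATOMIC64 architectures (the helper name is made up):

/* Hypothetical helper: atomically clamp *v down to a limit. */
static s64 atomic64_clamp_max(atomic64_t *v, s64 limit)
{
	s64 old = atomic64_read(v);

	while (old > limit) {
		s64 seen = atomic64_cmpxchg(v, old, limit);

		if (seen == old)	/* our store won the race */
			break;
		old = seen;		/* lost the race; retry with new value */
	}
	return old;			/* value before our (possible) update */
}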

s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
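
fetch_add_unless returns the value it saw before any addition, so boolean variants fall out naturally. A sketch of the usual derivation (helper names are made up; the kernel's own add_unless/inc_not_zero fallbacks follow this shape):

/* True iff the addition actually happened. */
static bool atomic64_add_unless_sketch(atomic64_t *v, s64 a, s64 u)
{
	return atomic64_fetch_add_unless(v, a, u) != u;
}

/* "Take a reference unless the count already hit zero": */
static bool my_obj_tryget(atomic64_t *refs)	/* hypothetical helper */
{
	return atomic64_add_unless_sketch(refs, 1, 0);
}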
v4.17
 
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);
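
One common use of xchg is grab-and-reset: the old value comes back atomically while the counter restarts at zero. A hedged sketch (stats_drain and the counter are made-up names):

/* Hypothetical: read and reset a 64-bit statistics counter in one shot. */
static long long stats_drain(atomic64_t *bytes)
{
	return atomic64_xchg(bytes, 0);
}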

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);