Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Mar 24-27, 2025, special US time zones
Register
Loading...
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * atomic32.c: 32-bit atomic_t implementation
  4 *
  5 * Copyright (C) 2004 Keith M Wesolowski
  6 * Copyright (C) 2007 Kyle McMartin
  7 * 
  8 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
  9 */
 10
 11#include <linux/atomic.h>
 12#include <linux/spinlock.h>
 13#include <linux/module.h>
 14
 15#ifdef CONFIG_SMP
 16#define ATOMIC_HASH_SIZE	4
 17#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
 18
 19spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
 20	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
 21};
 22
 23#else /* SMP */
 24
 25static DEFINE_SPINLOCK(dummy);
 26#define ATOMIC_HASH_SIZE	1
 27#define ATOMIC_HASH(a)		(&dummy)
 28
 29#endif /* SMP */
 30
 31#define ATOMIC_FETCH_OP(op, c_op)					\
 32int arch_atomic_fetch_##op(int i, atomic_t *v)				\
 33{									\
 34	int ret;							\
 35	unsigned long flags;						\
 36	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
 37									\
 38	ret = v->counter;						\
 39	v->counter c_op i;						\
 40									\
 41	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
 42	return ret;							\
 43}									\
 44EXPORT_SYMBOL(arch_atomic_fetch_##op);
 45
 46#define ATOMIC_OP_RETURN(op, c_op)					\
 47int arch_atomic_##op##_return(int i, atomic_t *v)			\
 48{									\
 49	int ret;							\
 50	unsigned long flags;						\
 51	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
 52									\
 53	ret = (v->counter c_op i);					\
 54									\
 55	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
 56	return ret;							\
 57}									\
 58EXPORT_SYMBOL(arch_atomic_##op##_return);
 59
 60ATOMIC_OP_RETURN(add, +=)
 
 
 
 61
 62ATOMIC_FETCH_OP(add, +=)
 63ATOMIC_FETCH_OP(and, &=)
 64ATOMIC_FETCH_OP(or, |=)
 65ATOMIC_FETCH_OP(xor, ^=)
 66
 67#undef ATOMIC_FETCH_OP
 68#undef ATOMIC_OP_RETURN
 
 69
 70int arch_atomic_xchg(atomic_t *v, int new)
 71{
 72	int ret;
 73	unsigned long flags;
 74
 75	spin_lock_irqsave(ATOMIC_HASH(v), flags);
 76	ret = v->counter;
 77	v->counter = new;
 78	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 79	return ret;
 80}
 81EXPORT_SYMBOL(arch_atomic_xchg);
 82
 83int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 84{
 85	int ret;
 86	unsigned long flags;
 87
 88	spin_lock_irqsave(ATOMIC_HASH(v), flags);
 89	ret = v->counter;
 90	if (likely(ret == old))
 91		v->counter = new;
 92
 93	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 94	return ret;
 95}
 96EXPORT_SYMBOL(arch_atomic_cmpxchg);
 97
 98int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 99{
100	int ret;
101	unsigned long flags;
102
103	spin_lock_irqsave(ATOMIC_HASH(v), flags);
104	ret = v->counter;
105	if (ret != u)
106		v->counter += a;
107	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
108	return ret;
109}
110EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
111
112/* Atomic operations are already serializing */
113void arch_atomic_set(atomic_t *v, int i)
114{
115	unsigned long flags;
116
117	spin_lock_irqsave(ATOMIC_HASH(v), flags);
118	v->counter = i;
119	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
120}
121EXPORT_SYMBOL(arch_atomic_set);
122
/*
 * sp32___set_bit - atomically OR @mask into *@addr.
 * Returns the old word masked by @mask (non-zero iff any of the
 * masked bits were already set).
 */
unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(sp32___set_bit);
135
/*
 * sp32___clear_bit - atomically clear the @mask bits in *@addr.
 * Returns the old word masked by @mask (non-zero iff any of the
 * masked bits were previously set).
 */
unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(sp32___clear_bit);
148
/*
 * sp32___change_bit - atomically toggle the @mask bits in *@addr.
 * Returns the old word masked by @mask.
 */
unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(sp32___change_bit);
161
162unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
163{
164	unsigned long flags;
165	u32 prev;
166
167	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
168	if ((prev = *ptr) == old)
169		*ptr = new;
170	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
171
172	return (unsigned long)prev;
173}
174EXPORT_SYMBOL(__cmpxchg_u32);
175
176u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
177{
178	unsigned long flags;
179	u64 prev;
180
181	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
182	if ((prev = *ptr) == old)
183		*ptr = new;
184	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
185
186	return prev;
187}
188EXPORT_SYMBOL(__cmpxchg_u64);
189
190unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
191{
192	unsigned long flags;
193	u32 prev;
194
195	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
196	prev = *ptr;
197	*ptr = new;
198	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
199
200	return (unsigned long)prev;
201}
202EXPORT_SYMBOL(__xchg_u32);
v4.6
 
  1/*
  2 * atomic32.c: 32-bit atomic_t implementation
  3 *
  4 * Copyright (C) 2004 Keith M Wesolowski
  5 * Copyright (C) 2007 Kyle McMartin
  6 * 
  7 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
  8 */
  9
 10#include <linux/atomic.h>
 11#include <linux/spinlock.h>
 12#include <linux/module.h>
 13
 14#ifdef CONFIG_SMP
 15#define ATOMIC_HASH_SIZE	4
 16#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
 17
 18spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
 19	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
 20};
 21
 22#else /* SMP */
 23
 24static DEFINE_SPINLOCK(dummy);
 25#define ATOMIC_HASH_SIZE	1
 26#define ATOMIC_HASH(a)		(&dummy)
 27
 28#endif /* SMP */
 29
 30#define ATOMIC_OP_RETURN(op, c_op)					\
 31int atomic_##op##_return(int i, atomic_t *v)				\
 32{									\
 33	int ret;							\
 34	unsigned long flags;						\
 35	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
 36									\
 37	ret = (v->counter c_op i);					\
 
 38									\
 39	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
 40	return ret;							\
 41}									\
 42EXPORT_SYMBOL(atomic_##op##_return);
 43
 44#define ATOMIC_OP(op, c_op)						\
 45void atomic_##op(int i, atomic_t *v)					\
 46{									\
 
 47	unsigned long flags;						\
 48	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
 49									\
 50	v->counter c_op i;						\
 51									\
 52	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
 
 53}									\
 54EXPORT_SYMBOL(atomic_##op);
 55
 56ATOMIC_OP_RETURN(add, +=)
 57ATOMIC_OP(and, &=)
 58ATOMIC_OP(or, |=)
 59ATOMIC_OP(xor, ^=)
 60
 
 
 
 
 
 
 61#undef ATOMIC_OP_RETURN
 62#undef ATOMIC_OP
 63
 64int atomic_xchg(atomic_t *v, int new)
 65{
 66	int ret;
 67	unsigned long flags;
 68
 69	spin_lock_irqsave(ATOMIC_HASH(v), flags);
 70	ret = v->counter;
 71	v->counter = new;
 72	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 73	return ret;
 74}
 75EXPORT_SYMBOL(atomic_xchg);
 76
 77int atomic_cmpxchg(atomic_t *v, int old, int new)
 78{
 79	int ret;
 80	unsigned long flags;
 81
 82	spin_lock_irqsave(ATOMIC_HASH(v), flags);
 83	ret = v->counter;
 84	if (likely(ret == old))
 85		v->counter = new;
 86
 87	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 88	return ret;
 89}
 90EXPORT_SYMBOL(atomic_cmpxchg);
 91
 92int __atomic_add_unless(atomic_t *v, int a, int u)
 93{
 94	int ret;
 95	unsigned long flags;
 96
 97	spin_lock_irqsave(ATOMIC_HASH(v), flags);
 98	ret = v->counter;
 99	if (ret != u)
100		v->counter += a;
101	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
102	return ret;
103}
104EXPORT_SYMBOL(__atomic_add_unless);
105
106/* Atomic operations are already serializing */
107void atomic_set(atomic_t *v, int i)
108{
109	unsigned long flags;
110
111	spin_lock_irqsave(ATOMIC_HASH(v), flags);
112	v->counter = i;
113	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
114}
115EXPORT_SYMBOL(atomic_set);
116
/*
 * ___set_bit - atomically OR @mask into *@addr.
 * Returns the old word masked by @mask (non-zero iff any of the
 * masked bits were already set).
 */
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);
129
/*
 * ___clear_bit - atomically clear the @mask bits in *@addr.
 * Returns the old word masked by @mask.
 */
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);
142
/*
 * ___change_bit - atomically toggle the @mask bits in *@addr.
 * Returns the old word masked by @mask.
 */
unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);
155
156unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
157{
158	unsigned long flags;
159	u32 prev;
160
161	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
162	if ((prev = *ptr) == old)
163		*ptr = new;
164	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
165
166	return (unsigned long)prev;
167}
168EXPORT_SYMBOL(__cmpxchg_u32);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
170unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
171{
172	unsigned long flags;
173	u32 prev;
174
175	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
176	prev = *ptr;
177	*ptr = new;
178	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
179
180	return (unsigned long)prev;
181}
182EXPORT_SYMBOL(__xchg_u32);