v3.1
 
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different spinlock.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
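
/*
 * Illustrative sketch (not part of the original header): how the
 * ATOMIC_HASH() index works out for nearby addresses, assuming
 * L1_CACHE_BYTES == 64.  The addresses below are hypothetical.
 */
#if 0
	unsigned long a = 0x1000;	/* (0x1000 / 64) & 3 == 0 -> slot 0 */
	unsigned long b = 0x1008;	/* same cacheline as a    -> slot 0 */
	unsigned long c = 0x1040;	/* next cacheline         -> slot 1 */
	/* Words sharing a cacheline always hash to the same lock, so a
	 * word and its neighbours serialize on one arch_spinlock_t while
	 * unrelated cachelines can proceed on the other slots. */
#endif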

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */
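
/*
 * Illustrative deadlock timeline for the note above (a sketch added for
 * clarity, not part of the original header):
 *
 *   CPU0: set_bit()          takes __atomic_hash[i]
 *   CPU0: <NMI>              fires before the unlock
 *   CPU0: nmi_handler()      calls test_and_set_bit() on an address
 *                            that hashes to the same slot i
 *   CPU0: arch_spin_lock()   spins forever: the lock holder sits below
 *                            the NMI on the same CPU and can never run
 *                            to release the lock.
 */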

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there is no guarantee that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on its ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
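
/*
 * Usage sketch (illustrative; the bitmap name and bit indices are
 * hypothetical, not part of this header):
 */
#if 0
	DECLARE_BITMAP(dev_flags, 128);

	set_bit(3, dev_flags);		/* bit 3 of word 0 */
	set_bit(70, dev_flags);		/* lands in a later word; still atomic */
#endif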

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
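
/*
 * Sketch of the barrier pairing described above, with clear_bit() as
 * the release half of a simple bit lock (illustrative; LOCK_BIT and
 * lock_word are hypothetical names):
 */
#if 0
	/* ... critical-section stores ... */
	smp_mb__before_clear_bit();	/* order them before the release */
	clear_bit(LOCK_BIT, &lock_word);
#endif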

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and, on x86, may not be reordered; on other
 * architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
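
/*
 * Acquire-side counterpart to the clear_bit() release sketch above
 * (illustrative; LOCK_BIT and lock_word are hypothetical names):
 */
#if 0
	while (test_and_set_bit(LOCK_BIT, &lock_word))
		cpu_relax();	/* old value was 1: someone else holds it */
	/* old value was 0: we now own the bit lock */
#endif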

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
v4.17
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <linux/irqflags.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different spinlock.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there is no guarantee that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on its ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and, on x86, may not be reordered; on other
 * architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
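
/*
 * Usage sketch (illustrative; the flag word and bit are hypothetical):
 * consuming a "work pending" flag exactly once, even if several paths
 * race to service it.
 */
#if 0
	if (test_and_clear_bit(PENDING_BIT, &pending_flags))
		process_pending_work();	/* only one racing caller gets here */
#endif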

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
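
/*
 * Usage sketch (illustrative; PHASE_BIT and state are hypothetical
 * names): toggling a shared state bit while learning which phase was
 * left behind.
 */
#if 0
	if (test_and_change_bit(PHASE_BIT, &state))
		leave_phase_b();	/* bit was 1, now cleared */
	else
		enter_phase_b();	/* bit was 0, now set */
#endif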

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */