v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * Implementation of atomic bitops using atomic-fetch ops.
 * See Documentation/atomic_bitops.txt for details.
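 *
 * Each helper locates the word holding bit @nr with BIT_WORD()
 * (nr / BITS_PER_LONG), builds a single-bit mask with BIT_MASK()
 * (1UL << (nr % BITS_PER_LONG)), and applies one atomic fetch-op to
 * that word; e.g. nr = 70 on a 64-bit machine selects word 1 and
 * mask 1UL << 6.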
 */

static __always_inline void
arch_set_bit(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	raw_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}

static __always_inline void
arch_clear_bit(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	raw_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}

static __always_inline void
arch_change_bit(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	raw_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}

static __always_inline int
arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	old = raw_atomic_long_fetch_or(mask, (atomic_long_t *)p);
	return !!(old & mask);
}

static __always_inline int
arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	old = raw_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
	return !!(old & mask);
}

static __always_inline int
arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	old = raw_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
	return !!(old & mask);
}

#include <asm-generic/bitops/instrumented-atomic.h>

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
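For context: kernel code does not call the arch_*() helpers above directly. The instrumented-atomic.h include at the bottom wraps them as set_bit(), test_and_set_bit() and friends (adding KASAN/KCSAN instrumentation), and those wrappers are what <linux/bitops.h> exposes. A minimal usage sketch under those wrappers; the mydev_flags word, the MYDEV_INIT_DONE bit and mydev_do_init() are hypothetical names for illustration:

#include <linux/bitops.h>

#define MYDEV_INIT_DONE	0		/* hypothetical bit number */

static unsigned long mydev_flags;	/* hypothetical flag word */

static void mydev_init_once(void)
{
	/* Atomic read-modify-write: only the first caller sees the bit clear. */
	if (!test_and_set_bit(MYDEV_INIT_DONE, &mydev_flags))
		mydev_do_init();	/* hypothetical one-time setup */
}

Here test_and_set_bit() ends up in arch_test_and_set_bit() above: one raw_atomic_long_fetch_or() on the word holding the bit, returning whether the bit was already set.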
v4.17
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <linux/irqflags.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
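/*
 * Note on the !CONFIG_SMP branch above: with a single CPU there is no
 * other processor to race with, so disabling local interrupts around
 * the read-modify-write is sufficient (NMIs excepted, see below).
 */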

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
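 * Example: a CPU takes _atomic_spin_lock_irqsave() inside set_bit(), an
 * NMI arrives (local_irq_save() does not mask NMIs), and the NMI handler
 * calls test_and_set_bit() on an address that hashes to the same lock;
 * it then spins forever on a lock its own CPU already holds.
 *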
 * by Keith Owens
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and, on x86, may not be reordered.
 * On other architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered.
 * On other architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and, on x86, cannot be reordered.
 * On other architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
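The pre-4.18 generic fallback above gets atomicity from locking rather than atomic instructions: the target address is hashed to one of ATOMIC_HASH_SIZE spinlocks (one lock per cacheline of address space), and the plain read-modify-write runs under that lock with local interrupts disabled. A standalone sketch of the hash (userspace C11; it assumes a 64-byte cacheline and an 8-byte unsigned long, and all names are local to the example):

#include <stdio.h>

#define L1_CACHE_BYTES	 64	/* assumption: typical L1 cacheline size */
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH_IDX(a) \
	((((unsigned long)(a)) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1))

int main(void)
{
	/* Cacheline-aligned so the offsets below are exact. */
	static _Alignas(64) unsigned long words[64];

	printf("words[0]  -> lock %lu\n", ATOMIC_HASH_IDX(&words[0]));
	/* Same cacheline as words[0] -> same lock. */
	printf("words[1]  -> lock %lu\n", ATOMIC_HASH_IDX(&words[1]));
	/* One cacheline (64 bytes) away -> the next lock. */
	printf("words[8]  -> lock %lu\n", ATOMIC_HASH_IDX(&words[8]));
	/* Four cachelines away -> wraps back to the same lock as words[0]. */
	printf("words[32] -> lock %lu\n", ATOMIC_HASH_IDX(&words[32]));
	return 0;
}

Because the index wraps every ATOMIC_HASH_SIZE cachelines, unrelated bitops can still contend on the same lock; the scheme trades scalability for a small fixed lock table on machines without atomic read-modify-write instructions.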