v3.15
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

static inline void dsb_sev(void)
{

	dsb(ishst);
	__asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}
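
The ticket scheme implemented above with LDREX/STREX and WFE/SEV can be sketched in portable user-space C: each locker atomically takes the next ticket and spins until owner catches up to it, and unlock simply advances owner. A minimal illustration, assuming C11 <stdatomic.h> (it busy-waits where the kernel code parks in WFE and is woken by dsb_sev()):

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
	_Atomic uint16_t next;	/* next ticket to hand out */
	_Atomic uint16_t owner;	/* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
	/* take a ticket: the portable stand-in for the LDREX/ADD/STREX
	 * retry loop in arch_spin_lock() above */
	uint16_t ticket = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);

	/* spin until our ticket is served; acquire pairs with the
	 * release in ticket_unlock(), much like the smp_mb() above */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != ticket)
		;
}

static void ticket_unlock(struct ticket_lock *l)
{
	/* hand the lock to the next waiter in FIFO order */
	atomic_store_explicit(&l->owner,
		atomic_load_explicit(&l->owner, memory_order_relaxed) + 1,
		memory_order_release);
}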

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
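
arch_spin_is_contended() reads the queue depth straight off the two ticket counters: next - owner is 0 when the lock is free, 1 when it is held with nobody queued, and greater than 1 once at least one CPU is waiting. A small stand-alone illustration of that arithmetic, using a hypothetical struct that mirrors the owner/next pair:

#include <stdint.h>

struct raw_tickets { uint16_t owner, next; };

static int tickets_contended(struct raw_tickets t)
{
	/* next == owner      -> unlocked
	 * next - owner == 1  -> held, no waiters
	 * next - owner  > 1  -> held, with CPUs queued behind the holder */
	return (uint16_t)(t.next - t.owner) > 1;
}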

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)
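
As the RWLOCKS comment says, a writer simply claims bit 31 of an otherwise-zero word and releases by storing zero. A rough user-space equivalent of arch_write_lock()/arch_write_unlock(), assuming C11 atomics and a plain CAS loop in place of the LDREX/STREXEQ and WFE used above:

#include <stdatomic.h>
#include <stdint.h>

static void write_lock(_Atomic uint32_t *lock)
{
	uint32_t expected = 0;

	/* only succeed when no readers and no writer are present,
	 * mirroring the teq/strexeq sequence in arch_write_lock() */
	while (!atomic_compare_exchange_weak_explicit(lock, &expected,
						      UINT32_C(0x80000000),
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;	/* CAS rewrote expected on failure */
}

static void write_unlock(_Atomic uint32_t *lock)
{
	/* the writer holds the lock exclusively, so a release store of
	 * zero is enough -- the same idea as the plain str above */
	atomic_store_explicit(lock, 0, memory_order_release);
}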

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)
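
The read-side rules from the comment above (increment, back off if the result went negative, decrement on unlock) translate directly into a CAS-based sketch. Again this assumes C11 atomics; the kernel version relies on the exclusive monitor and the strexpl/rsbpls conditionals instead:

#include <stdatomic.h>
#include <stdint.h>

static void read_lock(_Atomic uint32_t *lock)
{
	for (;;) {
		uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);

		/* bit 31 set means a writer holds the lock: retry */
		if (old & UINT32_C(0x80000000))
			continue;

		/* add ourselves as one more reader */
		if (atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return;
	}
}

static void read_unlock(_Atomic uint32_t *lock)
{
	/* drop our reader; the last reader leaves the word at zero,
	 * which is when arch_read_unlock() above fires dsb_sev() */
	atomic_fetch_sub_explicit(lock, 1, memory_order_release);
}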

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */
v5.4
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

static inline void dsb_sev(void)
{

	dsb(ishst);
	__asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
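
The "subs %1, %0, %0, ror #16" in the trylock above is a compact way of testing next == owner: rotating the 32-bit slock word by 16 bits swaps its two half-words, so subtracting the rotated value from the original yields zero exactly when the owner and next tickets match. The same test written in plain C:

#include <stdint.h>

static int slock_is_unlocked(uint32_t slock)
{
	/* rotate right by 16: swaps the owner/next half-words */
	uint32_t rotated = (slock >> 16) | (slock << 16);

	/* zero difference <=> the half-words are equal <=> lock is free */
	return slock - rotated == 0;
}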

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"	.syntax unified\n"
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbspl	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

#endif /* __ASM_SPINLOCK_H */