v6.8: arch/sparc/include/asm/spinlock_64.h

/* SPDX-License-Identifier: GPL-2.0 */
/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/processor.h>
#include <asm/barrier.h>
#include <asm/qspinlock.h>
#include <asm/qrwlock.h>

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */
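
By v6.8 this header no longer carries a SPARC-specific lock implementation; it simply pulls in the kernel's generic queued spinlock and queued rwlock code via <asm/qspinlock.h> and <asm/qrwlock.h>. Either way, drivers and core code never call the arch_* routines directly; they take locks through the generic <linux/spinlock.h> API. A minimal usage sketch follows; the structure and function names (demo_dev, demo_dev_bump) are invented here for illustration.

#include <linux/spinlock.h>

/* Hypothetical per-device state protected by a spinlock. */
struct demo_dev {
	spinlock_t lock;
	unsigned int count;
};

static void demo_dev_init(struct demo_dev *dev)
{
	spin_lock_init(&dev->lock);
	dev->count = 0;
}

/* Safe against interrupt context: IRQs are disabled while the lock is held. */
static void demo_dev_bump(struct demo_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->count++;
	spin_unlock_irqrestore(&dev->lock, flags);
}
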
v4.10.11: arch/sparc/include/asm/spinlock_64.h
 
/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/processor.h>
#include <asm/barrier.h>

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* Because we play games to save cycles in the non-contention case, we
 * need to be extra careful about branch targets into the "spinning"
 * code.  They live in their own section, but the newer V9 branches
 * have a shorter range than the traditional 32-bit sparc branch
 * variants.  The rule is that the branches that go into and out of
 * the spinner sections must be pre-V9 branches.
 */

#define arch_spin_is_locked(lp)	((lp)->lock != 0)

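/* Wait, without acquiring, until the lock is observed free.  The
 * acquire ordering from smp_cond_load_acquire() keeps later accesses
 * from being pulled ahead of that observation.
 */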
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}

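/* Acquire the byte lock with ldstub (atomically set the byte to 0xff
 * and return the old value).  If it was already held, spin in the
 * out-of-line .subsection 2 code with plain loads until the byte
 * reads as zero, then retry the ldstub.
 */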
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

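/* One ldstub attempt, no spinning: returns nonzero if the old byte
 * value was zero, i.e. the lock was just acquired.
 */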
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

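/* Release: store zero to the lock byte.  The "memory" clobber keeps
 * the compiler from moving protected accesses past the store.
 */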
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

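/* Like arch_spin_lock(), but while spinning on a contended lock the
 * caller-supplied interrupt level in 'flags' is written back to %pil
 * so pending interrupts can be serviced; the saved %pil is restored
 * before the ldstub is retried.
 */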
static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

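/* The rwlock word is a signed 32-bit count: positive values count
 * active readers, the sign bit (0x80000000) marks a writer.  Take a
 * read lock by cas-incrementing the count; if the word is negative a
 * writer holds it, so spin out of line until it goes non-negative.
 */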
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

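/* Returns 0 as soon as a writer is observed (negative count),
 * otherwise cas-increments the reader count and returns 1, retrying
 * only on cas races with other CPUs.
 */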
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

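/* Drop a read lock: cas-decrement the reader count, retrying until
 * the cas wins.
 */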
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

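/* Take the write lock: wait for the count to reach zero (no readers,
 * no writer), then cas in the 0x80000000 writer bit; the contended
 * wait again lives in .subsection 2.
 */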
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

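/* Release the write lock by storing zero over the whole word. */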
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

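/* Try to take the write lock without spinning: succeeds (returns 1)
 * only when the word is zero and the cas installing the writer bit
 * wins; returns 0 as soon as any reader or writer is observed.
 */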
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define arch_read_lock_flags(p, f) arch_read_lock(p)
#define arch_write_lock_flags(p, f) arch_write_lock(p)

#define arch_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define arch_write_can_lock(rw)	(!(rw)->lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */
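
For comparison outside the kernel, here is a minimal userspace analogue of the ldstub-based byte lock above, written with C11 atomics. It is only a sketch of the test-and-test-and-set idea (atomic exchange on the fast path, plain loads while spinning); the demo_* names are invented, and it deliberately ignores everything the kernel version handles around it (interrupt levels, debug hooks, the rwlock variants).

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative only: one byte, 0 = free, nonzero = held, mirroring the
 * ldstub-based lock above.
 */
typedef struct {
	atomic_uchar locked;
} demo_spinlock_t;

static inline void demo_spin_lock(demo_spinlock_t *lp)
{
	for (;;) {
		/* "ldstub": atomically set the byte and fetch the old value. */
		if (atomic_exchange_explicit(&lp->locked, 0xff,
					     memory_order_acquire) == 0)
			return;
		/* Contended: spin with plain loads until it looks free. */
		while (atomic_load_explicit(&lp->locked,
					    memory_order_relaxed) != 0)
			;
	}
}

static inline bool demo_spin_trylock(demo_spinlock_t *lp)
{
	return atomic_exchange_explicit(&lp->locked, 0xff,
					memory_order_acquire) == 0;
}

static inline void demo_spin_unlock(demo_spinlock_t *lp)
{
	atomic_store_explicit(&lp->locked, 0, memory_order_release);
}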