Linux kernel source capture: arch/alpha/include/asm/spinlock.h
Two versions of this header follow: kernel v3.1 first, then kernel v6.13.7.
 
  1#ifndef _ALPHA_SPINLOCK_H
  2#define _ALPHA_SPINLOCK_H
  3
  4#include <asm/system.h>
  5#include <linux/kernel.h>
  6#include <asm/current.h>
 
 
  7
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

/* Alpha has no IRQ-reenabling spin loop: the flags argument is ignored. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Non-zero lock word means held. */
#define arch_spin_is_locked(x)	((x)->lock != 0)
/* Busy-wait (without acquiring) until the lock word reads zero. */
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while ((x)->lock)
 
 
 
 19
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	/* Full barrier orders the critical section before the releasing store. */
	mb();
	lock->lock = 0;
}
 25
static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

	/*
	 * Acquire via Alpha load-locked/store-conditional (ldl_l/stl_c):
	 * load the lock word; if non-zero (held), spin with plain ldl in
	 * the out-of-line loop (.subsection 2) until it reads zero, then
	 * retry the locked sequence.  A failed stl_c (tmp == 0) takes the
	 * same slow path.  The trailing mb orders the critical section
	 * after the acquiring store.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	bne	%0,2f\n"
	"	lda	%0,1\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	bne	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}
 45
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	/*
	 * test_and_set_bit() returns the old value of bit 0, so this
	 * yields 1 when we took the lock and 0 when it was already held.
	 */
	return !test_and_set_bit(0, &lock->lock);
}
 50
 51/***********************************************************/
 52
 53static inline int arch_read_can_lock(arch_rwlock_t *lock)
 54{
 55	return (lock->lock & 1) == 0;
 56}
 57
 58static inline int arch_write_can_lock(arch_rwlock_t *lock)
 59{
 60	return lock->lock == 0;
 61}
 62
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	long regx;

	/*
	 * blbs: if the low (writer) bit is set, spin out of line until it
	 * clears, then retry.  Each reader subtracts 2 from the lock word
	 * (arch_read_unlock adds it back), keeping bit 0 free for the
	 * writer flag.  A failed stl_c also takes the slow path.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
 82
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	long regx;

	/*
	 * A writer may proceed only when the entire lock word is zero
	 * (no writer, no readers); it then stores 1.  Contention and a
	 * failed stl_c both spin in the out-of-line loop until the word
	 * reads zero again.  mb orders the critical section after acquire.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	bne	%1,6f\n"
	"	lda	%1,1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	bne	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
102
static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	/*
	 * success starts at 0; if the writer bit is set we skip straight
	 * to label 2 and return 0.  Otherwise attempt stl_c of (word - 2):
	 * stl_c leaves non-zero in %2 on success, so success doubles as
	 * the return value.  Only a failed stl_c retries (label 6);
	 * a held writer lock does not.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	blbs	%1,2f\n"
	"	subl	%1,2,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
124
static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	/*
	 * success starts at 0; a non-zero lock word (writer or readers)
	 * jumps to label 2 and returns 0.  Otherwise try stl_c of 1:
	 * stl_c leaves non-zero in %2 on success, which becomes the
	 * return value.  Only a failed stl_c retries from the top.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	bne	%1,2f\n"
	"	lda	%2,1\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
146
static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	/*
	 * Leading mb orders the read-side critical section before the
	 * release.  Add back the 2 this reader subtracted in
	 * arch_read_lock(); retry only if stl_c fails.
	 */
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldl_l	%1,%0\n"
	"	addl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
162
static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	/* The writer owns the word exclusively: barrier, then plain store. */
	mb();
	lock->lock = 0;
}
168
/* The _flags variants discard the flags argument (no IRQ handling here). */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Contention back-off hooks: just a CPU pause on Alpha. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
175
176#endif /* _ALPHA_SPINLOCK_H */
Second copy of the same header, from kernel v6.13.7:
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ALPHA_SPINLOCK_H
  3#define _ALPHA_SPINLOCK_H
  4
 
  5#include <linux/kernel.h>
  6#include <asm/current.h>
  7#include <asm/barrier.h>
  8#include <asm/processor.h>
  9
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

/* Non-zero lock word means held. */
#define arch_spin_is_locked(x)	((x)->lock != 0)
 18
 19static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 20{
 21        return lock.lock == 0;
 22}
 23
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	/* Full barrier orders the critical section before the releasing store. */
	mb();
	lock->lock = 0;
}
 29
static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

	/*
	 * Acquire via Alpha load-locked/store-conditional (ldl_l/stl_c):
	 * load the lock word; if non-zero (held), spin with plain ldl in
	 * the out-of-line loop (.subsection 2) until it reads zero, then
	 * retry the locked sequence.  A failed stl_c (tmp == 0) takes the
	 * same slow path.  The trailing mb orders the critical section
	 * after the acquiring store.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	bne	%0,2f\n"
	"	lda	%0,1\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	bne	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}
 49
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	/*
	 * test_and_set_bit() returns the old value of bit 0, so this
	 * yields 1 when we took the lock and 0 when it was already held.
	 */
	return !test_and_set_bit(0, &lock->lock);
}
 54
 55/***********************************************************/
 56
 
 
 
 
 
 
 
 
 
 
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	long regx;

	/*
	 * blbs: if the low (writer) bit is set, spin out of line until it
	 * clears, then retry.  Each reader subtracts 2 from the lock word
	 * (arch_read_unlock adds it back), keeping bit 0 free for the
	 * writer flag.  A failed stl_c also takes the slow path.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
 76
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	long regx;

	/*
	 * A writer may proceed only when the entire lock word is zero
	 * (no writer, no readers); it then stores 1.  Contention and a
	 * failed stl_c both spin in the out-of-line loop until the word
	 * reads zero again.  mb orders the critical section after acquire.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	bne	%1,6f\n"
	"	lda	%1,1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	bne	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
 96
static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	/*
	 * success starts at 0; if the writer bit is set we skip straight
	 * to label 2 and return 0.  Otherwise attempt stl_c of (word - 2):
	 * stl_c leaves non-zero in %2 on success, so success doubles as
	 * the return value.  Only a failed stl_c retries (label 6);
	 * a held writer lock does not.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	blbs	%1,2f\n"
	"	subl	%1,2,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
118
static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	/*
	 * success starts at 0; a non-zero lock word (writer or readers)
	 * jumps to label 2 and returns 0.  Otherwise try stl_c of 1:
	 * stl_c leaves non-zero in %2 on success, which becomes the
	 * return value.  Only a failed stl_c retries from the top.
	 */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	bne	%1,2f\n"
	"	lda	%2,1\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
140
static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	/*
	 * Leading mb orders the read-side critical section before the
	 * release.  Add back the 2 this reader subtracted in
	 * arch_read_lock(); retry only if stl_c fails.
	 */
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldl_l	%1,%0\n"
	"	addl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
156
static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	/* The writer owns the word exclusively: barrier, then plain store. */
	mb();
	lock->lock = 0;
}
 
 
 
 
 
 
 
162
163#endif /* _ALPHA_SPINLOCK_H */