Linux Audio

Check our new training course

Buildroot integration, development and maintenance

Need a Buildroot system for your embedded project?
Loading...
v3.1
  1#ifndef _ALPHA_SPINLOCK_H
  2#define _ALPHA_SPINLOCK_H
  3
  4#include <asm/system.h>
  5#include <linux/kernel.h>
  6#include <asm/current.h>
  7
  8/*
  9 * Simple spin lock operations.  There are two variants, one clears IRQ's
 10 * on the local processor, one does not.
 11 *
 12 * We make no fairness assumptions. They have a cost.
 13 */
 14
/* The flags variant ignores the saved IRQ flags and takes the lock normally. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Lock word is nonzero exactly while the lock is held. */
#define arch_spin_is_locked(x)	((x)->lock != 0)
/* Spin (without acquiring) until the lock is observed free. */
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while ((x)->lock)
 19
/*
 * Release the spinlock.  The mb() orders every memory access made
 * inside the critical section before the plain store that frees the
 * lock word (release semantics); the statement order is essential.
 */
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}
 25
/*
 * Acquire the spinlock, spinning until it is free.
 *
 * Classic Alpha LL/SC loop: ldl_l loads the lock word with a
 * reservation; if it is already nonzero we branch to the out-of-line
 * wait loop at 2:, which spins on a plain (cheaper, non-locked) ldl
 * until the word reads zero and then retries from 1:.  stl_c stores 1
 * conditionally and clears %0 if the reservation was lost, in which
 * case we also go back to waiting.  The trailing mb gives acquire
 * ordering once the lock is taken.  The cold path lives in
 * .subsection 2 so the hot path stays straight-line.
 */
static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	bne	%0,2f\n"
	"	lda	%0,1\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	bne	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}
 45
 46static inline int arch_spin_trylock(arch_spinlock_t *lock)
 47{
 48	return !test_and_set_bit(0, &lock->lock);
 49}
 50
 51/***********************************************************/
 52
 53static inline int arch_read_can_lock(arch_rwlock_t *lock)
 54{
 55	return (lock->lock & 1) == 0;
 56}
 57
 58static inline int arch_write_can_lock(arch_rwlock_t *lock)
 59{
 60	return lock->lock == 0;
 61}
 62
/*
 * Acquire the rwlock for reading.
 *
 * The low bit of the lock word is the writer flag; each reader
 * subtracts 2, keeping the reader count in the remaining bits without
 * disturbing that flag.  blbs (branch if low bit set) sends us to the
 * out-of-line loop at 6:, which spins on a plain ldl until the writer
 * bit clears and then retries the LL/SC sequence from 1:.  A failed
 * stl_c (register cleared) retries the same way.  The trailing mb
 * gives acquire ordering.
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
 82
/*
 * Acquire the rwlock for writing.
 *
 * A writer needs the entire lock word to be zero (no writer flag, no
 * reader counts) and then stores 1.  While the word is nonzero we
 * spin out of line on a plain ldl at 6:; a lost stl_c reservation
 * retries the same way.  The trailing mb gives acquire ordering.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	bne	%1,6f\n"
	"	lda	%1,1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	bne	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
102
/*
 * Make one attempt to take the rwlock for reading.
 * Returns nonzero on success, zero if a writer holds the lock.
 *
 * success starts at 0; when the writer bit is set we skip straight to
 * the mb at 2: and return that 0.  Otherwise we attempt the stl_c,
 * whose own result (1 = stored, 0 = reservation lost) lands in
 * success.  A lost reservation branches to 6: and retries the whole
 * sequence, so 0 is only ever returned because a writer was seen.
 */
static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	blbs	%1,2f\n"
	"	subl	%1,2,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
124
/*
 * Make one attempt to take the rwlock for writing.
 * Returns nonzero on success, zero if the lock is held at all.
 *
 * Mirrors arch_read_trylock: success starts at 0 and is returned as-is
 * when the lock word is nonzero (any reader or writer).  Otherwise the
 * stl_c of 1 leaves its result (1 = stored, 0 = reservation lost) in
 * success; a lost reservation branches to 6: and retries, so failure
 * is only reported for genuine contention.
 */
static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	bne	%1,2f\n"
	"	lda	%2,1\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
146
/*
 * Drop one reader.  The leading mb orders the critical section before
 * the release; the LL/SC loop then adds back the 2 this reader
 * subtracted in arch_read_lock, retrying out of line if the
 * store-conditional loses its reservation.
 */
static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldl_l	%1,%0\n"
	"	addl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
162
/*
 * Release the write lock.  A writer took the lock only when the word
 * was zero and stored exactly 1, so a plain store of 0 suffices — no
 * LL/SC needed.  The mb() orders the critical section before the
 * release.
 */
static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	mb();
	lock->lock = 0;
}
168
/* The *_flags rwlock variants ignore the saved IRQ flags. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Contention backoff is just a CPU-relax hint. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
175
176#endif /* _ALPHA_SPINLOCK_H */
v3.5.6
  1#ifndef _ALPHA_SPINLOCK_H
  2#define _ALPHA_SPINLOCK_H
  3
 
  4#include <linux/kernel.h>
  5#include <asm/current.h>
  6
  7/*
  8 * Simple spin lock operations.  There are two variants, one clears IRQ's
  9 * on the local processor, one does not.
 10 *
 11 * We make no fairness assumptions. They have a cost.
 12 */
 13
/* The flags variant ignores the saved IRQ flags and takes the lock normally. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Lock word is nonzero exactly while the lock is held. */
#define arch_spin_is_locked(x)	((x)->lock != 0)
/* Spin (without acquiring) until the lock is observed free. */
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while ((x)->lock)
 18
/*
 * Release the spinlock.  The mb() orders every memory access made
 * inside the critical section before the plain store that frees the
 * lock word (release semantics); the statement order is essential.
 */
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}
 24
/*
 * Acquire the spinlock, spinning until it is free.
 *
 * Classic Alpha LL/SC loop: ldl_l loads the lock word with a
 * reservation; if it is already nonzero we branch to the out-of-line
 * wait loop at 2:, which spins on a plain (cheaper, non-locked) ldl
 * until the word reads zero and then retries from 1:.  stl_c stores 1
 * conditionally and clears %0 if the reservation was lost, in which
 * case we also go back to waiting.  The trailing mb gives acquire
 * ordering once the lock is taken.  The cold path lives in
 * .subsection 2 so the hot path stays straight-line.
 */
static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	bne	%0,2f\n"
	"	lda	%0,1\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	bne	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}
 44
 45static inline int arch_spin_trylock(arch_spinlock_t *lock)
 46{
 47	return !test_and_set_bit(0, &lock->lock);
 48}
 49
 50/***********************************************************/
 51
 52static inline int arch_read_can_lock(arch_rwlock_t *lock)
 53{
 54	return (lock->lock & 1) == 0;
 55}
 56
 57static inline int arch_write_can_lock(arch_rwlock_t *lock)
 58{
 59	return lock->lock == 0;
 60}
 61
/*
 * Acquire the rwlock for reading.
 *
 * The low bit of the lock word is the writer flag; each reader
 * subtracts 2, keeping the reader count in the remaining bits without
 * disturbing that flag.  blbs (branch if low bit set) sends us to the
 * out-of-line loop at 6:, which spins on a plain ldl until the writer
 * bit clears and then retries the LL/SC sequence from 1:.  A failed
 * stl_c (register cleared) retries the same way.  The trailing mb
 * gives acquire ordering.
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
 81
/*
 * Acquire the rwlock for writing.
 *
 * A writer needs the entire lock word to be zero (no writer flag, no
 * reader counts) and then stores 1.  While the word is nonzero we
 * spin out of line on a plain ldl at 6:; a lost stl_c reservation
 * retries the same way.  The trailing mb gives acquire ordering.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	bne	%1,6f\n"
	"	lda	%1,1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	bne	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
101
/*
 * Make one attempt to take the rwlock for reading.
 * Returns nonzero on success, zero if a writer holds the lock.
 *
 * success starts at 0; when the writer bit is set we skip straight to
 * the mb at 2: and return that 0.  Otherwise we attempt the stl_c,
 * whose own result (1 = stored, 0 = reservation lost) lands in
 * success.  A lost reservation branches to 6: and retries the whole
 * sequence, so 0 is only ever returned because a writer was seen.
 */
static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	blbs	%1,2f\n"
	"	subl	%1,2,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
123
/*
 * Make one attempt to take the rwlock for writing.
 * Returns nonzero on success, zero if the lock is held at all.
 *
 * Mirrors arch_read_trylock: success starts at 0 and is returned as-is
 * when the lock word is nonzero (any reader or writer).  Otherwise the
 * stl_c of 1 leaves its result (1 = stored, 0 = reservation lost) in
 * success; a lost reservation branches to 6: and retries, so failure
 * is only reported for genuine contention.
 */
static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	bne	%1,2f\n"
	"	lda	%2,1\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
145
/*
 * Drop one reader.  The leading mb orders the critical section before
 * the release; the LL/SC loop then adds back the 2 this reader
 * subtracted in arch_read_lock, retrying out of line if the
 * store-conditional loses its reservation.
 */
static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldl_l	%1,%0\n"
	"	addl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
161
/*
 * Release the write lock.  A writer took the lock only when the word
 * was zero and stored exactly 1, so a plain store of 0 suffices — no
 * LL/SC needed.  The mb() orders the critical section before the
 * release.
 */
static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	mb();
	lock->lock = 0;
}
167
/* The *_flags rwlock variants ignore the saved IRQ flags. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Contention backoff is just a CPU-relax hint. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
174
175#endif /* _ALPHA_SPINLOCK_H */