arch/arc/include/asm/spinlock.h, v4.10.11
  1/*
  2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License version 2 as
  6 * published by the Free Software Foundation.
  7 */
  8
  9#ifndef __ASM_SPINLOCK_H
 10#define __ASM_SPINLOCK_H
 11
 12#include <asm/spinlock_types.h>
 13#include <asm/processor.h>
 14#include <asm/barrier.h>
 15
 16#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 17#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
 18
 19static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 20{
 21	smp_cond_load_acquire(&lock->slock, !VAL);
 22}
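/*
 * Editor's illustration (not part of the kernel source): the
 * smp_cond_load_acquire(&lock->slock, !VAL) above spins until the lock
 * word reads 0 (__ARCH_SPIN_LOCK_UNLOCKED__) and then provides ACQUIRE
 * ordering.  Open-coded, it is roughly the loop below (the function name
 * is illustrative; note the helper itself is gone in the v4.17 copy
 * further down, after arch_spin_unlock_wait() was removed).
 */
static inline void sketch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (READ_ONCE(lock->slock))		/* !VAL: wait for UNLOCKED (0) */
		cpu_relax();
	smp_acquire__after_ctrl_dep();		/* the ACQUIRE half of the helper */
}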
 23
 24#ifdef CONFIG_ARC_HAS_LLSC
 25
 26static inline void arch_spin_lock(arch_spinlock_t *lock)
 27{
 28	unsigned int val;
 29
 30	smp_mb();
 31
 32	__asm__ __volatile__(
 33	"1:	llock	%[val], [%[slock]]	\n"
 34	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
 35	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 36	"	bnz	1b			\n"
 37	"					\n"
 38	: [val]		"=&r"	(val)
 39	: [slock]	"r"	(&(lock->slock)),
 40	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 41	: "memory", "cc");
 42
 43	smp_mb();
 44}
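/*
 * Editor's illustration (not part of the kernel source): the llock/scond
 * pair above is a load-locked/store-conditional retry loop.  It behaves
 * like the following compare-and-swap loop written with the GCC __atomic
 * builtins (illustrative name; 0/1 stand in for the assumed values of
 * __ARCH_SPIN_LOCK_UNLOCKED__/__ARCH_SPIN_LOCK_LOCKED__; the real code
 * uses explicit smp_mb() before/after instead of an acquire CAS).
 */
static inline void sketch_llsc_spin_lock(unsigned int *slock)
{
	unsigned int expected = 0;			/* UNLOCKED */

	/* retry while the word is LOCKED or the store-conditional fails */
	while (!__atomic_compare_exchange_n(slock, &expected, 1 /* LOCKED */,
					    true, __ATOMIC_ACQUIRE,
					    __ATOMIC_RELAXED))
		expected = 0;	/* builtin wrote back the value it observed */
}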
 45
 46/* 1 - lock taken successfully */
 47static inline int arch_spin_trylock(arch_spinlock_t *lock)
 48{
 49	unsigned int val, got_it = 0;
 50
 51	smp_mb();
 52
 53	__asm__ __volatile__(
 54	"1:	llock	%[val], [%[slock]]	\n"
 55	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
 56	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 57	"	bnz	1b			\n"
 58	"	mov	%[got_it], 1		\n"
 59	"4:					\n"
 60	"					\n"
 61	: [val]		"=&r"	(val),
 62	  [got_it]	"+&r"	(got_it)
 63	: [slock]	"r"	(&(lock->slock)),
 64	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 65	: "memory", "cc");
 66
 67	smp_mb();
 68
 69	return got_it;
 70}
 71
 72static inline void arch_spin_unlock(arch_spinlock_t *lock)
 73{
 74	smp_mb();
 75
 76	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
 77
 78	smp_mb();
 79}
 80
 81/*
 82 * Read-write spinlocks, allowing multiple readers but only one writer.
 83 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 84 */
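/*
 * Editor's illustration (not part of the kernel source): the rwlock state
 * lives entirely in rw->counter.  It starts at __ARCH_RW_LOCK_UNLOCKED__
 * (a large positive "reader budget" defined in spinlock_types.h), each
 * reader decrements it, and a writer drops it to 0.  In portable C with
 * the GCC __atomic builtins, the two trylock paths below correspond
 * roughly to (illustrative names):
 */
static inline int sketch_read_trylock(int *counter)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);

	while (old > 0) {		/* 0 means a writer holds the lock */
		/* claim one reader slot: counter-- */
		if (__atomic_compare_exchange_n(counter, &old, old - 1, true,
						__ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return 1;
		/* on failure, 'old' now holds the value that was observed */
	}
	return 0;
}

static inline int sketch_write_trylock(int *counter, int unlocked_val)
{
	int old = unlocked_val;		/* writer needs the lock fully idle */

	/* UNLOCKED -> 0 claims exclusive ownership in one shot */
	return __atomic_compare_exchange_n(counter, &old, 0, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}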
 85
 86static inline void arch_read_lock(arch_rwlock_t *rw)
 87{
 88	unsigned int val;
 89
 90	smp_mb();
 91
 92	/*
 93	 * zero means writer holds the lock exclusively, deny Reader.
 94	 * Otherwise grant lock to first/subseq reader
 95	 *
 96	 * 	if (rw->counter > 0) {
 97	 *		rw->counter--;
 98	 *		ret = 1;
 99	 *	}
100	 */
101
102	__asm__ __volatile__(
103	"1:	llock	%[val], [%[rwlock]]	\n"
104	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
105	"	sub	%[val], %[val], 1	\n"	/* reader lock */
106	"	scond	%[val], [%[rwlock]]	\n"
107	"	bnz	1b			\n"
108	"					\n"
109	: [val]		"=&r"	(val)
110	: [rwlock]	"r"	(&(rw->counter)),
111	  [WR_LOCKED]	"ir"	(0)
112	: "memory", "cc");
113
114	smp_mb();
115}
116
117/* 1 - lock taken successfully */
118static inline int arch_read_trylock(arch_rwlock_t *rw)
119{
120	unsigned int val, got_it = 0;
121
122	smp_mb();
123
124	__asm__ __volatile__(
125	"1:	llock	%[val], [%[rwlock]]	\n"
126	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
127	"	sub	%[val], %[val], 1	\n"	/* counter-- */
128	"	scond	%[val], [%[rwlock]]	\n"
129	"	bnz	1b			\n"	/* retry if collided with someone */
130	"	mov	%[got_it], 1		\n"
131	"					\n"
132	"4: ; --- done ---			\n"
133
134	: [val]		"=&r"	(val),
135	  [got_it]	"+&r"	(got_it)
136	: [rwlock]	"r"	(&(rw->counter)),
137	  [WR_LOCKED]	"ir"	(0)
138	: "memory", "cc");
139
140	smp_mb();
141
142	return got_it;
143}
144
145static inline void arch_write_lock(arch_rwlock_t *rw)
146{
147	unsigned int val;
148
149	smp_mb();
150
151	/*
152	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
153	 * deny writer. Otherwise if unlocked grant to writer
154	 * Hence the claim that Linux rwlocks are unfair to writers.
155	 * (can be starved for an indefinite time by readers).
156	 *
157	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
158	 *		rw->counter = 0;
159	 *		ret = 1;
160	 *	}
161	 */
162
163	__asm__ __volatile__(
164	"1:	llock	%[val], [%[rwlock]]	\n"
165	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
166	"	mov	%[val], %[WR_LOCKED]	\n"
167	"	scond	%[val], [%[rwlock]]	\n"
168	"	bnz	1b			\n"
169	"					\n"
170	: [val]		"=&r"	(val)
171	: [rwlock]	"r"	(&(rw->counter)),
172	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
173	  [WR_LOCKED]	"ir"	(0)
174	: "memory", "cc");
175
176	smp_mb();
177}
178
179/* 1 - lock taken successfully */
180static inline int arch_write_trylock(arch_rwlock_t *rw)
181{
182	unsigned int val, got_it = 0;
183
184	smp_mb();
185
186	__asm__ __volatile__(
187	"1:	llock	%[val], [%[rwlock]]	\n"
188	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
189	"	mov	%[val], %[WR_LOCKED]	\n"
190	"	scond	%[val], [%[rwlock]]	\n"
191	"	bnz	1b			\n"	/* retry if collided with someone */
192	"	mov	%[got_it], 1		\n"
193	"					\n"
194	"4: ; --- done ---			\n"
195
196	: [val]		"=&r"	(val),
197	  [got_it]	"+&r"	(got_it)
198	: [rwlock]	"r"	(&(rw->counter)),
199	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
200	  [WR_LOCKED]	"ir"	(0)
201	: "memory", "cc");
202
203	smp_mb();
204
205	return got_it;
206}
207
208static inline void arch_read_unlock(arch_rwlock_t *rw)
209{
210	unsigned int val;
211
212	smp_mb();
213
214	/*
215	 * rw->counter++;
216	 */
217	__asm__ __volatile__(
218	"1:	llock	%[val], [%[rwlock]]	\n"
219	"	add	%[val], %[val], 1	\n"
220	"	scond	%[val], [%[rwlock]]	\n"
221	"	bnz	1b			\n"
222	"					\n"
223	: [val]		"=&r"	(val)
224	: [rwlock]	"r"	(&(rw->counter))
225	: "memory", "cc");
226
227	smp_mb();
228}
229
230static inline void arch_write_unlock(arch_rwlock_t *rw)
231{
232	smp_mb();
233
234	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
235
236	smp_mb();
237}
238
239#else	/* !CONFIG_ARC_HAS_LLSC */
240
241static inline void arch_spin_lock(arch_spinlock_t *lock)
242{
243	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
244
245	/*
246	 * This smp_mb() is technically superfluous, we only need the one
247	 * after the lock for providing the ACQUIRE semantics.
248	 * However doing the "right" thing was regressing hackbench
249	 * so keeping this, pending further investigation
250	 */
251	smp_mb();
252
253	__asm__ __volatile__(
254	"1:	ex  %0, [%1]		\n"
255	"	breq  %0, %2, 1b	\n"
256	: "+&r" (val)
257	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
258	: "memory");
259
260	/*
261	 * ACQUIRE barrier to ensure load/store after taking the lock
262	 * don't "bleed-up" out of the critical section (leak-in is allowed)
263	 * http://www.spinics.net/lists/kernel/msg2010409.html
264	 *
265	 * ARCv2 only has load-load, store-store and all-all barrier
266	 * thus need the full all-all barrier
267	 */
268	smp_mb();
269}
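/*
 * Editor's illustration (not part of the kernel source): without LLSC the
 * lock is a classic test-and-set built on the atomic EX (exchange)
 * instruction: every iteration swaps LOCKED into the lock word and checks
 * what was there before.  Roughly, with the GCC __atomic builtins
 * (illustrative name; 1/0 stand in for LOCKED/UNLOCKED):
 */
static inline void sketch_ex_spin_lock(unsigned int *slock)
{
	/* swap LOCKED in; if the old value was already LOCKED, try again */
	while (__atomic_exchange_n(slock, 1 /* LOCKED */,
				   __ATOMIC_ACQUIRE) != 0 /* UNLOCKED */)
		;
}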
270
271/* 1 - lock taken successfully */
272static inline int arch_spin_trylock(arch_spinlock_t *lock)
273{
274	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
275
276	smp_mb();
277
278	__asm__ __volatile__(
279	"1:	ex  %0, [%1]		\n"
280	: "+r" (val)
281	: "r"(&(lock->slock))
282	: "memory");
283
284	smp_mb();
285
286	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
287}
288
289static inline void arch_spin_unlock(arch_spinlock_t *lock)
290{
291	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
292
293	/*
294	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
295	 * is the only option
296	 */
297	smp_mb();
298
299	__asm__ __volatile__(
300	"	ex  %0, [%1]		\n"
301	: "+r" (val)
302	: "r"(&(lock->slock))
303	: "memory");
304
305	/*
306	 * superfluous, but keeping for now - see pairing version in
307	 * arch_spin_lock above
308	 */
309	smp_mb();
310}
311
312/*
313 * Read-write spinlocks, allowing multiple readers but only one writer.
314 * Unfair locking as Writers could be starved indefinitely by Reader(s)
315 *
316 * The spinlock itself is contained in @counter and access to it is
317 * serialized with @lock_mutex.
318 */
319
320/* 1 - lock taken successfully */
321static inline int arch_read_trylock(arch_rwlock_t *rw)
322{
323	int ret = 0;
324	unsigned long flags;
325
326	local_irq_save(flags);
327	arch_spin_lock(&(rw->lock_mutex));
328
329	/*
330	 * zero means writer holds the lock exclusively, deny Reader.
331	 * Otherwise grant lock to first/subseq reader
332	 */
333	if (rw->counter > 0) {
334		rw->counter--;
335		ret = 1;
336	}
337
338	arch_spin_unlock(&(rw->lock_mutex));
339	local_irq_restore(flags);
340
341	smp_mb();
342	return ret;
343}
344
345/* 1 - lock taken successfully */
346static inline int arch_write_trylock(arch_rwlock_t *rw)
347{
348	int ret = 0;
349	unsigned long flags;
350
351	local_irq_save(flags);
352	arch_spin_lock(&(rw->lock_mutex));
353
354	/*
355	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
356	 * deny writer. Otherwise if unlocked grant to writer
357	 * Hence the claim that Linux rwlocks are unfair to writers.
358	 * (can be starved for an indefinite time by readers).
359	 */
360	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
361		rw->counter = 0;
362		ret = 1;
363	}
364	arch_spin_unlock(&(rw->lock_mutex));
365	local_irq_restore(flags);
366
367	return ret;
368}
369
370static inline void arch_read_lock(arch_rwlock_t *rw)
371{
372	while (!arch_read_trylock(rw))
373		cpu_relax();
374}
375
376static inline void arch_write_lock(arch_rwlock_t *rw)
377{
378	while (!arch_write_trylock(rw))
379		cpu_relax();
380}
381
382static inline void arch_read_unlock(arch_rwlock_t *rw)
383{
384	unsigned long flags;
385
386	local_irq_save(flags);
387	arch_spin_lock(&(rw->lock_mutex));
388	rw->counter++;
389	arch_spin_unlock(&(rw->lock_mutex));
390	local_irq_restore(flags);
391}
392
393static inline void arch_write_unlock(arch_rwlock_t *rw)
394{
395	unsigned long flags;
396
397	local_irq_save(flags);
398	arch_spin_lock(&(rw->lock_mutex));
399	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
400	arch_spin_unlock(&(rw->lock_mutex));
401	local_irq_restore(flags);
402}
403
404#endif
405
406#define arch_read_can_lock(x)	((x)->counter > 0)
407#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
408
409#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
410#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
411
412#define arch_spin_relax(lock)	cpu_relax()
413#define arch_read_relax(lock)	cpu_relax()
414#define arch_write_relax(lock)	cpu_relax()
415
416#endif /* __ASM_SPINLOCK_H */
arch/arc/include/asm/spinlock.h, v4.17
  1/*
  2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License version 2 as
  6 * published by the Free Software Foundation.
  7 */
  8
  9#ifndef __ASM_SPINLOCK_H
 10#define __ASM_SPINLOCK_H
 11
 12#include <asm/spinlock_types.h>
 13#include <asm/processor.h>
 14#include <asm/barrier.h>
 15
 16#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 17
 18#ifdef CONFIG_ARC_HAS_LLSC
 19
 20static inline void arch_spin_lock(arch_spinlock_t *lock)
 21{
 22	unsigned int val;
 23
 24	smp_mb();
 25
 26	__asm__ __volatile__(
 27	"1:	llock	%[val], [%[slock]]	\n"
 28	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
 29	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 30	"	bnz	1b			\n"
 31	"					\n"
 32	: [val]		"=&r"	(val)
 33	: [slock]	"r"	(&(lock->slock)),
 34	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 35	: "memory", "cc");
 36
 37	smp_mb();
 38}
 39
 40/* 1 - lock taken successfully */
 41static inline int arch_spin_trylock(arch_spinlock_t *lock)
 42{
 43	unsigned int val, got_it = 0;
 44
 45	smp_mb();
 46
 47	__asm__ __volatile__(
 48	"1:	llock	%[val], [%[slock]]	\n"
 49	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
 50	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 51	"	bnz	1b			\n"
 52	"	mov	%[got_it], 1		\n"
 53	"4:					\n"
 54	"					\n"
 55	: [val]		"=&r"	(val),
 56	  [got_it]	"+&r"	(got_it)
 57	: [slock]	"r"	(&(lock->slock)),
 58	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 59	: "memory", "cc");
 60
 61	smp_mb();
 62
 63	return got_it;
 64}
 65
 66static inline void arch_spin_unlock(arch_spinlock_t *lock)
 67{
 68	smp_mb();
 69
 70	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
 71
 72	smp_mb();
 73}
 74
 75/*
 76 * Read-write spinlocks, allowing multiple readers but only one writer.
 77 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 78 */
 79
 80static inline void arch_read_lock(arch_rwlock_t *rw)
 81{
 82	unsigned int val;
 83
 84	smp_mb();
 85
 86	/*
 87	 * zero means writer holds the lock exclusively, deny Reader.
 88	 * Otherwise grant lock to first/subseq reader
 89	 *
 90	 * 	if (rw->counter > 0) {
 91	 *		rw->counter--;
 92	 *		ret = 1;
 93	 *	}
 94	 */
 95
 96	__asm__ __volatile__(
 97	"1:	llock	%[val], [%[rwlock]]	\n"
 98	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
 99	"	sub	%[val], %[val], 1	\n"	/* reader lock */
100	"	scond	%[val], [%[rwlock]]	\n"
101	"	bnz	1b			\n"
102	"					\n"
103	: [val]		"=&r"	(val)
104	: [rwlock]	"r"	(&(rw->counter)),
105	  [WR_LOCKED]	"ir"	(0)
106	: "memory", "cc");
107
108	smp_mb();
109}
110
111/* 1 - lock taken successfully */
112static inline int arch_read_trylock(arch_rwlock_t *rw)
113{
114	unsigned int val, got_it = 0;
115
116	smp_mb();
117
118	__asm__ __volatile__(
119	"1:	llock	%[val], [%[rwlock]]	\n"
120	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
121	"	sub	%[val], %[val], 1	\n"	/* counter-- */
122	"	scond	%[val], [%[rwlock]]	\n"
123	"	bnz	1b			\n"	/* retry if collided with someone */
124	"	mov	%[got_it], 1		\n"
125	"					\n"
126	"4: ; --- done ---			\n"
127
128	: [val]		"=&r"	(val),
129	  [got_it]	"+&r"	(got_it)
130	: [rwlock]	"r"	(&(rw->counter)),
131	  [WR_LOCKED]	"ir"	(0)
132	: "memory", "cc");
133
134	smp_mb();
135
136	return got_it;
137}
138
139static inline void arch_write_lock(arch_rwlock_t *rw)
140{
141	unsigned int val;
142
143	smp_mb();
144
145	/*
146	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
147	 * deny writer. Otherwise if unlocked grant to writer
148	 * Hence the claim that Linux rwlocks are unfair to writers.
149	 * (can be starved for an indefinite time by readers).
150	 *
151	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
152	 *		rw->counter = 0;
153	 *		ret = 1;
154	 *	}
155	 */
156
157	__asm__ __volatile__(
158	"1:	llock	%[val], [%[rwlock]]	\n"
159	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
160	"	mov	%[val], %[WR_LOCKED]	\n"
161	"	scond	%[val], [%[rwlock]]	\n"
162	"	bnz	1b			\n"
163	"					\n"
164	: [val]		"=&r"	(val)
165	: [rwlock]	"r"	(&(rw->counter)),
166	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
167	  [WR_LOCKED]	"ir"	(0)
168	: "memory", "cc");
169
170	smp_mb();
171}
172
173/* 1 - lock taken successfully */
174static inline int arch_write_trylock(arch_rwlock_t *rw)
175{
176	unsigned int val, got_it = 0;
177
178	smp_mb();
179
180	__asm__ __volatile__(
181	"1:	llock	%[val], [%[rwlock]]	\n"
182	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
183	"	mov	%[val], %[WR_LOCKED]	\n"
184	"	scond	%[val], [%[rwlock]]	\n"
185	"	bnz	1b			\n"	/* retry if collided with someone */
186	"	mov	%[got_it], 1		\n"
187	"					\n"
188	"4: ; --- done ---			\n"
189
190	: [val]		"=&r"	(val),
191	  [got_it]	"+&r"	(got_it)
192	: [rwlock]	"r"	(&(rw->counter)),
193	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
194	  [WR_LOCKED]	"ir"	(0)
195	: "memory", "cc");
196
197	smp_mb();
198
199	return got_it;
200}
201
202static inline void arch_read_unlock(arch_rwlock_t *rw)
203{
204	unsigned int val;
205
206	smp_mb();
207
208	/*
209	 * rw->counter++;
210	 */
211	__asm__ __volatile__(
212	"1:	llock	%[val], [%[rwlock]]	\n"
213	"	add	%[val], %[val], 1	\n"
214	"	scond	%[val], [%[rwlock]]	\n"
215	"	bnz	1b			\n"
216	"					\n"
217	: [val]		"=&r"	(val)
218	: [rwlock]	"r"	(&(rw->counter))
219	: "memory", "cc");
220
221	smp_mb();
222}
223
224static inline void arch_write_unlock(arch_rwlock_t *rw)
225{
226	smp_mb();
227
228	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
229
230	smp_mb();
231}
232
233#else	/* !CONFIG_ARC_HAS_LLSC */
234
235static inline void arch_spin_lock(arch_spinlock_t *lock)
236{
237	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
238
239	/*
240	 * This smp_mb() is technically superfluous, we only need the one
241	 * after the lock for providing the ACQUIRE semantics.
242	 * However doing the "right" thing was regressing hackbench
243	 * so keeping this, pending further investigation
244	 */
245	smp_mb();
246
247	__asm__ __volatile__(
248	"1:	ex  %0, [%1]		\n"
249#ifdef CONFIG_EZNPS_MTM_EXT
250	"	.word %3		\n"
251#endif
252	"	breq  %0, %2, 1b	\n"
253	: "+&r" (val)
254	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
255#ifdef CONFIG_EZNPS_MTM_EXT
256	, "i"(CTOP_INST_SCHD_RW)
257#endif
258	: "memory");
259
260	/*
261	 * ACQUIRE barrier to ensure load/store after taking the lock
262	 * don't "bleed-up" out of the critical section (leak-in is allowed)
263	 * http://www.spinics.net/lists/kernel/msg2010409.html
264	 *
265	 * ARCv2 only has load-load, store-store and all-all barrier
266	 * thus need the full all-all barrier
267	 */
268	smp_mb();
269}
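/*
 * Editor's note (not part of the kernel source): the CONFIG_EZNPS_MTM_EXT
 * lines above are what changed here since v4.10.11.  The extra ".word %3"
 * emits CTOP_INST_SCHD_RW, an EZchip NPS custom instruction; on that
 * hardware-multithreaded ARC core it appears to act as a scheduling hint
 * so the spinning EX loop yields the hardware thread instead of hogging
 * the pipeline (semantics inferred, not stated in this file).
 */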
270
271/* 1 - lock taken successfully */
272static inline int arch_spin_trylock(arch_spinlock_t *lock)
273{
274	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
275
276	smp_mb();
277
278	__asm__ __volatile__(
279	"1:	ex  %0, [%1]		\n"
280	: "+r" (val)
281	: "r"(&(lock->slock))
282	: "memory");
283
284	smp_mb();
285
286	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
287}
288
289static inline void arch_spin_unlock(arch_spinlock_t *lock)
290{
291	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
292
293	/*
294	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
295	 * is the only option
296	 */
297	smp_mb();
298
299	/*
300	 * EX is not really required here, a simple STore of 0 suffices.
301	 * However this causes tasklist livelocks in SystemC based SMP virtual
302	 * platforms where the systemc core scheduler uses EX as a cue for
303	 * moving to next core. Do a git log of this file for details
304	 */
305	__asm__ __volatile__(
306	"	ex  %0, [%1]		\n"
307	: "+r" (val)
308	: "r"(&(lock->slock))
309	: "memory");
310
311	/*
312	 * superfluous, but keeping for now - see pairing version in
313	 * arch_spin_lock above
314	 */
315	smp_mb();
316}
317
318/*
319 * Read-write spinlocks, allowing multiple readers but only one writer.
320 * Unfair locking as Writers could be starved indefinitely by Reader(s)
321 *
322 * The spinlock itself is contained in @counter and access to it is
323 * serialized with @lock_mutex.
324 */
325
326/* 1 - lock taken successfully */
327static inline int arch_read_trylock(arch_rwlock_t *rw)
328{
329	int ret = 0;
330	unsigned long flags;
331
332	local_irq_save(flags);
333	arch_spin_lock(&(rw->lock_mutex));
334
335	/*
336	 * zero means writer holds the lock exclusively, deny Reader.
337	 * Otherwise grant lock to first/subseq reader
338	 */
339	if (rw->counter > 0) {
340		rw->counter--;
341		ret = 1;
342	}
343
344	arch_spin_unlock(&(rw->lock_mutex));
345	local_irq_restore(flags);
346
347	smp_mb();
348	return ret;
349}
350
351/* 1 - lock taken successfully */
352static inline int arch_write_trylock(arch_rwlock_t *rw)
353{
354	int ret = 0;
355	unsigned long flags;
356
357	local_irq_save(flags);
358	arch_spin_lock(&(rw->lock_mutex));
359
360	/*
361	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
362	 * deny writer. Otherwise if unlocked grant to writer
363	 * Hence the claim that Linux rwlocks are unfair to writers.
364	 * (can be starved for an indefinite time by readers).
365	 */
366	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
367		rw->counter = 0;
368		ret = 1;
369	}
370	arch_spin_unlock(&(rw->lock_mutex));
371	local_irq_restore(flags);
372
373	return ret;
374}
375
376static inline void arch_read_lock(arch_rwlock_t *rw)
377{
378	while (!arch_read_trylock(rw))
379		cpu_relax();
380}
381
382static inline void arch_write_lock(arch_rwlock_t *rw)
383{
384	while (!arch_write_trylock(rw))
385		cpu_relax();
386}
387
388static inline void arch_read_unlock(arch_rwlock_t *rw)
389{
390	unsigned long flags;
391
392	local_irq_save(flags);
393	arch_spin_lock(&(rw->lock_mutex));
394	rw->counter++;
395	arch_spin_unlock(&(rw->lock_mutex));
396	local_irq_restore(flags);
397}
398
399static inline void arch_write_unlock(arch_rwlock_t *rw)
400{
401	unsigned long flags;
402
403	local_irq_save(flags);
404	arch_spin_lock(&(rw->lock_mutex));
405	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
406	arch_spin_unlock(&(rw->lock_mutex));
407	local_irq_restore(flags);
408}
409
410#endif
411
412#endif /* __ASM_SPINLOCK_H */
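/*
 * Editor's illustration (not part of the kernel source): nothing calls the
 * arch_* hooks above directly.  They sit underneath the generic spinlock
 * API in <linux/spinlock.h>, so a typical (hypothetical) user looks like:
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);		/* hypothetical lock */
static unsigned long demo_count;		/* hypothetical shared state */

static void demo_update(void)			/* hypothetical helper */
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* bottoms out in arch_spin_lock() */
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}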