arch/arc/include/asm/spinlock.h (v6.8)
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  4 */
  5
  6#ifndef __ASM_SPINLOCK_H
  7#define __ASM_SPINLOCK_H
  8
  9#include <asm/spinlock_types.h>
 10#include <asm/processor.h>
 11#include <asm/barrier.h>
 12
 13#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 14
 15#ifdef CONFIG_ARC_HAS_LLSC
 16
 17static inline void arch_spin_lock(arch_spinlock_t *lock)
 18{
 19	unsigned int val;
 20
 21	__asm__ __volatile__(
 22	"1:	llock	%[val], [%[slock]]	\n"
 23	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
 24	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 25	"	bnz	1b			\n"
 26	"					\n"
 27	: [val]		"=&r"	(val)
 28	: [slock]	"r"	(&(lock->slock)),
 29	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 30	: "memory", "cc");
 31
 32	/*
 33	 * ACQUIRE barrier to ensure load/store after taking the lock
 34	 * don't "bleed-up" out of the critical section (leak-in is allowed)
 35	 * http://www.spinics.net/lists/kernel/msg2010409.html
 36	 *
 37	 * ARCv2 only has load-load, store-store and all-all barrier
 38	 * thus need the full all-all barrier
 39	 */
 40	smp_mb();
 41}
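
For reference, the llock/breq/scond/bnz sequence above is the classic load-locked/store-conditional acquire loop. A minimal user-space C11 sketch of the same idea, assuming hypothetical toy_* names, with compare-and-swap standing in for LLOCK/SCOND and an explicit fence mirroring the smp_mb():

#include <stdatomic.h>

/* Hypothetical toy type for illustration; not the kernel's arch_spinlock_t */
typedef struct { atomic_uint slock; } toy_spinlock_t;

#define TOY_UNLOCKED	0u
#define TOY_LOCKED	1u

static inline void toy_spin_lock(toy_spinlock_t *lock)
{
	unsigned int expected;

	do {
		expected = TOY_UNLOCKED;
		/* like llock/breq/scond/bnz: retry while the lock is held
		 * or another CPU wins the store-conditional race */
	} while (!atomic_compare_exchange_weak_explicit(&lock->slock,
				&expected, TOY_LOCKED,
				memory_order_relaxed, memory_order_relaxed));

	/* stands in for the smp_mb() ACQUIRE barrier above */
	atomic_thread_fence(memory_order_seq_cst);
}
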
 42
 43/* 1 - lock taken successfully */
 44static inline int arch_spin_trylock(arch_spinlock_t *lock)
 45{
 46	unsigned int val, got_it = 0;
 47
 48	__asm__ __volatile__(
 49	"1:	llock	%[val], [%[slock]]	\n"
 50	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
 51	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 52	"	bnz	1b			\n"
 53	"	mov	%[got_it], 1		\n"
 54	"4:					\n"
 55	"					\n"
 56	: [val]		"=&r"	(val),
 57	  [got_it]	"+&r"	(got_it)
 58	: [slock]	"r"	(&(lock->slock)),
 59	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 60	: "memory", "cc");
 61
 62	smp_mb();
 63
 64	return got_it;
 65}
 66
 67static inline void arch_spin_unlock(arch_spinlock_t *lock)
 68{
 69	smp_mb();
 70
 71	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
 72}
 73
 74/*
 75 * Read-write spinlocks, allowing multiple readers but only one writer.
 76 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 77 */
 78
 79static inline void arch_read_lock(arch_rwlock_t *rw)
 80{
 81	unsigned int val;
 82
 83	/*
 84	 * zero means writer holds the lock exclusively, deny Reader.
 85	 * Otherwise grant lock to first/subseq reader
 86	 *
 87	 * 	if (rw->counter > 0) {
 88	 *		rw->counter--;
 89	 *		ret = 1;
 90	 *	}
 91	 */
 92
 93	__asm__ __volatile__(
 94	"1:	llock	%[val], [%[rwlock]]	\n"
 95	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
 96	"	sub	%[val], %[val], 1	\n"	/* reader lock */
 97	"	scond	%[val], [%[rwlock]]	\n"
 98	"	bnz	1b			\n"
 99	"					\n"
100	: [val]		"=&r"	(val)
101	: [rwlock]	"r"	(&(rw->counter)),
102	  [WR_LOCKED]	"ir"	(0)
103	: "memory", "cc");
104
105	smp_mb();
106}
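
The commented pseudo-code maps onto an atomic "decrement if positive". A rough user-space C11 sketch of that reader-side policy, assuming a hypothetical toy_rwlock_t with CAS standing in for LLOCK/SCOND:

#include <stdatomic.h>

/* Hypothetical toy type for illustration; not the kernel's arch_rwlock_t */
typedef struct { atomic_int counter; } toy_rwlock_t;

#define TOY_RW_UNLOCKED	0x01000000	/* stands in for __ARCH_RW_LOCK_UNLOCKED__ */

static inline void toy_read_lock(toy_rwlock_t *rw)
{
	int old;

	for (;;) {
		old = atomic_load_explicit(&rw->counter, memory_order_relaxed);
		if (old <= 0)
			continue;	/* writer holds it: spin, like the brls above */

		/* counter > 0: try to claim one reader slot (counter--) */
		if (atomic_compare_exchange_weak_explicit(&rw->counter, &old,
					old - 1, memory_order_acquire,
					memory_order_relaxed))
			return;
		/* lost the race to another CPU: retry, like bnz 1b */
	}
}
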
107
108/* 1 - lock taken successfully */
109static inline int arch_read_trylock(arch_rwlock_t *rw)
110{
111	unsigned int val, got_it = 0;
112
113	__asm__ __volatile__(
114	"1:	llock	%[val], [%[rwlock]]	\n"
115	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
116	"	sub	%[val], %[val], 1	\n"	/* counter-- */
117	"	scond	%[val], [%[rwlock]]	\n"
118	"	bnz	1b			\n"	/* retry if collided with someone */
119	"	mov	%[got_it], 1		\n"
120	"					\n"
121	"4: ; --- done ---			\n"
122
123	: [val]		"=&r"	(val),
124	  [got_it]	"+&r"	(got_it)
125	: [rwlock]	"r"	(&(rw->counter)),
126	  [WR_LOCKED]	"ir"	(0)
127	: "memory", "cc");
128
129	smp_mb();
130
131	return got_it;
132}
133
134static inline void arch_write_lock(arch_rwlock_t *rw)
135{
136	unsigned int val;
137
138	/*
139	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
140	 * deny writer. Otherwise if unlocked grant to writer
141	 * Hence the claim that Linux rwlocks are unfair to writers.
142	 * (can be starved for an indefinite time by readers).
143	 *
144	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
145	 *		rw->counter = 0;
146	 *		ret = 1;
147	 *	}
148	 */
149
150	__asm__ __volatile__(
151	"1:	llock	%[val], [%[rwlock]]	\n"
152	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
153	"	mov	%[val], %[WR_LOCKED]	\n"
154	"	scond	%[val], [%[rwlock]]	\n"
155	"	bnz	1b			\n"
156	"					\n"
157	: [val]		"=&r"	(val)
158	: [rwlock]	"r"	(&(rw->counter)),
159	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
160	  [WR_LOCKED]	"ir"	(0)
161	: "memory", "cc");
162
163	smp_mb();
164}
165
166/* 1 - lock taken successfully */
167static inline int arch_write_trylock(arch_rwlock_t *rw)
168{
169	unsigned int val, got_it = 0;
170
171	__asm__ __volatile__(
172	"1:	llock	%[val], [%[rwlock]]	\n"
173	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
174	"	mov	%[val], %[WR_LOCKED]	\n"
175	"	scond	%[val], [%[rwlock]]	\n"
176	"	bnz	1b			\n"	/* retry if collided with someone */
177	"	mov	%[got_it], 1		\n"
178	"					\n"
179	"4: ; --- done ---			\n"
180
181	: [val]		"=&r"	(val),
182	  [got_it]	"+&r"	(got_it)
183	: [rwlock]	"r"	(&(rw->counter)),
184	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
185	  [WR_LOCKED]	"ir"	(0)
186	: "memory", "cc");
187
188	smp_mb();
189
190	return got_it;
191}
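
The writer path is the mirror image: the lock is granted only if the counter still holds its fully-unlocked value, which is then replaced by 0 (write locked). Continuing the hypothetical toy_rwlock_t sketch from above:

/* 1 - write lock taken successfully (toy illustration, see toy_rwlock_t above) */
static inline int toy_write_trylock(toy_rwlock_t *rw)
{
	int expected = TOY_RW_UNLOCKED;

	/* only the fully-unlocked value may be swapped for 0 (write locked);
	 * any outstanding reader (counter < TOY_RW_UNLOCKED) makes this fail */
	return atomic_compare_exchange_strong_explicit(&rw->counter, &expected,
				0, memory_order_acquire, memory_order_relaxed);
}
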
192
193static inline void arch_read_unlock(arch_rwlock_t *rw)
194{
195	unsigned int val;
196
197	smp_mb();
198
199	/*
200	 * rw->counter++;
201	 */
202	__asm__ __volatile__(
203	"1:	llock	%[val], [%[rwlock]]	\n"
204	"	add	%[val], %[val], 1	\n"
205	"	scond	%[val], [%[rwlock]]	\n"
206	"	bnz	1b			\n"
207	"					\n"
208	: [val]		"=&r"	(val)
209	: [rwlock]	"r"	(&(rw->counter))
210	: "memory", "cc");
211}
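
Read-side unlock is just an atomic increment of the counter; in C11 terms a single fetch-and-add would do (again the hypothetical toy_rwlock_t):

static inline void toy_read_unlock(toy_rwlock_t *rw)
{
	/* counter++: release ordering stands in for the preceding smp_mb() */
	atomic_fetch_add_explicit(&rw->counter, 1, memory_order_release);
}
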
212
213static inline void arch_write_unlock(arch_rwlock_t *rw)
214{
215	smp_mb();
216
217	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
218}
219
220#else	/* !CONFIG_ARC_HAS_LLSC */
221
222static inline void arch_spin_lock(arch_spinlock_t *lock)
223{
224	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
225
226	/*
227	 * Per lkmm, smp_mb() is only required after _lock (and before_unlock)
228	 * for ACQ and REL semantics respectively. However EX based spinlocks
229	 * need the extra smp_mb to workaround a hardware quirk.
230	 */
231	smp_mb();
232
233	__asm__ __volatile__(
234	"1:	ex  %0, [%1]		\n"
235	"	breq  %0, %2, 1b	\n"
236	: "+&r" (val)
237	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
238	: "memory");
239
240	smp_mb();
241}
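
Without LLOCK/SCOND the lock is taken with the EX (atomic exchange) instruction: keep swapping the LOCKED value in until the value that comes back out is UNLOCKED. A hedged user-space C11 equivalent, with hypothetical names:

#include <stdatomic.h>

static inline void toy_ex_spin_lock(atomic_uint *slock)	/* hypothetical */
{
	/* EX swaps a register with memory: keep writing 1 (LOCKED) until the
	 * value that comes back out is 0 (UNLOCKED), i.e. we took the lock */
	while (atomic_exchange_explicit(slock, 1u, memory_order_relaxed) != 0u)
		;	/* spin */

	atomic_thread_fence(memory_order_seq_cst);	/* the pairing smp_mb() */
}
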
242
243/* 1 - lock taken successfully */
244static inline int arch_spin_trylock(arch_spinlock_t *lock)
245{
246	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
247
248	smp_mb();
249
250	__asm__ __volatile__(
251	"1:	ex  %0, [%1]		\n"
252	: "+r" (val)
253	: "r"(&(lock->slock))
254	: "memory");
255
256	smp_mb();
257
258	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
259}
260
261static inline void arch_spin_unlock(arch_spinlock_t *lock)
262{
263	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
264
265	/*
266	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
267	 * is the only option
268	 */
269	smp_mb();
270
271	/*
272	 * EX is not really required here, a simple STore of 0 suffices.
273	 * However this causes tasklist livelocks in SystemC based SMP virtual
274	 * platforms where the systemc core scheduler uses EX as a cue for
275	 * moving to next core. Do a git log of this file for details
276	 */
277	__asm__ __volatile__(
278	"	ex  %0, [%1]		\n"
279	: "+r" (val)
280	: "r"(&(lock->slock))
281	: "memory");
282
283	/*
284	 * see pairing version/comment in arch_spin_lock above
285	 */
286	smp_mb();
287}
288
289/*
290 * Read-write spinlocks, allowing multiple readers but only one writer.
291 * Unfair locking as Writers could be starved indefinitely by Reader(s)
292 *
293 * The spinlock itself is contained in @counter and access to it is
294 * serialized with @lock_mutex.
295 */
296
297/* 1 - lock taken successfully */
298static inline int arch_read_trylock(arch_rwlock_t *rw)
299{
300	int ret = 0;
301	unsigned long flags;
302
303	local_irq_save(flags);
304	arch_spin_lock(&(rw->lock_mutex));
305
306	/*
307	 * zero means writer holds the lock exclusively, deny Reader.
308	 * Otherwise grant lock to first/subseq reader
309	 */
310	if (rw->counter > 0) {
311		rw->counter--;
312		ret = 1;
313	}
314
315	arch_spin_unlock(&(rw->lock_mutex));
316	local_irq_restore(flags);
317
318	return ret;
319}
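
In this !LLSC variant the rwlock is a plain counter serialized by lock_mutex, with IRQs disabled so the critical section cannot be re-entered from an interrupt on the same CPU. A user-space sketch of the same structure, with a pthread mutex standing in for lock_mutex plus IRQ disabling (hypothetical names):

#include <pthread.h>

/* Hypothetical stand-in for the !LLSC arch_rwlock_t: a plain counter
 * guarded by an ordinary mutex */
struct toy_rwlock_mtx {
	int counter;			/* initialised to TOY_RW_UNLOCKED */
	pthread_mutex_t lock_mutex;
};

#define TOY_RW_UNLOCKED	0x01000000

/* 1 - read lock taken successfully */
static int toy_read_trylock(struct toy_rwlock_mtx *rw)
{
	int ret = 0;

	pthread_mutex_lock(&rw->lock_mutex);
	if (rw->counter > 0) {		/* no writer: take one reader slot */
		rw->counter--;
		ret = 1;
	}
	pthread_mutex_unlock(&rw->lock_mutex);

	return ret;
}
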
320
321/* 1 - lock taken successfully */
322static inline int arch_write_trylock(arch_rwlock_t *rw)
323{
324	int ret = 0;
325	unsigned long flags;
326
327	local_irq_save(flags);
328	arch_spin_lock(&(rw->lock_mutex));
329
330	/*
331	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
332	 * deny writer. Otherwise if unlocked grant to writer
333	 * Hence the claim that Linux rwlocks are unfair to writers.
334	 * (can be starved for an indefinite time by readers).
335	 */
336	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
337		rw->counter = 0;
338		ret = 1;
339	}
340	arch_spin_unlock(&(rw->lock_mutex));
341	local_irq_restore(flags);
342
343	return ret;
344}
345
346static inline void arch_read_lock(arch_rwlock_t *rw)
347{
348	while (!arch_read_trylock(rw))
349		cpu_relax();
350}
351
352static inline void arch_write_lock(arch_rwlock_t *rw)
353{
354	while (!arch_write_trylock(rw))
355		cpu_relax();
356}
357
358static inline void arch_read_unlock(arch_rwlock_t *rw)
359{
360	unsigned long flags;
361
362	local_irq_save(flags);
363	arch_spin_lock(&(rw->lock_mutex));
364	rw->counter++;
365	arch_spin_unlock(&(rw->lock_mutex));
366	local_irq_restore(flags);
367}
368
369static inline void arch_write_unlock(arch_rwlock_t *rw)
370{
371	unsigned long flags;
372
373	local_irq_save(flags);
374	arch_spin_lock(&(rw->lock_mutex));
375	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
376	arch_spin_unlock(&(rw->lock_mutex));
377	local_irq_restore(flags);
378}
379
380#endif
381
382#endif /* __ASM_SPINLOCK_H */
arch/arc/include/asm/spinlock.h (v4.6)
 
  1/*
  2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License version 2 as
  6 * published by the Free Software Foundation.
  7 */
  8
  9#ifndef __ASM_SPINLOCK_H
 10#define __ASM_SPINLOCK_H
 11
 12#include <asm/spinlock_types.h>
 13#include <asm/processor.h>
 14#include <asm/barrier.h>
 15
 16#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 17#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
 18#define arch_spin_unlock_wait(x) \
 19	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 20
 21#ifdef CONFIG_ARC_HAS_LLSC
 22
 23/*
 24 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
 25 */
 26#ifndef CONFIG_ARC_STAR_9000923308
 27
 28static inline void arch_spin_lock(arch_spinlock_t *lock)
 29{
 30	unsigned int val;
 31
 32	smp_mb();
 33
 34	__asm__ __volatile__(
 35	"1:	llock	%[val], [%[slock]]	\n"
 36	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
 37	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 38	"	bnz	1b			\n"
 39	"					\n"
 40	: [val]		"=&r"	(val)
 41	: [slock]	"r"	(&(lock->slock)),
 42	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 43	: "memory", "cc");
 44
 45	smp_mb();
 46}
 47
 48/* 1 - lock taken successfully */
 49static inline int arch_spin_trylock(arch_spinlock_t *lock)
 50{
 51	unsigned int val, got_it = 0;
 52
 53	smp_mb();
 54
 55	__asm__ __volatile__(
 56	"1:	llock	%[val], [%[slock]]	\n"
 57	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
 58	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 59	"	bnz	1b			\n"
 60	"	mov	%[got_it], 1		\n"
 61	"4:					\n"
 62	"					\n"
 63	: [val]		"=&r"	(val),
 64	  [got_it]	"+&r"	(got_it)
 65	: [slock]	"r"	(&(lock->slock)),
 66	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 67	: "memory", "cc");
 68
 69	smp_mb();
 70
 71	return got_it;
 72}
 73
 74static inline void arch_spin_unlock(arch_spinlock_t *lock)
 75{
 76	smp_mb();
 77
 78	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
 79
 80	smp_mb();
 81}
 82
 83/*
 84 * Read-write spinlocks, allowing multiple readers but only one writer.
 85 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 86 */
 87
 88static inline void arch_read_lock(arch_rwlock_t *rw)
 89{
 90	unsigned int val;
 91
 92	smp_mb();
 93
 94	/*
 95	 * zero means writer holds the lock exclusively, deny Reader.
 96	 * Otherwise grant lock to first/subseq reader
 97	 *
 98	 * 	if (rw->counter > 0) {
 99	 *		rw->counter--;
100	 *		ret = 1;
101	 *	}
102	 */
103
104	__asm__ __volatile__(
105	"1:	llock	%[val], [%[rwlock]]	\n"
106	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
107	"	sub	%[val], %[val], 1	\n"	/* reader lock */
108	"	scond	%[val], [%[rwlock]]	\n"
109	"	bnz	1b			\n"
110	"					\n"
111	: [val]		"=&r"	(val)
112	: [rwlock]	"r"	(&(rw->counter)),
113	  [WR_LOCKED]	"ir"	(0)
114	: "memory", "cc");
115
116	smp_mb();
117}
118
119/* 1 - lock taken successfully */
120static inline int arch_read_trylock(arch_rwlock_t *rw)
121{
122	unsigned int val, got_it = 0;
123
124	smp_mb();
125
126	__asm__ __volatile__(
127	"1:	llock	%[val], [%[rwlock]]	\n"
128	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
129	"	sub	%[val], %[val], 1	\n"	/* counter-- */
130	"	scond	%[val], [%[rwlock]]	\n"
131	"	bnz	1b			\n"	/* retry if collided with someone */
132	"	mov	%[got_it], 1		\n"
133	"					\n"
134	"4: ; --- done ---			\n"
135
136	: [val]		"=&r"	(val),
137	  [got_it]	"+&r"	(got_it)
138	: [rwlock]	"r"	(&(rw->counter)),
139	  [WR_LOCKED]	"ir"	(0)
140	: "memory", "cc");
141
142	smp_mb();
143
144	return got_it;
145}
146
147static inline void arch_write_lock(arch_rwlock_t *rw)
148{
149	unsigned int val;
150
151	smp_mb();
152
153	/*
154	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
155	 * deny writer. Otherwise if unlocked grant to writer
156	 * Hence the claim that Linux rwlocks are unfair to writers.
157	 * (can be starved for an indefinite time by readers).
158	 *
159	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
160	 *		rw->counter = 0;
161	 *		ret = 1;
162	 *	}
163	 */
164
165	__asm__ __volatile__(
166	"1:	llock	%[val], [%[rwlock]]	\n"
167	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
168	"	mov	%[val], %[WR_LOCKED]	\n"
169	"	scond	%[val], [%[rwlock]]	\n"
170	"	bnz	1b			\n"
171	"					\n"
172	: [val]		"=&r"	(val)
173	: [rwlock]	"r"	(&(rw->counter)),
174	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
175	  [WR_LOCKED]	"ir"	(0)
176	: "memory", "cc");
177
178	smp_mb();
179}
180
181/* 1 - lock taken successfully */
182static inline int arch_write_trylock(arch_rwlock_t *rw)
183{
184	unsigned int val, got_it = 0;
185
186	smp_mb();
187
188	__asm__ __volatile__(
189	"1:	llock	%[val], [%[rwlock]]	\n"
190	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
191	"	mov	%[val], %[WR_LOCKED]	\n"
192	"	scond	%[val], [%[rwlock]]	\n"
193	"	bnz	1b			\n"	/* retry if collided with someone */
194	"	mov	%[got_it], 1		\n"
195	"					\n"
196	"4: ; --- done ---			\n"
197
198	: [val]		"=&r"	(val),
199	  [got_it]	"+&r"	(got_it)
200	: [rwlock]	"r"	(&(rw->counter)),
201	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
202	  [WR_LOCKED]	"ir"	(0)
203	: "memory", "cc");
204
205	smp_mb();
206
207	return got_it;
208}
209
210static inline void arch_read_unlock(arch_rwlock_t *rw)
211{
212	unsigned int val;
213
214	smp_mb();
215
216	/*
217	 * rw->counter++;
218	 */
219	__asm__ __volatile__(
220	"1:	llock	%[val], [%[rwlock]]	\n"
221	"	add	%[val], %[val], 1	\n"
222	"	scond	%[val], [%[rwlock]]	\n"
223	"	bnz	1b			\n"
224	"					\n"
225	: [val]		"=&r"	(val)
226	: [rwlock]	"r"	(&(rw->counter))
227	: "memory", "cc");
228
229	smp_mb();
230}
231
232static inline void arch_write_unlock(arch_rwlock_t *rw)
233{
234	smp_mb();
235
236	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
237
238	smp_mb();
239}
240
241#else	/* CONFIG_ARC_STAR_9000923308 */
242
243/*
244 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
245 * coherency transactions in the SCU. The exclusive line state keeps rotating
246 * among contenting cores leading to a never ending cycle. So break the cycle
247 * by deferring the retry of failed exclusive access (SCOND). The actual delay
248 * needed is function of number of contending cores as well as the unrelated
249 * coherency traffic from other cores. To keep the code simple, start off with
250 * small delay of 1 which would suffice most cases and in case of contention
251 * double the delay. Eventually the delay is sufficient such that the coherency
252 * pipeline is drained, thus a subsequent exclusive access would succeed.
253 */
254
255#define SCOND_FAIL_RETRY_VAR_DEF						\
256	unsigned int delay, tmp;						\
257
258#define SCOND_FAIL_RETRY_ASM							\
259	"   ; --- scond fail delay ---		\n"				\
260	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
261	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
262	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
263	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
264	"	b	1b			\n"	/* start over */	\
265	"					\n"				\
266	"4: ; --- done ---			\n"				\
267
268#define SCOND_FAIL_RETRY_VARS							\
269	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
270
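
Taken together, the three macros implement exponential backoff: after a failed SCOND, busy-wait for delay iterations, double delay (the rol), and restart the whole attempt. A simplified user-space C sketch of that policy, assuming hypothetical names and CAS in place of LLOCK/SCOND (the real code also resets the delay whenever it merely observes the lock busy):

#include <stdatomic.h>

/* Hypothetical illustration of the SCOND-fail backoff policy above */
static inline void toy_backoff_spin_lock(atomic_uint *slock)
{
	unsigned int delay = 1;

	for (;;) {
		unsigned int expected = 0;	/* UNLOCKED */

		if (atomic_compare_exchange_weak_explicit(slock, &expected, 1u,
					memory_order_acquire,
					memory_order_relaxed))
			return;			/* done, like "bz 4f" */

		/* exclusive access failed: spin for 'delay' iterations,
		 * then double the delay and start the attempt over */
		for (volatile unsigned int tmp = delay; tmp != 0; tmp--)
			;			/* busy-wait, kept by 'volatile' */
		delay *= 2;			/* the "rol %[delay]" step */
	}
}
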
271static inline void arch_spin_lock(arch_spinlock_t *lock)
272{
273	unsigned int val;
274	SCOND_FAIL_RETRY_VAR_DEF;
275
276	smp_mb();
277
278	__asm__ __volatile__(
279	"0:	mov	%[delay], 1		\n"
280	"1:	llock	%[val], [%[slock]]	\n"
281	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
282	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
283	"	bz	4f			\n"	/* done */
284	"					\n"
285	SCOND_FAIL_RETRY_ASM
286
287	: [val]		"=&r"	(val)
288	  SCOND_FAIL_RETRY_VARS
289	: [slock]	"r"	(&(lock->slock)),
290	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
291	: "memory", "cc");
292
293	smp_mb();
294}
295
296/* 1 - lock taken successfully */
297static inline int arch_spin_trylock(arch_spinlock_t *lock)
298{
299	unsigned int val, got_it = 0;
300	SCOND_FAIL_RETRY_VAR_DEF;
301
302	smp_mb();
303
304	__asm__ __volatile__(
305	"0:	mov	%[delay], 1		\n"
306	"1:	llock	%[val], [%[slock]]	\n"
307	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
308	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
309	"	bz.d	4f			\n"
310	"	mov.z	%[got_it], 1		\n"	/* got it */
311	"					\n"
312	SCOND_FAIL_RETRY_ASM
313
314	: [val]		"=&r"	(val),
315	  [got_it]	"+&r"	(got_it)
316	  SCOND_FAIL_RETRY_VARS
317	: [slock]	"r"	(&(lock->slock)),
318	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
319	: "memory", "cc");
320
321	smp_mb();
322
323	return got_it;
324}
325
326static inline void arch_spin_unlock(arch_spinlock_t *lock)
327{
328	smp_mb();
329
330	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
331
332	smp_mb();
333}
334
335/*
336 * Read-write spinlocks, allowing multiple readers but only one writer.
337 * Unfair locking as Writers could be starved indefinitely by Reader(s)
338 */
339
340static inline void arch_read_lock(arch_rwlock_t *rw)
341{
342	unsigned int val;
343	SCOND_FAIL_RETRY_VAR_DEF;
344
345	smp_mb();
346
347	/*
348	 * zero means writer holds the lock exclusively, deny Reader.
349	 * Otherwise grant lock to first/subseq reader
350	 *
351	 * 	if (rw->counter > 0) {
352	 *		rw->counter--;
353	 *		ret = 1;
354	 *	}
355	 */
356
357	__asm__ __volatile__(
358	"0:	mov	%[delay], 1		\n"
359	"1:	llock	%[val], [%[rwlock]]	\n"
360	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
361	"	sub	%[val], %[val], 1	\n"	/* reader lock */
362	"	scond	%[val], [%[rwlock]]	\n"
363	"	bz	4f			\n"	/* done */
364	"					\n"
365	SCOND_FAIL_RETRY_ASM
366
367	: [val]		"=&r"	(val)
368	  SCOND_FAIL_RETRY_VARS
369	: [rwlock]	"r"	(&(rw->counter)),
370	  [WR_LOCKED]	"ir"	(0)
371	: "memory", "cc");
372
373	smp_mb();
374}
375
376/* 1 - lock taken successfully */
377static inline int arch_read_trylock(arch_rwlock_t *rw)
378{
379	unsigned int val, got_it = 0;
380	SCOND_FAIL_RETRY_VAR_DEF;
381
382	smp_mb();
383
384	__asm__ __volatile__(
385	"0:	mov	%[delay], 1		\n"
386	"1:	llock	%[val], [%[rwlock]]	\n"
387	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
388	"	sub	%[val], %[val], 1	\n"	/* counter-- */
389	"	scond	%[val], [%[rwlock]]	\n"
390	"	bz.d	4f			\n"
391	"	mov.z	%[got_it], 1		\n"	/* got it */
392	"					\n"
393	SCOND_FAIL_RETRY_ASM
394
395	: [val]		"=&r"	(val),
396	  [got_it]	"+&r"	(got_it)
397	  SCOND_FAIL_RETRY_VARS
398	: [rwlock]	"r"	(&(rw->counter)),
399	  [WR_LOCKED]	"ir"	(0)
400	: "memory", "cc");
401
402	smp_mb();
403
404	return got_it;
405}
406
407static inline void arch_write_lock(arch_rwlock_t *rw)
408{
409	unsigned int val;
410	SCOND_FAIL_RETRY_VAR_DEF;
411
412	smp_mb();
413
414	/*
415	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
416	 * deny writer. Otherwise if unlocked grant to writer
417	 * Hence the claim that Linux rwlocks are unfair to writers.
418	 * (can be starved for an indefinite time by readers).
419	 *
420	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
421	 *		rw->counter = 0;
422	 *		ret = 1;
423	 *	}
424	 */
425
426	__asm__ __volatile__(
427	"0:	mov	%[delay], 1		\n"
428	"1:	llock	%[val], [%[rwlock]]	\n"
429	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
430	"	mov	%[val], %[WR_LOCKED]	\n"
431	"	scond	%[val], [%[rwlock]]	\n"
432	"	bz	4f			\n"
433	"					\n"
434	SCOND_FAIL_RETRY_ASM
435
436	: [val]		"=&r"	(val)
437	  SCOND_FAIL_RETRY_VARS
438	: [rwlock]	"r"	(&(rw->counter)),
439	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
440	  [WR_LOCKED]	"ir"	(0)
441	: "memory", "cc");
442
443	smp_mb();
444}
445
446/* 1 - lock taken successfully */
447static inline int arch_write_trylock(arch_rwlock_t *rw)
448{
449	unsigned int val, got_it = 0;
450	SCOND_FAIL_RETRY_VAR_DEF;
451
452	smp_mb();
453
454	__asm__ __volatile__(
455	"0:	mov	%[delay], 1		\n"
456	"1:	llock	%[val], [%[rwlock]]	\n"
457	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
458	"	mov	%[val], %[WR_LOCKED]	\n"
459	"	scond	%[val], [%[rwlock]]	\n"
460	"	bz.d	4f			\n"
461	"	mov.z	%[got_it], 1		\n"	/* got it */
462	"					\n"
463	SCOND_FAIL_RETRY_ASM
464
465	: [val]		"=&r"	(val),
466	  [got_it]	"+&r"	(got_it)
467	  SCOND_FAIL_RETRY_VARS
468	: [rwlock]	"r"	(&(rw->counter)),
469	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
470	  [WR_LOCKED]	"ir"	(0)
471	: "memory", "cc");
472
473	smp_mb();
474
475	return got_it;
476}
477
478static inline void arch_read_unlock(arch_rwlock_t *rw)
479{
480	unsigned int val;
481
482	smp_mb();
483
484	/*
485	 * rw->counter++;
486	 */
487	__asm__ __volatile__(
488	"1:	llock	%[val], [%[rwlock]]	\n"
489	"	add	%[val], %[val], 1	\n"
490	"	scond	%[val], [%[rwlock]]	\n"
491	"	bnz	1b			\n"
492	"					\n"
493	: [val]		"=&r"	(val)
494	: [rwlock]	"r"	(&(rw->counter))
495	: "memory", "cc");
496
497	smp_mb();
498}
499
500static inline void arch_write_unlock(arch_rwlock_t *rw)
501{
502	unsigned int val;
503
504	smp_mb();
505
506	/*
507	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
508	 */
509	__asm__ __volatile__(
510	"1:	llock	%[val], [%[rwlock]]	\n"
511	"	scond	%[UNLOCKED], [%[rwlock]]\n"
512	"	bnz	1b			\n"
513	"					\n"
514	: [val]		"=&r"	(val)
515	: [rwlock]	"r"	(&(rw->counter)),
516	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
517	: "memory", "cc");
518
519	smp_mb();
520}
521
522#undef SCOND_FAIL_RETRY_VAR_DEF
523#undef SCOND_FAIL_RETRY_ASM
524#undef SCOND_FAIL_RETRY_VARS
525
526#endif	/* CONFIG_ARC_STAR_9000923308 */
527
528#else	/* !CONFIG_ARC_HAS_LLSC */
529
530static inline void arch_spin_lock(arch_spinlock_t *lock)
531{
532	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
533
534	/*
535	 * This smp_mb() is technically superfluous, we only need the one
536	 * after the lock for providing the ACQUIRE semantics.
537	 * However doing the "right" thing was regressing hackbench
538	 * so keeping this, pending further investigation
539	 */
540	smp_mb();
541
542	__asm__ __volatile__(
543	"1:	ex  %0, [%1]		\n"
544	"	breq  %0, %2, 1b	\n"
545	: "+&r" (val)
546	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
547	: "memory");
548
549	/*
550	 * ACQUIRE barrier to ensure load/store after taking the lock
551	 * don't "bleed-up" out of the critical section (leak-in is allowed)
552	 * http://www.spinics.net/lists/kernel/msg2010409.html
553	 *
554	 * ARCv2 only has load-load, store-store and all-all barrier
555	 * thus need the full all-all barrier
556	 */
557	smp_mb();
558}
559
560/* 1 - lock taken successfully */
561static inline int arch_spin_trylock(arch_spinlock_t *lock)
562{
563	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
564
565	smp_mb();
566
567	__asm__ __volatile__(
568	"1:	ex  %0, [%1]		\n"
569	: "+r" (val)
570	: "r"(&(lock->slock))
571	: "memory");
572
573	smp_mb();
574
575	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
576}
577
578static inline void arch_spin_unlock(arch_spinlock_t *lock)
579{
580	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
581
582	/*
583	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
584	 * is the only option
585	 */
586	smp_mb();
587
588	__asm__ __volatile__(
589	"	ex  %0, [%1]		\n"
590	: "+r" (val)
591	: "r"(&(lock->slock))
592	: "memory");
593
594	/*
595	 * superfluous, but keeping for now - see pairing version in
596	 * arch_spin_lock above
597	 */
598	smp_mb();
599}
600
601/*
602 * Read-write spinlocks, allowing multiple readers but only one writer.
603 * Unfair locking as Writers could be starved indefinitely by Reader(s)
604 *
605 * The spinlock itself is contained in @counter and access to it is
606 * serialized with @lock_mutex.
607 */
608
609/* 1 - lock taken successfully */
610static inline int arch_read_trylock(arch_rwlock_t *rw)
611{
612	int ret = 0;
613
614	arch_spin_lock(&(rw->lock_mutex));
615
616	/*
617	 * zero means writer holds the lock exclusively, deny Reader.
618	 * Otherwise grant lock to first/subseq reader
619	 */
620	if (rw->counter > 0) {
621		rw->counter--;
622		ret = 1;
623	}
624
625	arch_spin_unlock(&(rw->lock_mutex));
626
627	smp_mb();
628	return ret;
629}
630
631/* 1 - lock taken successfully */
632static inline int arch_write_trylock(arch_rwlock_t *rw)
633{
634	int ret = 0;
635
636	arch_spin_lock(&(rw->lock_mutex));
637
638	/*
639	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
640	 * deny writer. Otherwise if unlocked grant to writer
641	 * Hence the claim that Linux rwlocks are unfair to writers.
642	 * (can be starved for an indefinite time by readers).
643	 */
644	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
645		rw->counter = 0;
646		ret = 1;
647	}
648	arch_spin_unlock(&(rw->lock_mutex));
649
650	return ret;
651}
652
653static inline void arch_read_lock(arch_rwlock_t *rw)
654{
655	while (!arch_read_trylock(rw))
656		cpu_relax();
657}
658
659static inline void arch_write_lock(arch_rwlock_t *rw)
660{
661	while (!arch_write_trylock(rw))
662		cpu_relax();
663}
664
665static inline void arch_read_unlock(arch_rwlock_t *rw)
666{
667	arch_spin_lock(&(rw->lock_mutex));
668	rw->counter++;
669	arch_spin_unlock(&(rw->lock_mutex));
670}
671
672static inline void arch_write_unlock(arch_rwlock_t *rw)
673{
674	arch_spin_lock(&(rw->lock_mutex));
675	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
676	arch_spin_unlock(&(rw->lock_mutex));
677}
678
679#endif
680
681#define arch_read_can_lock(x)	((x)->counter > 0)
682#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
683
684#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
685#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
686
687#define arch_spin_relax(lock)	cpu_relax()
688#define arch_read_relax(lock)	cpu_relax()
689#define arch_write_relax(lock)	cpu_relax()
690
691#endif /* __ASM_SPINLOCK_H */