v6.9.4: arch/powerpc/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_PPC_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>
#include <asm/qrwlock.h>
#else
#include <asm/simple_spinlock.h>
#endif

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#ifndef CONFIG_PPC_QUEUED_SPINLOCKS
static inline void pv_spinlocks_init(void) { }
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
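In v6.9.4 the header is only a thin selector: depending on CONFIG_PPC_QUEUED_SPINLOCKS it pulls in either the queued (qspinlock/qrwlock) or the simple spinlock implementation, and it defines smp_mb__after_spinlock() as a full barrier. Kernel code does not include this file directly; it uses the generic wrappers from include/linux/spinlock.h. The sketch below is purely illustrative and not part of the header; the example_* identifiers are hypothetical.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static int example_data;		/* hypothetical shared data */

static void example_store(int v)
{
	spin_lock(&example_lock);	/* acquire semantics */
	smp_mb__after_spinlock();	/* upgrade the acquire to a full barrier */
	example_data = v;
	spin_unlock(&example_lock);	/* release semantics */
}

Whether spin_lock() ultimately lands in qspinlock.h or simple_spinlock.h is decided at configuration time; callers see the same API either way.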
v4.10.11: arch/powerpc/include/asm/spinlock.h
 
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

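/*
 * Editorial note, not part of the original source: the io_sync flag
 * tested by SYNC_IO is set by the powerpc MMIO accessors.  If any MMIO
 * was issued while the lock was held, arch_spin_unlock() below performs
 * a full mb() before releasing the lock, so device accesses cannot leak
 * past the unlock.
 */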
#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

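/*
 * Editorial note, not part of the original source: as a concrete
 * example of the encoding described above, while CPU 5 holds the lock
 * the lock word reads 0x80000005, and it reads 0 when the lock is
 * free.  __spin_yield() can therefore recover the holder's CPU number
 * from the lock word and ask the hypervisor to confer the remaining
 * timeslice to that virtual processor.
 */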
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

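/*
 * Editorial note, not part of the original source: while spinning,
 * arch_spin_lock_flags() above temporarily restores the caller's saved
 * interrupt state (flags), so interrupts that were enabled before the
 * irqsave can still be serviced during a long wait; it switches back to
 * the disabled state (flags_dis) before retrying the trylock.
 */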
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	arch_spinlock_t lock_val;

	smp_mb();

	/*
	 * Atomically load and store back the lock value (unchanged). This
	 * ensures that our observation of the lock value is ordered with
	 * respect to other lock operations.
	 */
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0, 0, %2, 0) "\n"
"	stwcx. %0, 0, %2\n"
"	bne- 1b\n"
	: "=&r" (lock_val), "+m" (*lock)
	: "r" (lock)
	: "cr0", "xer");

	if (arch_spin_value_unlocked(lock_val))
		goto out;

	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();

out:
	smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

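/*
 * Editorial note, not part of the original source: with this encoding
 * the rwlock word is simply a reader count while read-held (e.g. 3 with
 * three readers), and the negative WRLOCK_TOKEN while write-held (e.g.
 * 0x80000005, negative as a signed int, when CPU 5 holds it for write
 * on a 64-bit kernel).  arch_read_can_lock()/arch_write_can_lock()
 * above test exactly that sign/zero condition.
 */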
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
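As with the spinlock half, these arch_* read/write primitives are reached only through the generic rwlock wrappers. The sketch below is illustrative only and not part of the header; the example_* identifiers are hypothetical.

#include <linux/spinlock.h>	/* also provides the generic rwlock API */

static DEFINE_RWLOCK(example_rwlock);	/* hypothetical lock */
static int example_value;		/* hypothetical shared data */

static int example_read(void)
{
	int v;

	read_lock(&example_rwlock);	/* maps down to arch_read_lock() above */
	v = example_value;
	read_unlock(&example_rwlock);
	return v;
}

static void example_write(int v)
{
	write_lock(&example_rwlock);	/* maps down to arch_write_lock() above */
	example_value = v;
	write_unlock(&example_rwlock);
}

Multiple readers may hold example_rwlock concurrently (the lock word counts them), while a writer excludes both readers and other writers by installing the negative WRLOCK_TOKEN.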