v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/alternative.h>
#include <asm/asm.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

struct spin_wait {
	struct spin_wait *next, *prev;
	int node_id;
} __aligned(32);

static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);

#define _Q_LOCK_CPU_OFFSET	0
#define _Q_LOCK_STEAL_OFFSET	16
#define _Q_TAIL_IDX_OFFSET	18
#define _Q_TAIL_CPU_OFFSET	20

#define _Q_LOCK_CPU_MASK	0x0000ffff
#define _Q_LOCK_STEAL_ADD	0x00010000
#define _Q_LOCK_STEAL_MASK	0x00030000
#define _Q_TAIL_IDX_MASK	0x000c0000
#define _Q_TAIL_CPU_MASK	0xfff00000

#define _Q_LOCK_MASK		(_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
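
/*
 * Lock word layout implied by the offsets and masks above:
 *
 *   bits  0-15: CPU number of the lock owner, plus one (0 = unlocked)
 *   bits 16-17: lock steal counter
 *   bits 18-19: spin_wait node index of the queue tail (0..3)
 *   bits 20-31: CPU number of the queue tail, plus one (0 = no waiters)
 */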

void arch_spin_lock_setup(int cpu)
{
	struct spin_wait *node;
	int ix;

	node = per_cpu_ptr(&spin_wait[0], cpu);
	for (ix = 0; ix < 4; ix++, node++) {
		memset(node, 0, sizeof(*node));
		node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
			(ix << _Q_TAIL_IDX_OFFSET);
	}
}

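/*
 * The .insn rre,0xb2fa0000 sequences below encode NIAI (next instruction
 * access intent); the ALTERNATIVE() patches it in only when facility 49
 * is installed, so that the CPU gets a hint about how the following
 * access to the lock word will be used.
 */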
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */
		"	l	%[owner],%[lock]\n"
		: [owner] "=d" (owner) : [lock] "R" (*lock) : "memory");
	return owner;
}

#ifdef __HAVE_ASM_FLAG_OUTPUTS__

static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)
{
	int cc;

	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
		"	cs	%[old],%[new],%[lock]\n"
		: [old] "+d" (old), [lock] "+Q" (*lock), "=@cc" (cc)
		: [new] "d" (new)
		: "memory");
	return cc == 0;
}

#else /* __HAVE_ASM_FLAG_OUTPUTS__ */

static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
		"	cs	%[old],%[new],%[lock]\n"
		: [old] "+d" (old), [lock] "+Q" (*lock)
		: [new] "d" (new)
		: "cc", "memory");
	return expected == old;
}

#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */

static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
	int ix, cpu;

	ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
	return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}

static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
	if (lock & _Q_LOCK_CPU_MASK)
		return lock & _Q_LOCK_CPU_MASK;
	if (node == NULL || node->prev == NULL)
		return 0;	/* 0 -> no target cpu */
	while (node->prev)
		node = node->prev;
	return node->node_id >> _Q_TAIL_CPU_OFFSET;
}

static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
	struct spin_wait *node, *next;
	int lockval, ix, node_id, tail_id, old, new, owner, count;

	ix = get_lowcore()->spinlock_index++;
	barrier();
	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
	node = this_cpu_ptr(&spin_wait[ix]);
	node->prev = node->next = NULL;
	node_id = node->node_id;

	/* Enqueue the node for this CPU in the spinlock wait queue */
	old = READ_ONCE(lp->lock);
	while (1) {
		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
			/*
			 * The lock is free but there may be waiters.
			 * With no waiters simply take the lock, if there
			 * are waiters try to steal the lock. The lock may
			 * be stolen three times before the next queued
			 * waiter will get the lock.
			 */
			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
			if (arch_try_cmpxchg(&lp->lock, &old, new))
				/* Got the lock */
				goto out;
			/* lock passing in progress */
			continue;
		}
		/* Make the node of this CPU the new tail. */
		new = node_id | (old & _Q_LOCK_MASK);
		if (arch_try_cmpxchg(&lp->lock, &old, new))
			break;
	}
	/* Set the 'next' pointer of the tail node in the queue */
	tail_id = old & _Q_TAIL_MASK;
	if (tail_id != 0) {
		node->prev = arch_spin_decode_tail(tail_id);
		WRITE_ONCE(node->prev->next, node);
	}

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(old, node);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	/* Spin on the CPU local node->prev pointer */
	if (tail_id != 0) {
		count = spin_retry;
		while (READ_ONCE(node->prev) != NULL) {
			if (count-- >= 0)
				continue;
			count = spin_retry;
			/* Query running state of lock holder again. */
			owner = arch_spin_yield_target(old, node);
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
		}
	}

	/* Spin on the lock value in the spinlock_t */
	count = spin_retry;
	while (1) {
		old = READ_ONCE(lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		if (!owner) {
			tail_id = old & _Q_TAIL_MASK;
			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
			if (arch_try_cmpxchg(&lp->lock, &old, new))
				/* Got the lock */
				break;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}

	/* Pass lock_spin job to next CPU in the queue */
	if (node_id && tail_id != node_id) {
		/* Wait until the next CPU has set up the 'next' pointer */
		while ((next = READ_ONCE(node->next)) == NULL)
			;
		next->prev = NULL;
	}

 out:
	get_lowcore()->spinlock_index--;
}
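
/*
 * Worked example of the lock word handling above: with CPU 2 holding the
 * lock and CPU 7 queued on its first spin_wait node, the lock word reads
 * 0x00800003 (tail CPU 7+1 in bits 20-31, owner CPU 2+1 in bits 0-15).
 * Each steal while waiters are queued adds _Q_LOCK_STEAL_ADD; once the
 * steal field reaches _Q_LOCK_STEAL_MASK (three steals), only the queued
 * waiter at the head of the queue can take the lock, which clears the
 * steal count again.
 */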

static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	count = spin_retry;
	while (1) {
		old = arch_load_niai4(&lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		/* Try to get the lock if it is free. */
		if (!owner) {
			new = (old & _Q_TAIL_MASK) | lockval;
			if (arch_try_cmpxchg_niai8(&lp->lock, old, new)) {
				/* Got the lock */
				return;
			}
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	if (test_cpu_flag(CIF_DEDICATED_CPU))
		arch_spin_lock_queued(lp);
	else
		arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (arch_try_cmpxchg(&lp->lock, &owner, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

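/*
 * rwlock counter word (rw->cnts), as used below: the low 16 bits count
 * active readers, bit 16 (0x10000) is set while a writer holds the lock,
 * and each waiting writer adds 0x20000 to announce itself.
 */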
void arch_read_lock_wait(arch_rwlock_t *rw)
{
	if (unlikely(in_interrupt())) {
		while (READ_ONCE(rw->cnts) & 0x10000)
			barrier();
		return;
	}

	/* Remove this reader again to allow recursive read locking */
	__atomic_add_const(-1, &rw->cnts);
	/* Put the reader into the wait queue */
	arch_spin_lock(&rw->wait);
	/* Now add this reader to the count value again */
	__atomic_add_const(1, &rw->cnts);
	/* Loop until the writer is done */
	while (READ_ONCE(rw->cnts) & 0x10000)
		barrier();
	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);

void arch_write_lock_wait(arch_rwlock_t *rw)
{
	int old;

	/* Add this CPU to the write waiters */
	__atomic_add(0x20000, &rw->cnts);

	/* Put the writer into the wait queue */
	arch_spin_lock(&rw->wait);

	while (1) {
		old = READ_ONCE(rw->cnts);
		if ((old & 0x1ffff) == 0 &&
		    arch_try_cmpxchg(&rw->cnts, &old, old | 0x10000))
			/* Got the lock */
			break;
		barrier();
	}

	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_write_lock_wait);

void arch_spin_relax(arch_spinlock_t *lp)
{
	int cpu;

	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_spin_relax);
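
For reference, the queued-lock word above packs four fields into a single int. Below is a minimal, standalone sketch of how such a word can be decoded; the masks and offsets are copied from the listing, while decode_lock_word() and the sample value are illustrative only and not part of the kernel source:

	#include <stdio.h>

	#define _Q_LOCK_CPU_OFFSET	0
	#define _Q_LOCK_STEAL_OFFSET	16
	#define _Q_TAIL_IDX_OFFSET	18
	#define _Q_TAIL_CPU_OFFSET	20

	#define _Q_LOCK_CPU_MASK	0x0000ffff
	#define _Q_LOCK_STEAL_MASK	0x00030000
	#define _Q_TAIL_IDX_MASK	0x000c0000
	#define _Q_TAIL_CPU_MASK	0xfff00000

	/* Print the fields of a queued-spinlock word (CPU fields store "cpu + 1"). */
	static void decode_lock_word(unsigned int lock)
	{
		printf("owner cpu+1 : %u\n", (lock & _Q_LOCK_CPU_MASK) >> _Q_LOCK_CPU_OFFSET);
		printf("steal count : %u\n", (lock & _Q_LOCK_STEAL_MASK) >> _Q_LOCK_STEAL_OFFSET);
		printf("tail index  : %u\n", (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET);
		printf("tail cpu+1  : %u\n", (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET);
	}

	int main(void)
	{
		decode_lock_word(0x00800003);	/* owner cpu 2, no steals, tail cpu 7 / node 0 */
		return 0;
	}
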
v4.6
 
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

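/*
 * Compare-and-delay (available when MACHINE_HAS_CAD is set): the .insn
 * below pauses the CPU for a short, model-dependent time while the lock
 * word still contains the given old value, which is cheaper than
 * spinning at full speed.
 */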
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

static inline int cpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return 0;
	if (smp_vcpu_scheduled(cpu))
		return 0;
	return 1;
}

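/*
 * In this version the lock word holds SPINLOCK_LOCKVAL of the owner,
 * which is the bitwise complement of the CPU number (so it is non-zero
 * even for CPU 0); ~owner therefore recovers the owning CPU for the
 * yield hints below.
 */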
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

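/*
 * rwlock word (rw->lock) in this version: bit 31 (0x80000000) is set
 * while a writer holds the lock, the lower bits count active readers;
 * the separate rw->owner field identifies a CPU that waiters may need
 * to yield to.
 */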
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);
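
In both versions the retry budget can be overridden at boot via the spin_retry= kernel parameter handled by spin_retry_setup() above (simple_strtoul with base 0, so decimal or 0x-prefixed hex is accepted), for example:

	spin_retry=2000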