v4.17
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description of this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion within its own context, and there is a limit
 * to the contexts that can nest; namely: task, softirq, hardirq, nmi. As
 * there are at most 4 nesting levels, the nesting level can be encoded in a
 * 2-bit number. We can then encode the tail by combining the 2-bit nesting
 * level with the cpu number. With one byte for the lock value and 3 bytes
 * for the tail, only a 32-bit word is now needed. Even though we only need
 * 1 bit for the lock, we extend it to a full byte to achieve better
 * performance on architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on smaller 8-bit and 16-bit data types.
 */
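
/*
 * For reference, the resulting bit layout of the 32-bit lock word, as laid
 * out in include/asm-generic/qspinlock_types.h, is:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index (nesting level)
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */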

#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
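
/*
 * For reference, the node type used above is defined in
 * kernel/locking/mcs_spinlock.h as:
 *
 *	struct mcs_spinlock {
 *		struct mcs_spinlock *next;
 *		int locked;	(1 if lock acquired)
 *		int count;	(nesting count, see qspinlock.c)
 *	};
 *
 * On a 64-bit machine this is 16 bytes, so the 4-entry per-CPU array is
 * exactly one 64-byte cacheline, as noted above.
 */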

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
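
/*
 * Worked example, assuming the NR_CPUS < 16K layout where
 * _Q_TAIL_IDX_OFFSET == 16 and _Q_TAIL_CPU_OFFSET == 18:
 *
 *	encode_tail(2, 1) == (3 << 18) | (1 << 16) == 0x000d0000
 *
 * decode_tail(0x000d0000) then recovers cpu == 2 and idx == 1, i.e. the
 * address of mcs_nodes[1] on CPU 2.
 */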

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
};
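
/*
 * As an illustration, on a little-endian machine with the layout above,
 * val == 0x000d0101 decomposes as locked == 0x01, pending == 0x01,
 * locked_pending == 0x0101 and tail == 0x000d (the tail code word from
 * the encode_tail() example, shifted down by _Q_TAIL_OFFSET).
 */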

#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&l->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */
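
/*
 * Note that when _Q_PENDING_BITS != 8 (i.e. NR_CPUS >= 16K), the pending
 * bit shares its byte with the tail code and the tail is no longer
 * halfword-aligned, so neither field can be updated with a narrow store
 * or xchg. Hence the variants above operate on the whole 32-bit word:
 * clear_pending_set_locked() with a single atomic_add(), xchg_tail() with
 * a cmpxchg loop.
 */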

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}


/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
						   struct mcs_spinlock *node)
						   { return 0; }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head_or_lock	__pv_wait_head_or_lock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
 *   queue               :       | ^--'                          |  :
 *                       :       v                               |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * wait for in-progress pending->locked hand-overs
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
			cpu_relax();
	}

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	for (;;) {
		/*
		 * If we observe any contention; queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		new = _Q_LOCKED_VAL;
		if (val == new)
			new |= _Q_PENDING_VAL;

		/*
		 * Acquire semantics are required here as the function may
		 * return immediately if the lock was free.
		 */
		old = atomic_cmpxchg_acquire(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock
	 */
	if (new == _Q_LOCKED_VAL)
		return;

	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all clear_pending_set_locked()
	 * implementations imply full barriers.
	 */
	smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	clear_pending_set_locked(lock);
	return;

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;

	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	barrier();

	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 *
	 * RELEASE, such that the stores to @node must be complete.
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);

		/*
		 * We must ensure that the stores to @node are observed before
		 * the write to prev->next. The address dependency from
		 * xchg_tail is not sufficient to ensure this because the read
		 * component of xchg_tail is unordered with respect to the
		 * initialisation of @node.
		 */
		smp_store_release(&prev->next, node);

		pv_wait_node(node, prev);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 *
	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
	 * smp_cond_load_acquire() call. As the next PV queue head hasn't been
	 * designated yet, there is no way for the locked value to become
	 * _Q_SLOW_VAL. So both the set_locked() and the
	 * atomic_cmpxchg_relaxed() calls will be safe.
	 *
	 * If PV isn't active, 0 will be returned instead.
	 */
	if ((val = pv_wait_head_or_lock(lock, node)))
		goto locked;

	val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK));

locked:
	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,0,0 -> *,0,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail),
	 * clear the tail code and grab the lock. Otherwise, we only need
	 * to grab the lock.
	 */
	for (;;) {
		/* In the PV case we might already have _Q_LOCKED_VAL set */
		if ((val & _Q_TAIL_MASK) != tail) {
			set_locked(lock);
			break;
		}
		/*
		 * The smp_cond_load_acquire() call above has provided the
		 * necessary acquire semantics required for locking. At most
		 * two iterations of this loop may be run.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release;	/* No contention */

		val = old;
	}

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	__this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
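
/*
 * For context, the fastpath that calls into this slowpath lives in
 * include/asm-generic/qspinlock.h and looks roughly like this (sketch,
 * not part of this file):
 */
#if 0
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	/* Uncontended: 0,0,0 -> 0,0,1 and we are done. */
	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;

	/* Any other observed value means contention: take the slowpath. */
	queued_spin_lock_slowpath(lock, val);
}
#endif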

/*
 * Generate the paravirt code for queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif
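
/*
 * The "qspinlock.c" self-include above compiles this file a second time
 * with _GEN_PV_LOCK_SLOWPATH defined: the pv_*() stubs are redirected to
 * the real implementations from qspinlock_paravirt.h, and the slowpath
 * body is emitted again under the name __pv_queued_spin_lock_slowpath.
 */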