v3.15
 
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids expensive cache bounces that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		arch_mutex_cpu_relax();					\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure that all
 * operations in the critical section have completed before unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: the smp_load_acquire/smp_store_release pair is not
 * sufficient to form a full memory barrier across cpus on many
 * architectures (x86 being an exception) for the mcs_unlock/mcs_lock
 * pair. Callers that need a full barrier across multiple cpus from an
 * mcs_unlock/mcs_lock pair should use smp_mb__after_unlock_lock()
 * after mcs_lock.
 */

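/*
 * Editorial sketch (not in the original file): per the note above, a
 * caller that needs the unlock/lock pair to act as a full barrier
 * would follow the lock with smp_mb__after_unlock_lock(), roughly:
 *
 *	mcs_spin_lock(&lock, &node);
 *	smp_mb__after_unlock_lock();
 *	...critical section...
 *	mcs_spin_unlock(&lock, &node);
 */
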
/*
 * To acquire the lock, the caller declares a local node and passes a
 * reference to that node, in addition to the lock itself. If the lock
 * is already held, the caller spins on its own node->locked until the
 * previous lock holder sets it in mcs_spin_unlock().
 *
 * We don't inline mcs_spin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * A thread only spins on its own node->locked value for lock
		 * acquisition. However, since this thread can immediately
		 * acquire the lock and does not proceed to spin on its own
		 * node->locked, this value won't be used. If a debug mode is
		 * needed to audit lock status, then set node->locked here.
		 */
		return;
	}
	ACCESS_ONCE(prev->next) = node;

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

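/*
 * Editorial sketch (not in the original file): a minimal caller, per
 * the API comments above. The lock pointer and the on-stack node are
 * hypothetical names.
 */
static inline void mcs_example(struct mcs_spinlock **lock)
{
	struct mcs_spinlock node;	/* one node per acquisition, on the stack */

	mcs_spin_lock(lock, &node);
	/* ...critical section... */
	mcs_spin_unlock(lock, &node);
}
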
/*
 * Cancellable version of the MCS lock above.
 *
 * Intended for adaptive spinning of sleeping locks:
 * mutex_lock()/rwsem_down_{read,write}() etc.
 */

struct optimistic_spin_queue {
	struct optimistic_spin_queue *next, *prev;
	int locked; /* 1 if lock acquired */
};

extern bool osq_lock(struct optimistic_spin_queue **lock);
extern void osq_unlock(struct optimistic_spin_queue **lock);

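/*
 * Editorial sketch (assumption, modelled on the mutex adaptive-spinning
 * path; the osq field name is hypothetical): osq_lock() returns false
 * when the spin should be abandoned, e.g. in favour of sleeping:
 *
 *	if (osq_lock(&lock->osq)) {
 *		...optimistically spin waiting for the owner...
 *		osq_unlock(&lock->osq);
 *	} else {
 *		...fall back to the sleeping slow path...
 *	}
 */
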
#endif /* __LINUX_MCS_SPINLOCK_H */
v6.2
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids expensive cache bounces that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};

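/*
 * Editorial sketch (assumption, based on kernel/locking/qspinlock.c):
 * qspinlock keeps a small per-cpu array of these nodes, and @count
 * records how many are in use when acquisition nests across contexts
 * (task, softirq, hardirq, NMI), roughly:
 *
 *	node = this_cpu_ptr(&qnodes[0].mcs);
 *	idx  = node->count++;
 *	node = &qnodes[idx].mcs;
 */
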
#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_cond_load_acquire() provides the acquire semantics
 * required so that subsequent operations happen after the
 * lock is acquired. Additionally, some architectures such as
 * ARM64 would like to do spin-waiting instead of purely
 * spinning, and smp_cond_load_acquire() provides that behavior.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	smp_cond_load_acquire(l, VAL);					\
} while (0)
#endif

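/*
 * Editorial note (sketch): with the bare "VAL" condition this waits
 * until *l becomes non-zero, roughly the v3.15 loop:
 *
 *	while (!(smp_load_acquire(l)))
 *		cpu_relax();
 *
 * but it lets architectures such as ARM64 wait for an event (e.g. WFE)
 * instead of busy-spinning.
 */
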
#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure that all
 * operations in the critical section have completed before unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: the smp_load_acquire/smp_store_release pair is not
 * sufficient to form a full memory barrier across cpus on many
 * architectures (x86 being an exception) for the mcs_unlock/mcs_lock
 * pair. Callers that need a full barrier across multiple cpus from an
 * mcs_unlock/mcs_lock pair should use smp_mb__after_unlock_lock()
 * after mcs_lock.
 */

/*
 * To acquire the lock, the caller declares a local node and passes a
 * reference to that node, in addition to the lock itself. If the lock
 * is already held, the caller spins on its own node->locked until the
 * previous lock holder sets it in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node. And to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * A thread only spins on its own node->locked value for lock
		 * acquisition. However, since this thread can immediately
		 * acquire the lock and does not proceed to spin on its own
		 * node->locked, this value won't be used. If a debug mode is
		 * needed to audit lock status, then set node->locked here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

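/*
 * Editorial note: unlike the v3.15 version above, the empty-queue path
 * uses cmpxchg_release() rather than a fully ordered cmpxchg();
 * presumably RELEASE ordering suffices here, as the unlock only has to
 * publish the critical section to the next lock holder.
 */
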
#endif /* __LINUX_MCS_SPINLOCK_H */