v4.10.11
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the
 * MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair and of having each CPU spin
 * on a local variable while trying to acquire the lock. It avoids the
 * expensive cache-line bouncing that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};
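
/*
 * The lock itself is just a tail pointer: *lock points at the queue node of
 * the most recent contender (NULL when the lock is free), and each waiter is
 * reached from its predecessor through ->next:
 *
 *	holder -> waiter -> ... -> tail  <- *lock
 *
 * Each CPU spins on the ->locked field of its own node, which is what keeps
 * the spinning local and avoids the cache-line bouncing described above.
 */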

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		cpu_relax();						\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure that all
 * operations in the critical section have completed before the lock is
 * released.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: on many architectures (x86 being an exception), the
 * smp_load_acquire()/smp_store_release() pair is not sufficient to make an
 * mcs_unlock/mcs_lock sequence act as a full memory barrier across CPUs.
 * Callers that need a full barrier across multiple CPUs from an
 * mcs_unlock/mcs_lock pair should use smp_mb__after_unlock_lock() after
 * mcs_lock.
 */
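
/*
 * For example (a sketch; 'lock' and 'node' stand for caller-provided state),
 * a path that needs the preceding unlock together with its own lock to act
 * as a full barrier would do:
 *
 *	mcs_spin_lock(&lock, &node);
 *	smp_mb__after_unlock_lock();
 *	... accesses ordered against the previous critical section ...
 */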

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to that node to this function, in addition to the lock.
 * If the lock is already held, this function spins on node->locked until
 * the previous lock holder sets it in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node, and to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * A thread only spins on its own node->locked value while
		 * waiting for the lock, and since this thread acquired the
		 * lock immediately and never spins on its node, the value is
		 * never used. If a debug mode is needed to audit lock status,
		 * set node->locked here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the same node that was used
 * to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * No successor is visible: try to release the lock by
		 * resetting it to NULL.
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/*
		 * The cmpxchg() failed, so a new waiter has already swapped
		 * itself into *lock but has not yet linked prev->next; wait
		 * until the next pointer is set.
		 */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}
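
/*
 * Example usage (a minimal sketch; mcs_example_critical_section is a
 * hypothetical caller, not part of this header): the queue node lives on
 * the caller's stack, and the same node must be passed to both the lock
 * and the unlock call.
 */
static inline void mcs_example_critical_section(struct mcs_spinlock **lock)
{
	struct mcs_spinlock node;

	mcs_spin_lock(lock, &node);
	/* ... critical section, serialized by *lock ... */
	mcs_spin_unlock(lock, &node);
}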

#endif /* __LINUX_MCS_SPINLOCK_H */
v4.6

The v4.6 version of this file is identical, except that the two contended
spin loops (in arch_mcs_spin_lock_contended() and mcs_spin_unlock()) call
cpu_relax_lowlatency() instead of cpu_relax().