v3.15
 
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache bouncing that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		arch_mutex_cpu_relax();					\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: on many architectures (x86 being an exception), the
 * smp_load_acquire/smp_store_release pair is not sufficient to form
 * a full memory barrier across cpus for mcs_unlock and mcs_lock.
 * Applications that need a full barrier across multiple cpus for an
 * mcs_unlock/mcs_lock pair should use smp_mb__after_unlock_lock()
 * after mcs_lock.
 */

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to the node to this function in addition to the lock.
 * If the lock has already been acquired, then this will proceed to spin
 * on this node->locked until the previous lock holder sets the node->locked
 * in mcs_spin_unlock().
 *
 * We don't inline mcs_spin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * A thread only spins on its own node->locked value while
		 * waiting for the lock. Since this thread acquired the lock
		 * immediately and never spins on its node->locked, that
		 * value won't be used. If a debug mode is needed to audit
		 * lock status, set node->locked here.
		 */
		return;
	}
	ACCESS_ONCE(prev->next) = node;

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

/*
 * Cancellable version of the MCS lock above.
 *
 * Intended for adaptive spinning of sleeping locks:
 * mutex_lock()/rwsem_down_{read,write}() etc.
 */

struct optimistic_spin_queue {
	struct optimistic_spin_queue *next, *prev;
	int locked; /* 1 if lock acquired */
};

extern bool osq_lock(struct optimistic_spin_queue **lock);
extern void osq_unlock(struct optimistic_spin_queue **lock);

#endif /* __LINUX_MCS_SPINLOCK_H */
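
The algorithm above is self-contained enough to restate outside the kernel.
As a rough illustration, here is a minimal userspace sketch of the same
queueing discipline using C11 atomics, with atomic_exchange_explicit()
standing in for xchg(), acquire loads for smp_load_acquire(), and release
stores for smp_store_release(). All names here (mcs_node, mcs_lock,
mcs_unlock) are hypothetical; this is an illustrative sketch, not kernel
code.

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;		/* 1 once the lock is handed to us */
};

static void mcs_lock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

	/* Enqueue ourselves at the tail; this is the xchg() step. */
	prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
	if (prev == NULL)
		return;			/* queue was empty: lock acquired */

	/* Link in behind the previous tail, then spin on our own node. */
	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;			/* cpu_relax() equivalent omitted */
}

static void mcs_unlock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next,
						     memory_order_relaxed);

	if (!next) {
		struct mcs_node *expected = node;

		/* No visible successor: try to empty the queue. */
		if (atomic_compare_exchange_strong_explicit(lock, &expected,
				NULL, memory_order_release,
				memory_order_relaxed))
			return;
		/* A successor is mid-enqueue; wait for its next pointer. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;
	}

	/* Pass the lock to the next waiter; this is the release store. */
	atomic_store_explicit(&next->locked, 1, memory_order_release);
}

Each waiter spins only on its own node->locked, which is exactly the
property the header comment calls out: the spinning stays on a local cache
line instead of bouncing the lock word between cpus.
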
v4.17
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache bouncing that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		cpu_relax();						\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: on many architectures (x86 being an exception), the
 * smp_load_acquire/smp_store_release pair is not sufficient to form
 * a full memory barrier across cpus for mcs_unlock and mcs_lock.
 * Applications that need a full barrier across multiple cpus for an
 * mcs_unlock/mcs_lock pair should use smp_mb__after_unlock_lock()
 * after mcs_lock.
 */

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to the node to this function in addition to the lock.
 * If the lock has already been acquired, then this will proceed to spin
 * on this node->locked until the previous lock holder sets the node->locked
 * in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node. And to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * A thread only spins on its own node->locked value while
		 * waiting for the lock. Since this thread acquired the lock
		 * immediately and never spins on its node->locked, that
		 * value won't be used. If a debug mode is needed to audit
		 * lock status, set node->locked here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

#endif /* __LINUX_MCS_SPINLOCK_H */
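
As the comments above note, the caller supplies its own queue node, which
must stay live until the matching unlock. A hedged sketch of a caller,
using hypothetical names (demo_lock, demo_critical_section) purely for
illustration:

/* Hypothetical caller, for illustration only. */
static struct mcs_spinlock *demo_lock;	/* NULL when the lock is free */

static void demo_critical_section(void)
{
	struct mcs_spinlock node;	/* must stay valid until unlock */

	mcs_spin_lock(&demo_lock, &node);

	/*
	 * Critical section. Per the barrier note above, an unlock/lock
	 * pair is not a full multi-cpu barrier on all architectures; a
	 * caller that needs one would add smp_mb__after_unlock_lock()
	 * here, right after mcs_spin_lock().
	 */

	mcs_spin_unlock(&demo_lock, &node);
}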