#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include "mcs_spinlock.h"

#ifdef CONFIG_SMP

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc.).
 *
 * Using a single MCS node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);

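/*
 * For reference, a sketch of the node layout assumed throughout this file.
 * The real definition lives in mcs_spinlock.h; this is shown only as a
 * guide to the @next, @prev and @locked fields used below:
 *
 *	struct optimistic_spin_queue {
 *		struct optimistic_spin_queue *next, *prev;
 *		int locked;	// 1 if lock acquired
 *	};
 */
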
/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_queue *
osq_wait_next(struct optimistic_spin_queue **lock,
	      struct optimistic_spin_queue *node,
	      struct optimistic_spin_queue *prev)
{
	struct optimistic_spin_queue *next = NULL;

	for (;;) {
		if (*lock == node && cmpxchg(lock, node, prev) == node) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		arch_mutex_cpu_relax();
	}

	return next;
}

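/*
 * Queue ourselves on @lock and spin until we are at the head of the queue.
 * Returns true once the lock is acquired; returns false when we unqueue
 * because need_resched() tells us to go block instead.
 */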
bool osq_lock(struct optimistic_spin_queue **lock)
{
	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_queue *prev, *next;

	node->locked = 0;
	node->next = NULL;

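	/*
	 * Publish ourselves as the new queue tail; a NULL return from the
	 * xchg() means the queue was empty and we own the lock outright.
	 */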
	node->prev = prev = xchg(lock, node);
	if (likely(prev == NULL))
		return true;

	ACCESS_ONCE(prev->next) = node;

	/*
	 * Normally @prev is untouchable after the above store, because at
	 * that moment unlock() can proceed and wipe the node element from
	 * the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!smp_load_acquire(&node->locked)) {
		/*
		 * If we need to reschedule, bail... so we can block.
		 */
		if (need_resched())
			goto unqueue;

		arch_mutex_cpu_relax();
	}
	return true;

unqueue:
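	/*
	 * Unqueueing happens in three steps: Step-A detaches us from @prev,
	 * Step-B obtains a stable @next (or moves @lock back to @prev), and
	 * Step-C links @prev and @next to each other.
	 */
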
	/*
	 * Step - A  -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		arch_mutex_cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = ACCESS_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	ACCESS_ONCE(next->prev) = prev;
	ACCESS_ONCE(prev->next) = next;

	return false;
}

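/*
 * Release the lock: clear @lock if we are the queue tail, otherwise hand
 * the lock to our successor by setting its @locked flag.
 */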
void osq_unlock(struct optimistic_spin_queue **lock)
{
	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_queue *next;

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(cmpxchg(lock, node, NULL) == node))
		return;

	/*
	 * Second most likely case: a successor has already linked itself
	 * into @node->next, so hand the lock straight to it.
	 */
	next = xchg(&node->next, NULL);
	if (next) {
		ACCESS_ONCE(next->locked) = 1;
		return;
	}

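	/*
	 * Slow path: our successor is still in the middle of queueing or
	 * unqueueing; wait for a stable @node->next, or clear @lock if we
	 * turn out to be the last queued after all.
	 */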
	next = osq_wait_next(lock, node, NULL);
	if (next)
		ACCESS_ONCE(next->locked) = 1;
}
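
/*
 * Usage sketch (illustrative only; "sem", its ->osq field and
 * owner_running() are hypothetical stand-ins for the embedding sleeping
 * lock): the optimistic-spin path runs with preemption disabled and
 * brackets its owner-spin loop with osq_lock()/osq_unlock():
 *
 *	if (!osq_lock(&sem->osq))
 *		goto slowpath;		// need_resched(), go block
 *	while (owner_running(sem))	// spin while the owner is on a CPU
 *		arch_mutex_cpu_relax();
 *	osq_unlock(&sem->osq);
 */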

#endif /* CONFIG_SMP */