kernel/sched/wait.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Generic waiting primitives.
  4 *
  5 * (C) 2004 Nadia Yvette Chambers, Oracle
  6 */
  7#include "sched.h"
  8
  9void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
 10{
 11	spin_lock_init(&wq_head->lock);
 12	lockdep_set_class_and_name(&wq_head->lock, key, name);
 13	INIT_LIST_HEAD(&wq_head->head);
 14}
 15
 16EXPORT_SYMBOL(__init_waitqueue_head);
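/*
 * Callers rarely invoke __init_waitqueue_head() directly; the
 * init_waitqueue_head() wrapper in <linux/wait.h> supplies a static
 * lock_class_key per call site, roughly:
 *
 *	#define init_waitqueue_head(wq_head)				\
 *		do {							\
 *			static struct lock_class_key __key;		\
 *									\
 *			__init_waitqueue_head((wq_head), #wq_head, &__key); \
 *		} while (0)
 */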
 17
 18void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 19{
 20	unsigned long flags;
 21
 22	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
 23	spin_lock_irqsave(&wq_head->lock, flags);
 24	__add_wait_queue(wq_head, wq_entry);
 25	spin_unlock_irqrestore(&wq_head->lock, flags);
 26}
 27EXPORT_SYMBOL(add_wait_queue);
 28
 29void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 30{
 31	unsigned long flags;
 32
 33	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 34	spin_lock_irqsave(&wq_head->lock, flags);
 35	__add_wait_queue_entry_tail(wq_head, wq_entry);
 36	spin_unlock_irqrestore(&wq_head->lock, flags);
 37}
 38EXPORT_SYMBOL(add_wait_queue_exclusive);
 39
 40void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 41{
 42	unsigned long flags;
 43
 44	spin_lock_irqsave(&wq_head->lock, flags);
 45	__remove_wait_queue(wq_head, wq_entry);
 46	spin_unlock_irqrestore(&wq_head->lock, flags);
 47}
 48EXPORT_SYMBOL(remove_wait_queue);
 49
 50/*
 51 * Scan threshold to break wait queue walk.
 52 * This allows a waker to take a break from holding the
 53 * wait queue lock during the wait queue walk.
 54 */
 55#define WAITQUEUE_WALK_BREAK_CNT 64
 56
 57/*
 58 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 59 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 60 * number) then we wake all the non-exclusive tasks and one exclusive task.
 61 *
 62 * There are circumstances in which we can try to wake a task which has already
 63 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 64 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 65 */
 66static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 67			int nr_exclusive, int wake_flags, void *key,
 68			wait_queue_entry_t *bookmark)
 69{
 70	wait_queue_entry_t *curr, *next;
 71	int cnt = 0;
 72
 73	lockdep_assert_held(&wq_head->lock);
 74
 75	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
 76		curr = list_next_entry(bookmark, entry);
 77
 78		list_del(&bookmark->entry);
 79		bookmark->flags = 0;
 80	} else
 81		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
 82
 83	if (&curr->entry == &wq_head->head)
 84		return nr_exclusive;
 85
 86	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
 87		unsigned flags = curr->flags;
 88		int ret;
 89
 90		if (flags & WQ_FLAG_BOOKMARK)
 91			continue;
 92
 93		ret = curr->func(curr, mode, wake_flags, key);
 94		if (ret < 0)
 95			break;
 96		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
 97			break;
 98
 99		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
100				(&next->entry != &wq_head->head)) {
101			bookmark->flags = WQ_FLAG_BOOKMARK;
102			list_add_tail(&bookmark->entry, &next->entry);
103			break;
104		}
105	}
106
107	return nr_exclusive;
108}
109
110static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
111			int nr_exclusive, int wake_flags, void *key)
112{
113	unsigned long flags;
114	wait_queue_entry_t bookmark;
115
116	bookmark.flags = 0;
117	bookmark.private = NULL;
118	bookmark.func = NULL;
119	INIT_LIST_HEAD(&bookmark.entry);
120
121	do {
122		spin_lock_irqsave(&wq_head->lock, flags);
123		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
124						wake_flags, key, &bookmark);
125		spin_unlock_irqrestore(&wq_head->lock, flags);
126	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
127}
128
129/**
130 * __wake_up - wake up threads blocked on a waitqueue.
131 * @wq_head: the waitqueue
132 * @mode: which threads
133 * @nr_exclusive: how many wake-one or wake-many threads to wake up
134 * @key: is directly passed to the wakeup function
135 *
136 * If this function wakes up a task, it executes a full memory barrier before
137 * accessing the task state.
138 */
139void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
140			int nr_exclusive, void *key)
141{
142	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
143}
144EXPORT_SYMBOL(__wake_up);
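/*
 * For reference, the common wake_up*() wrappers in <linux/wait.h> funnel
 * into __wake_up() roughly as follows; nr_exclusive == 1 wakes at most one
 * exclusive waiter, nr_exclusive == 0 wakes every waiter:
 *
 *	wake_up(&wq)                =>  __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *	wake_up_nr(&wq, nr)         =>  __wake_up(&wq, TASK_NORMAL, nr, NULL)
 *	wake_up_all(&wq)            =>  __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(&wq)  =>  __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 */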
145
146/*
147 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
148 */
149void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
150{
151	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
152}
153EXPORT_SYMBOL_GPL(__wake_up_locked);
154
155void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
156{
157	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
158}
159EXPORT_SYMBOL_GPL(__wake_up_locked_key);
160
161void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
162		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
163{
164	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
165}
166EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
167
168/**
169 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
170 * @wq_head: the waitqueue
171 * @mode: which threads
172 * @nr_exclusive: how many wake-one or wake-many threads to wake up
173 * @key: opaque value to be passed to wakeup targets
174 *
 175 * The sync wakeup differs in that the waker knows that it will schedule
176 * away soon, so while the target thread will be woken up, it will not
 177 * be migrated to another CPU - i.e. the two threads are 'synchronized'
178 * with each other. This can prevent needless bouncing between CPUs.
179 *
180 * On UP it can prevent extra preemption.
181 *
182 * If this function wakes up a task, it executes a full memory barrier before
183 * accessing the task state.
184 */
185void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
186			int nr_exclusive, void *key)
187{
188	int wake_flags = 1; /* XXX WF_SYNC */
189
190	if (unlikely(!wq_head))
191		return;
192
193	if (unlikely(nr_exclusive != 1))
194		wake_flags = 0;
195
196	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
197}
198EXPORT_SYMBOL_GPL(__wake_up_sync_key);
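/*
 * Illustrative sketch (the names below are examples only): a waker that is
 * about to block itself can use the _sync variants, e.g. through
 * wake_up_interruptible_sync(), to hint that the wakee need not be pulled
 * to another CPU:
 *
 *	data_ready = true;
 *	wake_up_interruptible_sync(&read_wq);		// ends up in __wake_up_sync_key()
 *	wait_event_interruptible(ack_wq, ack_received);	// the waker sleeps soon anyway
 */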
 199
200/*
201 * __wake_up_sync - see __wake_up_sync_key()
202 */
203void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
204{
205	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
206}
207EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
208
209/*
210 * Note: we use "set_current_state()" _after_ the wait-queue add,
211 * because we need a memory barrier there on SMP, so that any
212 * wake-function that tests for the wait-queue being active
213 * will be guaranteed to see waitqueue addition _or_ subsequent
214 * tests in this thread will see the wakeup having taken place.
215 *
216 * The spin_unlock() itself is semi-permeable and only protects
217 * one way (it only protects stuff inside the critical region and
218 * stops them from bleeding out - it would still allow subsequent
219 * loads to move into the critical region).
220 */
221void
222prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
223{
224	unsigned long flags;
225
226	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
227	spin_lock_irqsave(&wq_head->lock, flags);
228	if (list_empty(&wq_entry->entry))
229		__add_wait_queue(wq_head, wq_entry);
230	set_current_state(state);
231	spin_unlock_irqrestore(&wq_head->lock, flags);
232}
233EXPORT_SYMBOL(prepare_to_wait);
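/*
 * The ordering described above is what makes the classic open-coded wait
 * loop safe against lost wakeups; a minimal sketch, assuming a caller-owned
 * wait_queue_head_t 'wq' and a condition flag 'done':
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */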
234
235void
236prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
237{
238	unsigned long flags;
239
240	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
241	spin_lock_irqsave(&wq_head->lock, flags);
242	if (list_empty(&wq_entry->entry))
243		__add_wait_queue_entry_tail(wq_head, wq_entry);
244	set_current_state(state);
245	spin_unlock_irqrestore(&wq_head->lock, flags);
246}
247EXPORT_SYMBOL(prepare_to_wait_exclusive);
248
249void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
250{
251	wq_entry->flags = flags;
252	wq_entry->private = current;
253	wq_entry->func = autoremove_wake_function;
254	INIT_LIST_HEAD(&wq_entry->entry);
255}
256EXPORT_SYMBOL(init_wait_entry);
257
258long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
259{
260	unsigned long flags;
261	long ret = 0;
262
263	spin_lock_irqsave(&wq_head->lock, flags);
264	if (signal_pending_state(state, current)) {
265		/*
266		 * Exclusive waiter must not fail if it was selected by wakeup,
267		 * it should "consume" the condition we were waiting for.
268		 *
269		 * The caller will recheck the condition and return success if
270		 * we were already woken up, we can not miss the event because
271		 * wakeup locks/unlocks the same wq_head->lock.
272		 *
273		 * But we need to ensure that set-condition + wakeup after that
274		 * can't see us, it should wake up another exclusive waiter if
275		 * we fail.
276		 */
277		list_del_init(&wq_entry->entry);
278		ret = -ERESTARTSYS;
279	} else {
280		if (list_empty(&wq_entry->entry)) {
281			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
282				__add_wait_queue_entry_tail(wq_head, wq_entry);
283			else
284				__add_wait_queue(wq_head, wq_entry);
285		}
286		set_current_state(state);
287	}
288	spin_unlock_irqrestore(&wq_head->lock, flags);
289
290	return ret;
291}
292EXPORT_SYMBOL(prepare_to_wait_event);
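/*
 * prepare_to_wait_event() is the helper behind the wait_event*() family of
 * macros in <linux/wait.h>; their expansion is roughly the loop below
 * (sketch only, 'wq' and 'condition' stand in for the macro arguments):
 *
 *	struct wait_queue_entry wq_entry;
 *	long err = 0;
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		err = prepare_to_wait_event(&wq, &wq_entry, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (err)		// -ERESTARTSYS: signal pending
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wq_entry);
 */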
293
294/*
295 * Note! These two wait functions are entered with the
296 * wait-queue lock held (and interrupts off in the _irq
297 * case), so there is no race with testing the wakeup
298 * condition in the caller before they add the wait
299 * entry to the wake queue.
300 */
301int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
302{
303	if (likely(list_empty(&wait->entry)))
304		__add_wait_queue_entry_tail(wq, wait);
305
306	set_current_state(TASK_INTERRUPTIBLE);
307	if (signal_pending(current))
308		return -ERESTARTSYS;
309
310	spin_unlock(&wq->lock);
311	schedule();
312	spin_lock(&wq->lock);
313
314	return 0;
315}
316EXPORT_SYMBOL(do_wait_intr);
317
318int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
319{
320	if (likely(list_empty(&wait->entry)))
321		__add_wait_queue_entry_tail(wq, wait);
322
323	set_current_state(TASK_INTERRUPTIBLE);
324	if (signal_pending(current))
325		return -ERESTARTSYS;
326
327	spin_unlock_irq(&wq->lock);
328	schedule();
329	spin_lock_irq(&wq->lock);
330
331	return 0;
332}
333EXPORT_SYMBOL(do_wait_intr_irq);
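/*
 * A sketch of the caller side, roughly what wait_event_interruptible_locked()
 * expands to ('wq' and 'condition' stand in for the macro arguments); the
 * helper drops and retakes wq.lock only around schedule():
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);
 *		if (err)			// -ERESTARTSYS: signal pending
 *			break;
 *	}
 *	__remove_wait_queue(&wq, &wait);	// still under wq.lock
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */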
334
335/**
336 * finish_wait - clean up after waiting in a queue
337 * @wq_head: waitqueue waited on
338 * @wq_entry: wait descriptor
339 *
340 * Sets current thread back to running state and removes
341 * the wait descriptor from the given waitqueue if still
342 * queued.
343 */
344void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
345{
346	unsigned long flags;
347
348	__set_current_state(TASK_RUNNING);
349	/*
350	 * We can check for list emptiness outside the lock
351	 * IFF:
352	 *  - we use the "careful" check that verifies both
353	 *    the next and prev pointers, so that there cannot
354	 *    be any half-pending updates in progress on other
 355	 *    CPUs that we haven't seen yet (and that might
 356	 *    still change the stack area).
357	 * and
 358	 *  - all other users take the lock (i.e. we can only
359	 *    have _one_ other CPU that looks at or modifies
360	 *    the list).
361	 */
362	if (!list_empty_careful(&wq_entry->entry)) {
363		spin_lock_irqsave(&wq_head->lock, flags);
364		list_del_init(&wq_entry->entry);
365		spin_unlock_irqrestore(&wq_head->lock, flags);
366	}
367}
368EXPORT_SYMBOL(finish_wait);
369
370int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
371{
372	int ret = default_wake_function(wq_entry, mode, sync, key);
373
374	if (ret)
375		list_del_init(&wq_entry->entry);
376
377	return ret;
378}
379EXPORT_SYMBOL(autoremove_wake_function);
380
381static inline bool is_kthread_should_stop(void)
382{
383	return (current->flags & PF_KTHREAD) && kthread_should_stop();
384}
385
386/*
387 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
388 *
389 * add_wait_queue(&wq_head, &wait);
390 * for (;;) {
391 *     if (condition)
392 *         break;
393 *
394 *     // in wait_woken()			// in woken_wake_function()
395 *
396 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
397 *     smp_mb(); // A				try_to_wake_up():
398 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
399 *         schedule()				   if (p->state & mode)
400 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
401 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
402 *     smp_mb(); // B				condition = true;
403 * }						smp_mb(); // C
404 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
405 */
406long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
407{
408	/*
409	 * The below executes an smp_mb(), which matches with the full barrier
410	 * executed by the try_to_wake_up() in woken_wake_function() such that
411	 * either we see the store to wq_entry->flags in woken_wake_function()
412	 * or woken_wake_function() sees our store to current->state.
413	 */
414	set_current_state(mode); /* A */
415	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
416		timeout = schedule_timeout(timeout);
417	__set_current_state(TASK_RUNNING);
418
419	/*
420	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
421	 * in woken_wake_function() such that either we see the wait condition
422	 * being true or the store to wq_entry->flags in woken_wake_function()
423	 * follows ours in the coherence order.
424	 */
425	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
426
427	return timeout;
428}
429EXPORT_SYMBOL(wait_woken);
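/*
 * The waiter side of the diagram above, fleshed out; a minimal sketch
 * assuming a caller-owned wait_queue_head_t 'wq', a condition flag
 * 'condition' and a timeout in jiffies (this is essentially the pattern
 * the networking code uses via sk_wait_event()):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = HZ;
 *
 *	add_wait_queue(&wq, &wait);
 *	while (!condition && timeout && !signal_pending(current))
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq, &wait);
 */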
430
431int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
432{
433	/* Pairs with the smp_store_mb() in wait_woken(). */
434	smp_mb(); /* C */
435	wq_entry->flags |= WQ_FLAG_WOKEN;
436
437	return default_wake_function(wq_entry, mode, sync, key);
438}
439EXPORT_SYMBOL(woken_wake_function);
kernel/sched/wait.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Generic waiting primitives.
  4 *
  5 * (C) 2004 Nadia Yvette Chambers, Oracle
  6 */
  7#include "sched.h"
  8
  9void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
 10{
 11	spin_lock_init(&wq_head->lock);
 12	lockdep_set_class_and_name(&wq_head->lock, key, name);
 13	INIT_LIST_HEAD(&wq_head->head);
 14}
 15
 16EXPORT_SYMBOL(__init_waitqueue_head);
 17
 18void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 19{
 20	unsigned long flags;
 21
 22	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
 23	spin_lock_irqsave(&wq_head->lock, flags);
 24	__add_wait_queue(wq_head, wq_entry);
 25	spin_unlock_irqrestore(&wq_head->lock, flags);
 26}
 27EXPORT_SYMBOL(add_wait_queue);
 28
 29void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 30{
 31	unsigned long flags;
 32
 33	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 34	spin_lock_irqsave(&wq_head->lock, flags);
 35	__add_wait_queue_entry_tail(wq_head, wq_entry);
 36	spin_unlock_irqrestore(&wq_head->lock, flags);
 37}
 38EXPORT_SYMBOL(add_wait_queue_exclusive);
 39
 40void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 41{
 42	unsigned long flags;
 43
 44	spin_lock_irqsave(&wq_head->lock, flags);
 45	__remove_wait_queue(wq_head, wq_entry);
 46	spin_unlock_irqrestore(&wq_head->lock, flags);
 47}
 48EXPORT_SYMBOL(remove_wait_queue);
 49
 50/*
 51 * Scan threshold to break wait queue walk.
 52 * This allows a waker to take a break from holding the
 53 * wait queue lock during the wait queue walk.
 54 */
 55#define WAITQUEUE_WALK_BREAK_CNT 64
 56
 57/*
 58 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 59 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 60 * number) then we wake all the non-exclusive tasks and one exclusive task.
 61 *
 62 * There are circumstances in which we can try to wake a task which has already
 63 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 64 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 65 */
 66static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 67			int nr_exclusive, int wake_flags, void *key,
 68			wait_queue_entry_t *bookmark)
 69{
 70	wait_queue_entry_t *curr, *next;
 71	int cnt = 0;
 72
 73	lockdep_assert_held(&wq_head->lock);
 74
 75	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
 76		curr = list_next_entry(bookmark, entry);
 77
 78		list_del(&bookmark->entry);
 79		bookmark->flags = 0;
 80	} else
 81		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
 82
 83	if (&curr->entry == &wq_head->head)
 84		return nr_exclusive;
 85
 86	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
 87		unsigned flags = curr->flags;
 88		int ret;
 89
 90		if (flags & WQ_FLAG_BOOKMARK)
 91			continue;
 92
 93		ret = curr->func(curr, mode, wake_flags, key);
 94		if (ret < 0)
 95			break;
 96		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
 97			break;
 98
 99		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
100				(&next->entry != &wq_head->head)) {
101			bookmark->flags = WQ_FLAG_BOOKMARK;
102			list_add_tail(&bookmark->entry, &next->entry);
103			break;
104		}
105	}
106
107	return nr_exclusive;
108}
109
110static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
111			int nr_exclusive, int wake_flags, void *key)
112{
113	unsigned long flags;
114	wait_queue_entry_t bookmark;
115
116	bookmark.flags = 0;
117	bookmark.private = NULL;
118	bookmark.func = NULL;
119	INIT_LIST_HEAD(&bookmark.entry);
120
121	do {
122		spin_lock_irqsave(&wq_head->lock, flags);
123		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
124						wake_flags, key, &bookmark);
125		spin_unlock_irqrestore(&wq_head->lock, flags);
126	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
127}
128
129/**
130 * __wake_up - wake up threads blocked on a waitqueue.
131 * @wq_head: the waitqueue
132 * @mode: which threads
133 * @nr_exclusive: how many wake-one or wake-many threads to wake up
134 * @key: is directly passed to the wakeup function
135 *
136 * If this function wakes up a task, it executes a full memory barrier before
137 * accessing the task state.
138 */
139void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
140			int nr_exclusive, void *key)
141{
142	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
143}
144EXPORT_SYMBOL(__wake_up);
145
146/*
147 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
148 */
149void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
150{
151	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
152}
153EXPORT_SYMBOL_GPL(__wake_up_locked);
154
155void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
156{
157	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
158}
159EXPORT_SYMBOL_GPL(__wake_up_locked_key);
160
161void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
162		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
163{
164	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
165}
166EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
167
168/**
169 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
170 * @wq_head: the waitqueue
 171 * @mode: which threads
 172 * @key: opaque value to be passed to wakeup targets
173 *
 174 * The sync wakeup differs in that the waker knows that it will schedule
175 * away soon, so while the target thread will be woken up, it will not
 176 * be migrated to another CPU - i.e. the two threads are 'synchronized'
177 * with each other. This can prevent needless bouncing between CPUs.
178 *
179 * On UP it can prevent extra preemption.
180 *
181 * If this function wakes up a task, it executes a full memory barrier before
182 * accessing the task state.
183 */
184void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
185			void *key)
 186{
187	if (unlikely(!wq_head))
188		return;
189
 190	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
 191}
192EXPORT_SYMBOL_GPL(__wake_up_sync_key);
193
194/**
195 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
196 * @wq_head: the waitqueue
197 * @mode: which threads
198 * @key: opaque value to be passed to wakeup targets
199 *
200 * The sync wakeup differs in that the waker knows that it will schedule
201 * away soon, so while the target thread will be woken up, it will not
 202 * be migrated to another CPU - i.e. the two threads are 'synchronized'
203 * with each other. This can prevent needless bouncing between CPUs.
204 *
205 * On UP it can prevent extra preemption.
206 *
207 * If this function wakes up a task, it executes a full memory barrier before
208 * accessing the task state.
209 */
210void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
211			       unsigned int mode, void *key)
212{
213        __wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
214}
215EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
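/*
 * Compared with the v5.4 version above, the nr_exclusive argument has been
 * dropped from the sync variants: they now always wake a single exclusive
 * waiter with WF_SYNC set. A caller passing a poll mask would look roughly
 * like this (sketch; poll_to_key() is the <linux/wait.h> helper):
 *
 *	__wake_up_sync_key(&wq, TASK_INTERRUPTIBLE, poll_to_key(EPOLLIN));
 *	// or, with wq.lock already held:
 *	__wake_up_locked_sync_key(&wq, TASK_INTERRUPTIBLE, poll_to_key(EPOLLIN));
 */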
216
217/*
218 * __wake_up_sync - see __wake_up_sync_key()
219 */
220void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
221{
222	__wake_up_sync_key(wq_head, mode, NULL);
223}
224EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
225
226/*
227 * Note: we use "set_current_state()" _after_ the wait-queue add,
228 * because we need a memory barrier there on SMP, so that any
229 * wake-function that tests for the wait-queue being active
230 * will be guaranteed to see waitqueue addition _or_ subsequent
231 * tests in this thread will see the wakeup having taken place.
232 *
233 * The spin_unlock() itself is semi-permeable and only protects
234 * one way (it only protects stuff inside the critical region and
235 * stops them from bleeding out - it would still allow subsequent
236 * loads to move into the critical region).
237 */
238void
239prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
240{
241	unsigned long flags;
242
243	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
244	spin_lock_irqsave(&wq_head->lock, flags);
245	if (list_empty(&wq_entry->entry))
246		__add_wait_queue(wq_head, wq_entry);
247	set_current_state(state);
248	spin_unlock_irqrestore(&wq_head->lock, flags);
249}
250EXPORT_SYMBOL(prepare_to_wait);
251
252void
253prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
254{
255	unsigned long flags;
256
257	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
258	spin_lock_irqsave(&wq_head->lock, flags);
259	if (list_empty(&wq_entry->entry))
260		__add_wait_queue_entry_tail(wq_head, wq_entry);
261	set_current_state(state);
262	spin_unlock_irqrestore(&wq_head->lock, flags);
263}
264EXPORT_SYMBOL(prepare_to_wait_exclusive);
265
266void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
267{
268	wq_entry->flags = flags;
269	wq_entry->private = current;
270	wq_entry->func = autoremove_wake_function;
271	INIT_LIST_HEAD(&wq_entry->entry);
272}
273EXPORT_SYMBOL(init_wait_entry);
274
275long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
276{
277	unsigned long flags;
278	long ret = 0;
279
280	spin_lock_irqsave(&wq_head->lock, flags);
281	if (signal_pending_state(state, current)) {
282		/*
283		 * Exclusive waiter must not fail if it was selected by wakeup,
284		 * it should "consume" the condition we were waiting for.
285		 *
286		 * The caller will recheck the condition and return success if
287		 * we were already woken up, we can not miss the event because
288		 * wakeup locks/unlocks the same wq_head->lock.
289		 *
290		 * But we need to ensure that set-condition + wakeup after that
291		 * can't see us, it should wake up another exclusive waiter if
292		 * we fail.
293		 */
294		list_del_init(&wq_entry->entry);
295		ret = -ERESTARTSYS;
296	} else {
297		if (list_empty(&wq_entry->entry)) {
298			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
299				__add_wait_queue_entry_tail(wq_head, wq_entry);
300			else
301				__add_wait_queue(wq_head, wq_entry);
302		}
303		set_current_state(state);
304	}
305	spin_unlock_irqrestore(&wq_head->lock, flags);
306
307	return ret;
308}
309EXPORT_SYMBOL(prepare_to_wait_event);
310
311/*
312 * Note! These two wait functions are entered with the
313 * wait-queue lock held (and interrupts off in the _irq
314 * case), so there is no race with testing the wakeup
315 * condition in the caller before they add the wait
316 * entry to the wake queue.
317 */
318int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
319{
320	if (likely(list_empty(&wait->entry)))
321		__add_wait_queue_entry_tail(wq, wait);
322
323	set_current_state(TASK_INTERRUPTIBLE);
324	if (signal_pending(current))
325		return -ERESTARTSYS;
326
327	spin_unlock(&wq->lock);
328	schedule();
329	spin_lock(&wq->lock);
330
331	return 0;
332}
333EXPORT_SYMBOL(do_wait_intr);
334
335int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
336{
337	if (likely(list_empty(&wait->entry)))
338		__add_wait_queue_entry_tail(wq, wait);
339
340	set_current_state(TASK_INTERRUPTIBLE);
341	if (signal_pending(current))
342		return -ERESTARTSYS;
343
344	spin_unlock_irq(&wq->lock);
345	schedule();
346	spin_lock_irq(&wq->lock);
347
348	return 0;
349}
350EXPORT_SYMBOL(do_wait_intr_irq);
351
352/**
353 * finish_wait - clean up after waiting in a queue
354 * @wq_head: waitqueue waited on
355 * @wq_entry: wait descriptor
356 *
357 * Sets current thread back to running state and removes
358 * the wait descriptor from the given waitqueue if still
359 * queued.
360 */
361void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
362{
363	unsigned long flags;
364
365	__set_current_state(TASK_RUNNING);
366	/*
367	 * We can check for list emptiness outside the lock
368	 * IFF:
369	 *  - we use the "careful" check that verifies both
370	 *    the next and prev pointers, so that there cannot
371	 *    be any half-pending updates in progress on other
 372	 *    CPUs that we haven't seen yet (and that might
 373	 *    still change the stack area).
374	 * and
 375	 *  - all other users take the lock (i.e. we can only
376	 *    have _one_ other CPU that looks at or modifies
377	 *    the list).
378	 */
379	if (!list_empty_careful(&wq_entry->entry)) {
380		spin_lock_irqsave(&wq_head->lock, flags);
381		list_del_init(&wq_entry->entry);
382		spin_unlock_irqrestore(&wq_head->lock, flags);
383	}
384}
385EXPORT_SYMBOL(finish_wait);
386
387int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
388{
389	int ret = default_wake_function(wq_entry, mode, sync, key);
390
391	if (ret)
392		list_del_init_careful(&wq_entry->entry);
393
394	return ret;
395}
396EXPORT_SYMBOL(autoremove_wake_function);
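/*
 * Note: unlike the v5.4 version above, removal here uses
 * list_del_init_careful(), which pairs with the lockless
 * list_empty_careful() check in finish_wait() so a waiter that observes an
 * empty entry can skip taking wq_head->lock.
 */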
397
398static inline bool is_kthread_should_stop(void)
399{
400	return (current->flags & PF_KTHREAD) && kthread_should_stop();
401}
402
403/*
404 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
405 *
406 * add_wait_queue(&wq_head, &wait);
407 * for (;;) {
408 *     if (condition)
409 *         break;
410 *
411 *     // in wait_woken()			// in woken_wake_function()
412 *
413 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
414 *     smp_mb(); // A				try_to_wake_up():
415 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
416 *         schedule()				   if (p->state & mode)
417 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
418 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
419 *     smp_mb(); // B				condition = true;
420 * }						smp_mb(); // C
421 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
422 */
423long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
424{
425	/*
426	 * The below executes an smp_mb(), which matches with the full barrier
427	 * executed by the try_to_wake_up() in woken_wake_function() such that
428	 * either we see the store to wq_entry->flags in woken_wake_function()
429	 * or woken_wake_function() sees our store to current->state.
430	 */
431	set_current_state(mode); /* A */
432	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
433		timeout = schedule_timeout(timeout);
434	__set_current_state(TASK_RUNNING);
435
436	/*
437	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
438	 * in woken_wake_function() such that either we see the wait condition
439	 * being true or the store to wq_entry->flags in woken_wake_function()
440	 * follows ours in the coherence order.
441	 */
442	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
443
444	return timeout;
445}
446EXPORT_SYMBOL(wait_woken);
447
448int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
449{
450	/* Pairs with the smp_store_mb() in wait_woken(). */
451	smp_mb(); /* C */
452	wq_entry->flags |= WQ_FLAG_WOKEN;
453
454	return default_wake_function(wq_entry, mode, sync, key);
455}
456EXPORT_SYMBOL(woken_wake_function);