v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Generic waiting primitives.
  4 *
  5 * (C) 2004 Nadia Yvette Chambers, Oracle
  6 */
  7
  8void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
  9{
 10	spin_lock_init(&wq_head->lock);
 11	lockdep_set_class_and_name(&wq_head->lock, key, name);
 12	INIT_LIST_HEAD(&wq_head->head);
 13}
 14
 15EXPORT_SYMBOL(__init_waitqueue_head);
 16
 17void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 18{
 19	unsigned long flags;
 20
 21	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
 22	spin_lock_irqsave(&wq_head->lock, flags);
 23	__add_wait_queue(wq_head, wq_entry);
 24	spin_unlock_irqrestore(&wq_head->lock, flags);
 25}
 26EXPORT_SYMBOL(add_wait_queue);
 27
 28void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 29{
 30	unsigned long flags;
 31
 32	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 33	spin_lock_irqsave(&wq_head->lock, flags);
 34	__add_wait_queue_entry_tail(wq_head, wq_entry);
 35	spin_unlock_irqrestore(&wq_head->lock, flags);
 36}
 37EXPORT_SYMBOL(add_wait_queue_exclusive);
 38
 39void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 40{
 41	unsigned long flags;
 42
 43	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
 44	spin_lock_irqsave(&wq_head->lock, flags);
 45	__add_wait_queue(wq_head, wq_entry);
 46	spin_unlock_irqrestore(&wq_head->lock, flags);
 47}
 48EXPORT_SYMBOL_GPL(add_wait_queue_priority);
 49
 50void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 51{
 52	unsigned long flags;
 53
 54	spin_lock_irqsave(&wq_head->lock, flags);
 55	__remove_wait_queue(wq_head, wq_entry);
 56	spin_unlock_irqrestore(&wq_head->lock, flags);
 57}
 58EXPORT_SYMBOL(remove_wait_queue);
 59
 60/*
 61 * Scan threshold to break wait queue walk.
 62 * This allows a waker to take a break from holding the
 63 * wait queue lock during the wait queue walk.
 64 */
 65#define WAITQUEUE_WALK_BREAK_CNT 64
 66
 67/*
 68 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 69 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 70 * number) then we wake that number of exclusive tasks, and potentially all
 71 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 72 * the list and any non-exclusive tasks will be woken first. A priority task
 73 * may be at the head of the list, and can consume the event without any other
 74 * tasks being woken.
 75 *
 76 * There are circumstances in which we can try to wake a task which has already
 77 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 78 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 79 */
 80static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 81			int nr_exclusive, int wake_flags, void *key,
 82			wait_queue_entry_t *bookmark)
 83{
 84	wait_queue_entry_t *curr, *next;
 85	int cnt = 0;
 86
 87	lockdep_assert_held(&wq_head->lock);
 88
 89	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
 90		curr = list_next_entry(bookmark, entry);
 91
 92		list_del(&bookmark->entry);
 93		bookmark->flags = 0;
 94	} else
 95		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
 96
 97	if (&curr->entry == &wq_head->head)
 98		return nr_exclusive;
 99
100	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
101		unsigned flags = curr->flags;
102		int ret;
103
104		if (flags & WQ_FLAG_BOOKMARK)
105			continue;
106
107		ret = curr->func(curr, mode, wake_flags, key);
108		if (ret < 0)
109			break;
110		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
111			break;
112
113		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
114				(&next->entry != &wq_head->head)) {
115			bookmark->flags = WQ_FLAG_BOOKMARK;
116			list_add_tail(&bookmark->entry, &next->entry);
117			break;
118		}
119	}
120
121	return nr_exclusive;
122}
123
124static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
125			int nr_exclusive, int wake_flags, void *key)
126{
127	unsigned long flags;
128	wait_queue_entry_t bookmark;
129	int remaining = nr_exclusive;
130
131	bookmark.flags = 0;
132	bookmark.private = NULL;
133	bookmark.func = NULL;
134	INIT_LIST_HEAD(&bookmark.entry);
135
136	do {
137		spin_lock_irqsave(&wq_head->lock, flags);
138		remaining = __wake_up_common(wq_head, mode, remaining,
139						wake_flags, key, &bookmark);
140		spin_unlock_irqrestore(&wq_head->lock, flags);
141	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
142
143	return nr_exclusive - remaining;
144}
145
146/**
147 * __wake_up - wake up threads blocked on a waitqueue.
148 * @wq_head: the waitqueue
149 * @mode: which threads
150 * @nr_exclusive: how many wake-one or wake-many threads to wake up
151 * @key: is directly passed to the wakeup function
152 *
153 * If this function wakes up a task, it executes a full memory barrier
154 * before accessing the task state.  Returns the number of exclusive
 155 * tasks that were awakened.
156 */
157int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
158	      int nr_exclusive, void *key)
159{
160	return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
161}
162EXPORT_SYMBOL(__wake_up);
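Most callers reach __wake_up() through the wake_up*() wrapper macros rather than calling it directly. A minimal waker-side sketch (not part of this file; my_wq and my_cond are placeholder names):

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical waitqueue */
	static int my_cond;

	static void my_signal(void)
	{
		my_cond = 1;		/* make the condition true first */
		wake_up(&my_wq);	/* the usual wrapper for __wake_up(&my_wq, TASK_NORMAL, 1, NULL) */
	}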
163
164/*
165 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
166 */
167void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
168{
169	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
170}
171EXPORT_SYMBOL_GPL(__wake_up_locked);
172
173void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
174{
175	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
176}
177EXPORT_SYMBOL_GPL(__wake_up_locked_key);
178
179void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
180		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
181{
182	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
183}
184EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
185
186/**
187 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
188 * @wq_head: the waitqueue
189 * @mode: which threads
190 * @key: opaque value to be passed to wakeup targets
191 *
 192 * The sync wakeup differs in that the waker knows that it will schedule
193 * away soon, so while the target thread will be woken up, it will not
194 * be migrated to another CPU - ie. the two threads are 'synchronized'
195 * with each other. This can prevent needless bouncing between CPUs.
196 *
197 * On UP it can prevent extra preemption.
198 *
199 * If this function wakes up a task, it executes a full memory barrier before
200 * accessing the task state.
201 */
202void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
203			void *key)
204{
205	if (unlikely(!wq_head))
206		return;
207
208	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
209}
210EXPORT_SYMBOL_GPL(__wake_up_sync_key);
211
212/**
213 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
214 * @wq_head: the waitqueue
215 * @mode: which threads
216 * @key: opaque value to be passed to wakeup targets
217 *
218 * The sync wakeup differs in that the waker knows that it will schedule
219 * away soon, so while the target thread will be woken up, it will not
220 * be migrated to another CPU - ie. the two threads are 'synchronized'
221 * with each other. This can prevent needless bouncing between CPUs.
222 *
223 * On UP it can prevent extra preemption.
224 *
225 * If this function wakes up a task, it executes a full memory barrier before
226 * accessing the task state.
227 */
228void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
229			       unsigned int mode, void *key)
230{
 231	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
232}
233EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
234
235/*
236 * __wake_up_sync - see __wake_up_sync_key()
237 */
238void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
239{
240	__wake_up_sync_key(wq_head, mode, NULL);
241}
242EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
243
244void __wake_up_pollfree(struct wait_queue_head *wq_head)
245{
246	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
247	/* POLLFREE must have cleared the queue. */
248	WARN_ON_ONCE(waitqueue_active(wq_head));
249}
250
251/*
252 * Note: we use "set_current_state()" _after_ the wait-queue add,
253 * because we need a memory barrier there on SMP, so that any
254 * wake-function that tests for the wait-queue being active
255 * will be guaranteed to see waitqueue addition _or_ subsequent
256 * tests in this thread will see the wakeup having taken place.
257 *
258 * The spin_unlock() itself is semi-permeable and only protects
259 * one way (it only protects stuff inside the critical region and
260 * stops them from bleeding out - it would still allow subsequent
261 * loads to move into the critical region).
262 */
263void
264prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
265{
266	unsigned long flags;
267
268	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
269	spin_lock_irqsave(&wq_head->lock, flags);
270	if (list_empty(&wq_entry->entry))
271		__add_wait_queue(wq_head, wq_entry);
272	set_current_state(state);
273	spin_unlock_irqrestore(&wq_head->lock, flags);
274}
275EXPORT_SYMBOL(prepare_to_wait);
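The ordering described in the note above is what makes the classic open-coded wait loop safe. A sketch of that loop (placeholder names as before, signal handling kept minimal):

	DEFINE_WAIT(wait);		/* on-stack entry using autoremove_wake_function */

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_cond)
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);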
276
277/* Returns true if we are the first waiter in the queue, false otherwise. */
278bool
279prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
280{
281	unsigned long flags;
282	bool was_empty = false;
283
284	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
285	spin_lock_irqsave(&wq_head->lock, flags);
286	if (list_empty(&wq_entry->entry)) {
287		was_empty = list_empty(&wq_head->head);
288		__add_wait_queue_entry_tail(wq_head, wq_entry);
289	}
290	set_current_state(state);
291	spin_unlock_irqrestore(&wq_head->lock, flags);
292	return was_empty;
293}
294EXPORT_SYMBOL(prepare_to_wait_exclusive);
295
296void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
297{
298	wq_entry->flags = flags;
299	wq_entry->private = current;
300	wq_entry->func = autoremove_wake_function;
301	INIT_LIST_HEAD(&wq_entry->entry);
302}
303EXPORT_SYMBOL(init_wait_entry);
304
305long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
306{
307	unsigned long flags;
308	long ret = 0;
309
310	spin_lock_irqsave(&wq_head->lock, flags);
311	if (signal_pending_state(state, current)) {
312		/*
313		 * Exclusive waiter must not fail if it was selected by wakeup,
314		 * it should "consume" the condition we were waiting for.
315		 *
316		 * The caller will recheck the condition and return success if
317		 * we were already woken up, we can not miss the event because
318		 * wakeup locks/unlocks the same wq_head->lock.
319		 *
320		 * But we need to ensure that set-condition + wakeup after that
321		 * can't see us, it should wake up another exclusive waiter if
322		 * we fail.
323		 */
324		list_del_init(&wq_entry->entry);
325		ret = -ERESTARTSYS;
326	} else {
327		if (list_empty(&wq_entry->entry)) {
328			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
329				__add_wait_queue_entry_tail(wq_head, wq_entry);
330			else
331				__add_wait_queue(wq_head, wq_entry);
332		}
333		set_current_state(state);
334	}
335	spin_unlock_irqrestore(&wq_head->lock, flags);
336
337	return ret;
338}
339EXPORT_SYMBOL(prepare_to_wait_event);
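prepare_to_wait_event() is the helper behind the wait_event*() macro family; callers normally use those macros instead of open-coding the loop. A waiter-side sketch with the same placeholder names:

	/* sleeps until my_cond is true; returns -ERESTARTSYS if interrupted */
	if (wait_event_interruptible(my_wq, my_cond))
		return -ERESTARTSYS;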
340
341/*
342 * Note! These two wait functions are entered with the
343 * wait-queue lock held (and interrupts off in the _irq
344 * case), so there is no race with testing the wakeup
345 * condition in the caller before they add the wait
346 * entry to the wake queue.
347 */
348int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
349{
350	if (likely(list_empty(&wait->entry)))
351		__add_wait_queue_entry_tail(wq, wait);
352
353	set_current_state(TASK_INTERRUPTIBLE);
354	if (signal_pending(current))
355		return -ERESTARTSYS;
356
357	spin_unlock(&wq->lock);
358	schedule();
359	spin_lock(&wq->lock);
360
361	return 0;
362}
363EXPORT_SYMBOL(do_wait_intr);
364
365int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
366{
367	if (likely(list_empty(&wait->entry)))
368		__add_wait_queue_entry_tail(wq, wait);
369
370	set_current_state(TASK_INTERRUPTIBLE);
371	if (signal_pending(current))
372		return -ERESTARTSYS;
373
374	spin_unlock_irq(&wq->lock);
375	schedule();
376	spin_lock_irq(&wq->lock);
377
378	return 0;
379}
380EXPORT_SYMBOL(do_wait_intr_irq);
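These two helpers are driven with wq->lock already held, roughly along the lines of the wait_event_interruptible_locked*() macros. A simplified sketch of the non-IRQ variant (placeholder condition, error handling trimmed):

	DEFINE_WAIT(wait);
	int err = 0;

	spin_lock(&my_wq.lock);
	while (!my_cond) {
		err = do_wait_intr(&my_wq, &wait);
		if (err)
			break;
	}
	__remove_wait_queue(&my_wq, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock(&my_wq.lock);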
381
382/**
383 * finish_wait - clean up after waiting in a queue
384 * @wq_head: waitqueue waited on
385 * @wq_entry: wait descriptor
386 *
387 * Sets current thread back to running state and removes
388 * the wait descriptor from the given waitqueue if still
389 * queued.
390 */
391void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
392{
393	unsigned long flags;
394
395	__set_current_state(TASK_RUNNING);
396	/*
397	 * We can check for list emptiness outside the lock
398	 * IFF:
399	 *  - we use the "careful" check that verifies both
400	 *    the next and prev pointers, so that there cannot
401	 *    be any half-pending updates in progress on other
 402 *    CPU's that we haven't seen yet (and that might
 403 *    still change the stack area).
404	 * and
405	 *  - all other users take the lock (ie we can only
406	 *    have _one_ other CPU that looks at or modifies
407	 *    the list).
408	 */
409	if (!list_empty_careful(&wq_entry->entry)) {
410		spin_lock_irqsave(&wq_head->lock, flags);
411		list_del_init(&wq_entry->entry);
412		spin_unlock_irqrestore(&wq_head->lock, flags);
413	}
414}
415EXPORT_SYMBOL(finish_wait);
416
417int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
418{
419	int ret = default_wake_function(wq_entry, mode, sync, key);
420
421	if (ret)
422		list_del_init_careful(&wq_entry->entry);
423
424	return ret;
425}
426EXPORT_SYMBOL(autoremove_wake_function);
427
428static inline bool is_kthread_should_stop(void)
429{
430	return (current->flags & PF_KTHREAD) && kthread_should_stop();
431}
432
433/*
434 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
435 *
436 * add_wait_queue(&wq_head, &wait);
437 * for (;;) {
438 *     if (condition)
439 *         break;
440 *
441 *     // in wait_woken()			// in woken_wake_function()
442 *
443 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
444 *     smp_mb(); // A				try_to_wake_up():
445 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
446 *         schedule()				   if (p->state & mode)
447 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
448 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
449 *     smp_mb(); // B				condition = true;
450 * }						smp_mb(); // C
451 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
452 */
453long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
454{
455	/*
456	 * The below executes an smp_mb(), which matches with the full barrier
457	 * executed by the try_to_wake_up() in woken_wake_function() such that
458	 * either we see the store to wq_entry->flags in woken_wake_function()
459	 * or woken_wake_function() sees our store to current->state.
460	 */
461	set_current_state(mode); /* A */
462	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
463		timeout = schedule_timeout(timeout);
464	__set_current_state(TASK_RUNNING);
465
466	/*
467	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
468	 * in woken_wake_function() such that either we see the wait condition
469	 * being true or the store to wq_entry->flags in woken_wake_function()
470	 * follows ours in the coherence order.
471	 */
472	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
473
474	return timeout;
475}
476EXPORT_SYMBOL(wait_woken);
477
478int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
479{
480	/* Pairs with the smp_store_mb() in wait_woken(). */
481	smp_mb(); /* C */
482	wq_entry->flags |= WQ_FLAG_WOKEN;
483
484	return default_wake_function(wq_entry, mode, sync, key);
485}
486EXPORT_SYMBOL(woken_wake_function);
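Filling in the caller side of the pattern sketched in the comment above wait_woken(), with placeholder names and signal handling omitted:

	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeout = MAX_SCHEDULE_TIMEOUT;

	add_wait_queue(&my_wq, &wait);
	while (!my_cond && timeout)
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	remove_wait_queue(&my_wq, &wait);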
v4.6
 
  1/*
  2 * Generic waiting primitives.
  3 *
  4 * (C) 2004 Nadia Yvette Chambers, Oracle
  5 */
  6#include <linux/init.h>
  7#include <linux/export.h>
  8#include <linux/sched.h>
  9#include <linux/mm.h>
 10#include <linux/wait.h>
 11#include <linux/hash.h>
 12#include <linux/kthread.h>
 13
 14void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
 15{
 16	spin_lock_init(&q->lock);
 17	lockdep_set_class_and_name(&q->lock, key, name);
 18	INIT_LIST_HEAD(&q->task_list);
 19}
 20
 21EXPORT_SYMBOL(__init_waitqueue_head);
 22
 23void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 24{
 25	unsigned long flags;
 26
 27	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 28	spin_lock_irqsave(&q->lock, flags);
 29	__add_wait_queue(q, wait);
 30	spin_unlock_irqrestore(&q->lock, flags);
 31}
 32EXPORT_SYMBOL(add_wait_queue);
 33
 34void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 35{
 36	unsigned long flags;
 37
 38	wait->flags |= WQ_FLAG_EXCLUSIVE;
 39	spin_lock_irqsave(&q->lock, flags);
 40	__add_wait_queue_tail(q, wait);
 41	spin_unlock_irqrestore(&q->lock, flags);
 42}
 43EXPORT_SYMBOL(add_wait_queue_exclusive);
 44
 45void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 46{
 47	unsigned long flags;
 48
 49	spin_lock_irqsave(&q->lock, flags);
 50	__remove_wait_queue(q, wait);
 51	spin_unlock_irqrestore(&q->lock, flags);
 52}
 53EXPORT_SYMBOL(remove_wait_queue);
 54
 55
 56/*
 57 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 58 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 59 * number) then we wake all the non-exclusive tasks and one exclusive task.
 60 *
 61 * There are circumstances in which we can try to wake a task which has already
 62 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 63 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 64 */
 65static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 66			int nr_exclusive, int wake_flags, void *key)
 67{
 68	wait_queue_t *curr, *next;
 69
 70	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
 71		unsigned flags = curr->flags;
 72
 73		if (curr->func(curr, mode, wake_flags, key) &&
 74				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
 75			break;
 76	}
 77}
 78
 79/**
 80 * __wake_up - wake up threads blocked on a waitqueue.
 81 * @q: the waitqueue
 82 * @mode: which threads
 83 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 84 * @key: is directly passed to the wakeup function
 85 *
 86 * It may be assumed that this function implies a write memory barrier before
 87 * changing the task state if and only if any tasks are woken up.
 88 */
 89void __wake_up(wait_queue_head_t *q, unsigned int mode,
 90			int nr_exclusive, void *key)
 91{
 92	unsigned long flags;
 93
 94	spin_lock_irqsave(&q->lock, flags);
 95	__wake_up_common(q, mode, nr_exclusive, 0, key);
 96	spin_unlock_irqrestore(&q->lock, flags);
 97}
 98EXPORT_SYMBOL(__wake_up);
 99
100/*
101 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
102 */
103void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
104{
105	__wake_up_common(q, mode, nr, 0, NULL);
106}
107EXPORT_SYMBOL_GPL(__wake_up_locked);
108
109void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
110{
111	__wake_up_common(q, mode, 1, 0, key);
112}
113EXPORT_SYMBOL_GPL(__wake_up_locked_key);
114
115/**
116 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
117 * @q: the waitqueue
118 * @mode: which threads
119 * @nr_exclusive: how many wake-one or wake-many threads to wake up
120 * @key: opaque value to be passed to wakeup targets
121 *
 122 * The sync wakeup differs in that the waker knows that it will schedule
123 * away soon, so while the target thread will be woken up, it will not
124 * be migrated to another CPU - ie. the two threads are 'synchronized'
125 * with each other. This can prevent needless bouncing between CPUs.
126 *
127 * On UP it can prevent extra preemption.
128 *
129 * It may be assumed that this function implies a write memory barrier before
130 * changing the task state if and only if any tasks are woken up.
131 */
132void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
133			int nr_exclusive, void *key)
134{
135	unsigned long flags;
136	int wake_flags = 1; /* XXX WF_SYNC */
137
138	if (unlikely(!q))
139		return;
140
141	if (unlikely(nr_exclusive != 1))
142		wake_flags = 0;
143
144	spin_lock_irqsave(&q->lock, flags);
145	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
146	spin_unlock_irqrestore(&q->lock, flags);
147}
148EXPORT_SYMBOL_GPL(__wake_up_sync_key);
149
150/*
151 * __wake_up_sync - see __wake_up_sync_key()
152 */
153void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
154{
155	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
156}
157EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
158
159/*
160 * Note: we use "set_current_state()" _after_ the wait-queue add,
161 * because we need a memory barrier there on SMP, so that any
162 * wake-function that tests for the wait-queue being active
163 * will be guaranteed to see waitqueue addition _or_ subsequent
164 * tests in this thread will see the wakeup having taken place.
165 *
166 * The spin_unlock() itself is semi-permeable and only protects
167 * one way (it only protects stuff inside the critical region and
168 * stops them from bleeding out - it would still allow subsequent
169 * loads to move into the critical region).
170 */
171void
172prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
173{
174	unsigned long flags;
175
176	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
177	spin_lock_irqsave(&q->lock, flags);
178	if (list_empty(&wait->task_list))
179		__add_wait_queue(q, wait);
180	set_current_state(state);
181	spin_unlock_irqrestore(&q->lock, flags);
182}
183EXPORT_SYMBOL(prepare_to_wait);
184
185void
186prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
187{
188	unsigned long flags;
189
190	wait->flags |= WQ_FLAG_EXCLUSIVE;
191	spin_lock_irqsave(&q->lock, flags);
192	if (list_empty(&wait->task_list))
193		__add_wait_queue_tail(q, wait);
194	set_current_state(state);
195	spin_unlock_irqrestore(&q->lock, flags);
196}
197EXPORT_SYMBOL(prepare_to_wait_exclusive);
198
199long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
200{
201	unsigned long flags;
202
203	if (signal_pending_state(state, current))
204		return -ERESTARTSYS;
205
206	wait->private = current;
207	wait->func = autoremove_wake_function;
208
209	spin_lock_irqsave(&q->lock, flags);
210	if (list_empty(&wait->task_list)) {
211		if (wait->flags & WQ_FLAG_EXCLUSIVE)
212			__add_wait_queue_tail(q, wait);
213		else
214			__add_wait_queue(q, wait);
215	}
216	set_current_state(state);
217	spin_unlock_irqrestore(&q->lock, flags);
218
219	return 0;
220}
221EXPORT_SYMBOL(prepare_to_wait_event);
222
223/**
224 * finish_wait - clean up after waiting in a queue
225 * @q: waitqueue waited on
226 * @wait: wait descriptor
227 *
228 * Sets current thread back to running state and removes
229 * the wait descriptor from the given waitqueue if still
230 * queued.
231 */
232void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
233{
234	unsigned long flags;
235
236	__set_current_state(TASK_RUNNING);
237	/*
238	 * We can check for list emptiness outside the lock
239	 * IFF:
240	 *  - we use the "careful" check that verifies both
241	 *    the next and prev pointers, so that there cannot
242	 *    be any half-pending updates in progress on other
 243 *    CPU's that we haven't seen yet (and that might
 244 *    still change the stack area).
245	 * and
246	 *  - all other users take the lock (ie we can only
247	 *    have _one_ other CPU that looks at or modifies
248	 *    the list).
249	 */
250	if (!list_empty_careful(&wait->task_list)) {
251		spin_lock_irqsave(&q->lock, flags);
252		list_del_init(&wait->task_list);
253		spin_unlock_irqrestore(&q->lock, flags);
254	}
255}
256EXPORT_SYMBOL(finish_wait);
257
258/**
259 * abort_exclusive_wait - abort exclusive waiting in a queue
260 * @q: waitqueue waited on
261 * @wait: wait descriptor
262 * @mode: runstate of the waiter to be woken
263 * @key: key to identify a wait bit queue or %NULL
264 *
265 * Sets current thread back to running state and removes
266 * the wait descriptor from the given waitqueue if still
267 * queued.
268 *
269 * Wakes up the next waiter if the caller is concurrently
270 * woken up through the queue.
271 *
272 * This prevents waiter starvation where an exclusive waiter
273 * aborts and is woken up concurrently and no one wakes up
274 * the next waiter.
275 */
276void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
277			unsigned int mode, void *key)
278{
279	unsigned long flags;
280
281	__set_current_state(TASK_RUNNING);
282	spin_lock_irqsave(&q->lock, flags);
283	if (!list_empty(&wait->task_list))
284		list_del_init(&wait->task_list);
285	else if (waitqueue_active(q))
286		__wake_up_locked_key(q, mode, key);
287	spin_unlock_irqrestore(&q->lock, flags);
288}
289EXPORT_SYMBOL(abort_exclusive_wait);
290
291int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
292{
293	int ret = default_wake_function(wait, mode, sync, key);
294
295	if (ret)
296		list_del_init(&wait->task_list);
297	return ret;
298}
299EXPORT_SYMBOL(autoremove_wake_function);
300
301static inline bool is_kthread_should_stop(void)
302{
303	return (current->flags & PF_KTHREAD) && kthread_should_stop();
304}
305
306/*
307 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
308 *
309 * add_wait_queue(&wq, &wait);
310 * for (;;) {
311 *     if (condition)
312 *         break;
313 *
314 *     p->state = mode;				condition = true;
315 *     smp_mb(); // A				smp_wmb(); // C
 316 *     if (!(wait->flags & WQ_FLAG_WOKEN))	wait->flags |= WQ_FLAG_WOKEN;
317 *         schedule()				try_to_wake_up();
318 *     p->state = TASK_RUNNING;		    ~~~~~~~~~~~~~~~~~~
319 *     wait->flags &= ~WQ_FLAG_WOKEN;		condition = true;
320 *     smp_mb() // B				smp_wmb(); // C
321 *						wait->flags |= WQ_FLAG_WOKEN;
322 * }
323 * remove_wait_queue(&wq, &wait);
324 *
325 */
326long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
327{
328	set_current_state(mode); /* A */
329	/*
330	 * The above implies an smp_mb(), which matches with the smp_wmb() from
331	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
332	 * also observe all state before the wakeup.
333	 */
334	if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
335		timeout = schedule_timeout(timeout);
336	__set_current_state(TASK_RUNNING);
337
338	/*
339	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
340	 * woken_wake_function() such that we must either observe the wait
341	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
342	 * an event.
343	 */
344	smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
345
346	return timeout;
347}
348EXPORT_SYMBOL(wait_woken);
349
350int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
351{
352	/*
353	 * Although this function is called under waitqueue lock, LOCK
354	 * doesn't imply write barrier and the users expects write
355	 * barrier semantics on wakeup functions.  The following
356	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
357	 * and is paired with smp_store_mb() in wait_woken().
358	 */
359	smp_wmb(); /* C */
360	wait->flags |= WQ_FLAG_WOKEN;
361
362	return default_wake_function(wait, mode, sync, key);
363}
364EXPORT_SYMBOL(woken_wake_function);
365
366int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
367{
368	struct wait_bit_key *key = arg;
369	struct wait_bit_queue *wait_bit
370		= container_of(wait, struct wait_bit_queue, wait);
371
372	if (wait_bit->key.flags != key->flags ||
373			wait_bit->key.bit_nr != key->bit_nr ||
374			test_bit(key->bit_nr, key->flags))
375		return 0;
376	else
377		return autoremove_wake_function(wait, mode, sync, key);
378}
379EXPORT_SYMBOL(wake_bit_function);
380
381/*
382 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
383 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
384 * permitted return codes. Nonzero return codes halt waiting and return.
385 */
386int __sched
387__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
388	      wait_bit_action_f *action, unsigned mode)
389{
390	int ret = 0;
391
392	do {
393		prepare_to_wait(wq, &q->wait, mode);
394		if (test_bit(q->key.bit_nr, q->key.flags))
395			ret = (*action)(&q->key, mode);
396	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
397	finish_wait(wq, &q->wait);
398	return ret;
399}
400EXPORT_SYMBOL(__wait_on_bit);
401
402int __sched out_of_line_wait_on_bit(void *word, int bit,
403				    wait_bit_action_f *action, unsigned mode)
404{
405	wait_queue_head_t *wq = bit_waitqueue(word, bit);
406	DEFINE_WAIT_BIT(wait, word, bit);
407
408	return __wait_on_bit(wq, &wait, action, mode);
409}
410EXPORT_SYMBOL(out_of_line_wait_on_bit);
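Callers normally reach this through the wait_on_bit*() inlines in <linux/wait.h> rather than the out-of-line helper directly. A sketch (my_flags is a placeholder unsigned long):

	/* sleep until bit 0 of my_flags is cleared elsewhere; non-zero if interrupted */
	int err = wait_on_bit(&my_flags, 0, TASK_INTERRUPTIBLE);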
411
412int __sched out_of_line_wait_on_bit_timeout(
413	void *word, int bit, wait_bit_action_f *action,
414	unsigned mode, unsigned long timeout)
415{
416	wait_queue_head_t *wq = bit_waitqueue(word, bit);
417	DEFINE_WAIT_BIT(wait, word, bit);
418
419	wait.key.timeout = jiffies + timeout;
420	return __wait_on_bit(wq, &wait, action, mode);
421}
422EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
423
424int __sched
425__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
426			wait_bit_action_f *action, unsigned mode)
427{
428	do {
429		int ret;
430
431		prepare_to_wait_exclusive(wq, &q->wait, mode);
432		if (!test_bit(q->key.bit_nr, q->key.flags))
433			continue;
434		ret = action(&q->key, mode);
435		if (!ret)
436			continue;
437		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
438		return ret;
439	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
440	finish_wait(wq, &q->wait);
441	return 0;
442}
443EXPORT_SYMBOL(__wait_on_bit_lock);
444
445int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
446					 wait_bit_action_f *action, unsigned mode)
447{
448	wait_queue_head_t *wq = bit_waitqueue(word, bit);
449	DEFINE_WAIT_BIT(wait, word, bit);
450
451	return __wait_on_bit_lock(wq, &wait, action, mode);
452}
453EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
454
455void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
456{
457	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
458	if (waitqueue_active(wq))
459		__wake_up(wq, TASK_NORMAL, 1, &key);
460}
461EXPORT_SYMBOL(__wake_up_bit);
462
463/**
464 * wake_up_bit - wake up a waiter on a bit
465 * @word: the word being waited on, a kernel virtual address
466 * @bit: the bit of the word being waited on
467 *
468 * There is a standard hashed waitqueue table for generic use. This
469 * is the part of the hashtable's accessor API that wakes up waiters
470 * on a bit. For instance, if one were to have waiters on a bitflag,
471 * one would call wake_up_bit() after clearing the bit.
472 *
473 * In order for this to function properly, as it uses waitqueue_active()
474 * internally, some kind of memory barrier must be done prior to calling
475 * this. Typically, this will be smp_mb__after_atomic(), but in some
476 * cases where bitflags are manipulated non-atomically under a lock, one
 477 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
478 * because spin_unlock() does not guarantee a memory barrier.
479 */
480void wake_up_bit(void *word, int bit)
481{
482	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
483}
484EXPORT_SYMBOL(wake_up_bit);
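The barrier requirement spelled out in the comment above, as a short sketch (MY_BIT and my_flags are placeholders):

	clear_bit(MY_BIT, &my_flags);
	smp_mb__after_atomic();		/* order the clear before waitqueue_active() */
	wake_up_bit(&my_flags, MY_BIT);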
485
486wait_queue_head_t *bit_waitqueue(void *word, int bit)
487{
488	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
489	const struct zone *zone = page_zone(virt_to_page(word));
490	unsigned long val = (unsigned long)word << shift | bit;
491
492	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
493}
494EXPORT_SYMBOL(bit_waitqueue);
495
496/*
497 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
498 * index (we're keying off bit -1, but that would produce a horrible hash
499 * value).
500 */
501static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
502{
503	if (BITS_PER_LONG == 64) {
504		unsigned long q = (unsigned long)p;
505		return bit_waitqueue((void *)(q & ~1), q & 1);
506	}
507	return bit_waitqueue(p, 0);
508}
509
510static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
511				  void *arg)
512{
513	struct wait_bit_key *key = arg;
514	struct wait_bit_queue *wait_bit
515		= container_of(wait, struct wait_bit_queue, wait);
516	atomic_t *val = key->flags;
517
518	if (wait_bit->key.flags != key->flags ||
519	    wait_bit->key.bit_nr != key->bit_nr ||
520	    atomic_read(val) != 0)
521		return 0;
522	return autoremove_wake_function(wait, mode, sync, key);
523}
524
525/*
526 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
527 * the actions of __wait_on_atomic_t() are permitted return codes.  Nonzero
528 * return codes halt waiting and return.
529 */
530static __sched
531int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
532		       int (*action)(atomic_t *), unsigned mode)
533{
534	atomic_t *val;
535	int ret = 0;
536
537	do {
538		prepare_to_wait(wq, &q->wait, mode);
539		val = q->key.flags;
540		if (atomic_read(val) == 0)
541			break;
542		ret = (*action)(val);
543	} while (!ret && atomic_read(val) != 0);
544	finish_wait(wq, &q->wait);
545	return ret;
546}
547
548#define DEFINE_WAIT_ATOMIC_T(name, p)					\
549	struct wait_bit_queue name = {					\
550		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
551		.wait	= {						\
552			.private	= current,			\
553			.func		= wake_atomic_t_function,	\
554			.task_list	=				\
555				LIST_HEAD_INIT((name).wait.task_list),	\
556		},							\
557	}
558
559__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
560					 unsigned mode)
561{
562	wait_queue_head_t *wq = atomic_t_waitqueue(p);
563	DEFINE_WAIT_ATOMIC_T(wait, p);
564
565	return __wait_on_atomic_t(wq, &wait, action, mode);
566}
567EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
568
569/**
 570 * wake_up_atomic_t - Wake up a waiter on an atomic_t
571 * @p: The atomic_t being waited on, a kernel virtual address
572 *
573 * Wake up anyone waiting for the atomic_t to go to zero.
574 *
575 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 576 * check is done by the waiter's wake function, not by the waker itself).
577 */
578void wake_up_atomic_t(atomic_t *p)
579{
580	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
581}
582EXPORT_SYMBOL(wake_up_atomic_t);
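A sketch of the pairing described above: a waiter blocking until a count drops to zero, and a releaser doing the final put (my_obj and my_wait_atomic are placeholders; the action simply sleeps):

	static int my_wait_atomic(atomic_t *p)
	{
		schedule();
		return 0;
	}

	/* waiter: block until my_obj->users reaches zero */
	wait_on_atomic_t(&my_obj->users, my_wait_atomic, TASK_UNINTERRUPTIBLE);

	/* releaser: wake the waiter on the last reference */
	if (atomic_dec_and_test(&my_obj->users))
		wake_up_atomic_t(&my_obj->users);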
583
584__sched int bit_wait(struct wait_bit_key *word, int mode)
585{
586	schedule();
587	if (signal_pending_state(mode, current))
588		return -EINTR;
589	return 0;
590}
591EXPORT_SYMBOL(bit_wait);
592
593__sched int bit_wait_io(struct wait_bit_key *word, int mode)
594{
595	io_schedule();
596	if (signal_pending_state(mode, current))
597		return -EINTR;
598	return 0;
599}
600EXPORT_SYMBOL(bit_wait_io);
601
602__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
603{
604	unsigned long now = READ_ONCE(jiffies);
605	if (time_after_eq(now, word->timeout))
606		return -EAGAIN;
607	schedule_timeout(word->timeout - now);
608	if (signal_pending_state(mode, current))
609		return -EINTR;
610	return 0;
611}
612EXPORT_SYMBOL_GPL(bit_wait_timeout);
613
614__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
615{
616	unsigned long now = READ_ONCE(jiffies);
617	if (time_after_eq(now, word->timeout))
618		return -EAGAIN;
619	io_schedule_timeout(word->timeout - now);
620	if (signal_pending_state(mode, current))
621		return -EINTR;
622	return 0;
623}
624EXPORT_SYMBOL_GPL(bit_wait_io_timeout);