// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue_entry_tail(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key,
                        wait_queue_entry_t *bookmark)
{
        wait_queue_entry_t *curr, *next;
        int cnt = 0;

        lockdep_assert_held(&wq_head->lock);

        if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
                curr = list_next_entry(bookmark, entry);

                list_del(&bookmark->entry);
                bookmark->flags = 0;
        } else
                curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

        if (&curr->entry == &wq_head->head)
                return nr_exclusive;

        list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
                unsigned flags = curr->flags;
                int ret;

                if (flags & WQ_FLAG_BOOKMARK)
                        continue;

                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
                if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;

                if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
                                (&next->entry != &wq_head->head)) {
                        bookmark->flags = WQ_FLAG_BOOKMARK;
                        list_add_tail(&bookmark->entry, &next->entry);
                        break;
                }
        }

        return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        do {
                spin_lock_irqsave(&wq_head->lock, flags);
                nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
                                                wake_flags, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        } while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);

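/*
 * Minimal waker-side sketch (illustrative only; the wait queue and flag
 * names below are hypothetical, not part of this file). Callers normally
 * use the wake_up*() wrappers from <linux/wait.h>, which expand to
 * __wake_up() with the appropriate mode and nr_exclusive:
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(example_wq);
 *      static bool example_done;
 *
 *      // waker side
 *      example_done = true;            // publish the condition first
 *      wake_up(&example_wq);           // == __wake_up(&example_wq, TASK_NORMAL, 1, NULL)
 *
 * wake_up_all() passes nr_exclusive == 0 and so wakes every waiter; sleepers
 * recheck "example_done" after each wakeup, so spurious wakeups are harmless.
 */
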
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
        __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
        __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!wq_head))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        __wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */

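/*
 * Illustrative use of a sync wakeup (a sketch; the names are hypothetical).
 * A waker that is about to block anyway can hint that the wakee may run on
 * the current CPU instead of being migrated:
 *
 *      // waker that will immediately sleep, e.g. after handing data over
 *      example_buffer_filled = true;
 *      wake_up_interruptible_sync(&example_wq);
 *      wait_event_interruptible(example_ack_wq, example_ack);
 */
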
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

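/*
 * Canonical open-coded sleeper built on prepare_to_wait()/finish_wait()
 * (a minimal sketch; the wait queue and condition names are hypothetical,
 * and most callers use the wait_event*() macros instead):
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&example_wq, &wait, TASK_INTERRUPTIBLE);
 *              if (example_done)
 *                      break;
 *              if (signal_pending(current))
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&example_wq, &wait);
 *
 * The condition test sits between prepare_to_wait() (which sets the task
 * state) and schedule(), so a wakeup that fires after the test still turns
 * the schedule() into a short no-op rather than being lost.
 */
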
void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue_entry_tail(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
        wq_entry->flags = flags;
        wq_entry->private = current;
        wq_entry->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&wq_head->lock, flags);
        if (signal_pending_state(state, current)) {
                /*
                 * An exclusive waiter must not fail if it was selected by a
                 * wakeup; it should "consume" the condition we were waiting
                 * for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up; we cannot miss the event because
                 * wakeup locks/unlocks the same wq_head->lock.
                 *
                 * But we need to ensure that a set-condition + wakeup done
                 * after that can't see us; it should wake up another exclusive
                 * waiter if we fail.
                 */
                list_del_init(&wq_entry->entry);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wq_entry->entry)) {
                        if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                        else
                                __add_wait_queue(wq_head, wq_entry);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock_irq(&wq->lock);
        schedule();
        spin_lock_irq(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

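/*
 * Sketch of a caller that already holds wq->lock and uses do_wait_intr() to
 * sleep on a condition protected by that same lock (illustrative only; the
 * condition name is hypothetical, and the wait_event_interruptible_locked*()
 * macros in <linux/wait.h> wrap this pattern):
 *
 *      DEFINE_WAIT(wait);
 *      int err = 0;
 *
 *      spin_lock(&example_wq.lock);
 *      while (!example_ready && !err)
 *              err = do_wait_intr(&example_wq, &wait);
 *      __remove_wait_queue(&example_wq, &wait);
 *      __set_current_state(TASK_RUNNING);
 *      spin_unlock(&example_wq.lock);
 */
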
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         * - we use the "careful" check that verifies both
         *   the next and prev pointers, so that there cannot
         *   be any half-pending updates in progress on other
         *   CPUs that we haven't seen yet (and that might
         *   still change the stack area),
         * and
         * - all other users take the lock (ie we can only
         *   have _one_ other CPU that looks at or modifies
         *   the list).
         */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        if (ret)
                list_del_init(&wq_entry->entry);

        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()                          // in woken_wake_function()
 *
 *     p->state = mode;                            wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A                              try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))        <full barrier>
 *         schedule()                                 if (p->state & mode)
 *     p->state = TASK_RUNNING;                           p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;          ~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B                              condition = true;
 * }                                               smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);             wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
        /*
         * The below executes an smp_mb(), which matches with the full barrier
         * executed by the try_to_wake_up() in woken_wake_function() such that
         * either we see the store to wq_entry->flags in woken_wake_function()
         * or woken_wake_function() sees our store to current->state.
         */
        set_current_state(mode); /* A */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below executes an smp_mb(), which matches with the smp_mb() (C)
         * in woken_wake_function() such that either we see the wait condition
         * being true or the store to wq_entry->flags in woken_wake_function()
         * follows ours in the coherence order.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        /* Pairs with the smp_store_mb() in wait_woken(). */
        smp_mb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
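
/*
 * Caller-side sketch of the woken-wait pattern diagrammed above (illustrative;
 * the condition helper and timeout are hypothetical). This is the shape used
 * by code that must tolerate wakeups racing with the condition check without
 * holding a lock across the sleep:
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *      long timeout = msecs_to_jiffies(5000);
 *
 *      add_wait_queue(&example_wq, &wait);
 *      while (!example_data_ready() && timeout)
 *              timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *      remove_wait_queue(&example_wq, &wait);
 */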
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&q->lock);
        lockdep_set_class_and_name(&q->lock, key, name);
        INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        wait_queue_t *curr, *next;

        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
                unsigned flags = curr->flags;

                if (curr->func(curr, mode, wake_flags, key) &&
                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, 0, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
        __wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
        __wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
{
        unsigned long flags;
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!q))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        if (signal_pending_state(state, current))
                return -ERESTARTSYS;

        wait->private = current;
        wait->func = autoremove_wake_function;

        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list)) {
                if (wait->flags & WQ_FLAG_EXCLUSIVE)
                        __add_wait_queue_tail(q, wait);
                else
                        __add_wait_queue(q, wait);
        }
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);

        return 0;
}
EXPORT_SYMBOL(prepare_to_wait_event);

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         * - we use the "careful" check that verifies both
         *   the next and prev pointers, so that there cannot
         *   be any half-pending updates in progress on other
         *   CPUs that we haven't seen yet (and that might
         *   still change the stack area),
         * and
         * - all other users take the lock (ie we can only
         *   have _one_ other CPU that looks at or modifies
         *   the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
                        unsigned int mode, void *key)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        spin_lock_irqsave(&q->lock, flags);
        if (!list_empty(&wait->task_list))
                list_del_init(&wait->task_list);
        else if (waitqueue_active(q))
                __wake_up_locked_key(q, mode, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the @action callbacks used by __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero. A nonzero return code halts
 * waiting and is propagated back to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(q->key.flags);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

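/*
 * Sketch of an @action callback and a caller (illustrative; the helper and
 * flag names are hypothetical). wait_on_bit() in <linux/wait.h> routes
 * through out_of_line_wait_on_bit() while the bit is set:
 *
 *      static int example_bit_wait(void *word)
 *      {
 *              if (signal_pending(current))
 *                      return -EINTR;
 *              schedule();
 *              return 0;
 *      }
 *
 *      // sleep until bit EXAMPLE_FLAG_BUSY is cleared in example->flags
 *      err = wait_on_bit(&example->flags, EXAMPLE_FLAG_BUSY,
 *                        example_bit_wait, TASK_INTERRUPTIBLE);
 */
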
int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        do {
                int ret;

                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (!test_bit(q->key.bit_nr, q->key.flags))
                        continue;
                ret = action(q->key.flags);
                if (!ret)
                        continue;
                abort_exclusive_wait(wq, &q->wait, mode, &q->key);
                return ret;
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
        return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

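/*
 * Waker-side sketch for a bit waitqueue (illustrative; the structure and
 * flag names are hypothetical). The barrier before wake_up_bit() is the one
 * the comment above asks for, so the waitqueue_active() check cannot be
 * reordered before the cleared bit becomes visible:
 *
 *      clear_bit(EXAMPLE_FLAG_BUSY, &example->flags);
 *      smp_mb__after_clear_bit();
 *      wake_up_bit(&example->flags, EXAMPLE_FLAG_BUSY);
 */
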
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);

/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
        if (BITS_PER_LONG == 64) {
                unsigned long q = (unsigned long)p;

                return bit_waitqueue((void *)(q & ~1), q & 1);
        }
        return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
                        void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);
        atomic_t *val = key->flags;

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        atomic_read(val) != 0)
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
 * the @action callback used by __wait_on_atomic_t() may return nonzero.
 * A nonzero return code halts waiting and is propagated back to the caller.
 */
static __sched
int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(atomic_t *), unsigned mode)
{
        atomic_t *val;
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                val = q->key.flags;
                if (atomic_read(val) == 0)
                        break;
                ret = (*action)(val);
        } while (!ret && atomic_read(val) != 0);
        finish_wait(wq, &q->wait);
        return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)                                   \
        struct wait_bit_queue name = {                                  \
                .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),              \
                .wait = {                                               \
                        .private        = current,                      \
                        .func           = wake_atomic_t_function,       \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wait.task_list),  \
                },                                                      \
        }

__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
                        unsigned mode)
{
        wait_queue_head_t *wq = atomic_t_waitqueue(p);
        DEFINE_WAIT_ATOMIC_T(wait, p);

        return __wait_on_atomic_t(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

/**
 * wake_up_atomic_t - wake up a waiter on an atomic_t
 * @p: the atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
        __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);
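
/*
 * Sketch of the atomic_t wait pattern (illustrative; the counter name and
 * action are hypothetical). wait_on_atomic_t() in <linux/wait.h> routes
 * through out_of_line_wait_on_atomic_t() while the counter is nonzero, and
 * the last decrementer wakes the waiter:
 *
 *      static int example_atomic_wait(atomic_t *p)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      // waiter: block until example->users drops to zero
 *      wait_on_atomic_t(&example->users, example_atomic_wait,
 *                       TASK_UNINTERRUPTIBLE);
 *
 *      // releaser
 *      if (atomic_dec_and_test(&example->users))
 *              wake_up_atomic_t(&example->users);
 */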