v5.9
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	entry;
};

struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
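
/*
 * Illustrative sketch (not part of the original header): the two usual ways
 * a wait queue head gets set up. my_driver_data and my_probe() are
 * hypothetical names used only for this example.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);	/* compile-time initialisation */

struct my_driver_data {
	struct wait_queue_head	wq;
};

static void my_probe(struct my_driver_data *d)
{
	init_waitqueue_head(&d->wq);		/* run-time init for an embedded head */
}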

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))         if (@cond)
 *        wake_up(wq_head);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
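
/*
 * Illustrative waker-side sketch (not part of the original header): publish
 * the condition first, then let wq_has_sleeper() provide the smp_mb() that
 * the waitqueue_active() comment above calls for. my_wake and my_cond are
 * hypothetical names; wake_up() is defined further down in this header.
 */
static inline void my_wake(struct wait_queue_head *wq, bool *my_cond)
{
	WRITE_ONCE(*my_cond, true);	/* store the condition... */
	if (wq_has_sleeper(wq))		/* ...smp_mb() before the lockless test */
		wake_up(wq);
}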

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->entry, &wq_head->head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
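
/*
 * Illustrative sketch (not part of the original header): a driver whose data
 * just became readable would typically report the poll mask through the key
 * argument so poll/epoll waiters can filter on it. my_readable_wake is a
 * hypothetical name.
 */
static inline void my_readable_wake(struct wait_queue_head *wq)
{
	wake_up_interruptible_poll(wq, EPOLLIN | EPOLLRDNORM);
}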

#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})

#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
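
/*
 * Illustrative sketch (not part of the original header): the canonical
 * wait_event()/wake_up() pairing. my_wq (declared in the earlier sketch),
 * my_flag, my_consumer() and my_producer() are hypothetical names.
 */
static bool my_flag;

static void my_consumer(void)
{
	wait_event(my_static_wq, my_flag);	/* sleeps TASK_UNINTERRUPTIBLE until my_flag */
}

static void my_producer(void)
{
	my_flag = true;				/* change the condition first... */
	wake_up(&my_static_wq);			/* ...then wake the waiters */
}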

#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)

#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
 351
 352#define __wait_event_timeout(wq_head, condition, timeout)			\
 353	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 354		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
 355		      __ret = schedule_timeout(__ret))
 356
 357/**
 358 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 359 * @wq_head: the waitqueue to wait on
 360 * @condition: a C expression for the event to wait for
 361 * @timeout: timeout, in jiffies
 362 *
 363 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 364 * @condition evaluates to true. The @condition is checked each time
 365 * the waitqueue @wq_head is woken up.
 366 *
 367 * wake_up() has to be called after changing any variable that could
 368 * change the result of the wait condition.
 369 *
 370 * Returns:
 371 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 372 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 373 * or the remaining jiffies (at least 1) if the @condition evaluated
 374 * to %true before the @timeout elapsed.
 375 */
 376#define wait_event_timeout(wq_head, condition, timeout)				\
 377({										\
 378	long __ret = timeout;							\
 379	might_sleep();								\
 380	if (!___wait_cond_timeout(condition))					\
 381		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
 382	__ret;									\
 383})
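
/*
 * Illustrative sketch (not part of the original header): interpreting the
 * possible wait_event_timeout() outcomes. Reuses the hypothetical
 * my_static_wq/my_flag names from the earlier sketches.
 */
static int my_wait_up_to_a_second(void)
{
	long left = wait_event_timeout(my_static_wq, my_flag, HZ);

	if (!left)
		return -ETIMEDOUT;	/* condition still false, timeout elapsed */
	return 0;			/* condition true; 'left' is remaining jiffies (>= 1) */
}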

#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets the exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})
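
/*
 * Illustrative sketch (not part of the original header): a typical
 * syscall-path caller propagating -ERESTARTSYS so the signal machinery can
 * restart the call. Reuses the hypothetical my_static_wq/my_flag names.
 */
static long my_ioctl_wait(void)
{
	int ret = wait_event_interruptible(my_static_wq, my_flag);

	if (ret)
		return ret;	/* -ERESTARTSYS: interrupted by a signal */
	return 0;		/* condition became true */
}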

#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})

#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
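
/*
 * Illustrative sketch (not part of the original header): the hrtimeout
 * variants take a ktime_t, so millisecond deadlines are usually built with
 * ms_to_ktime(). Reuses the hypothetical my_static_wq/my_flag names.
 */
static int my_wait_100ms(void)
{
	/* 0 if my_flag became true, -ETIME if 100ms elapsed first */
	return wait_event_hrtimeout(my_static_wq, my_flag, ms_to_ktime(100));
}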

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})

#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})


#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
			freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})

/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set; thus, if other processes wait on the same list, further processes
 * are not considered when this process is woken.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set; thus, if other processes wait on the same list, further processes
 * are not considered when this process is woken.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})
 733
 734extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
 735extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
 736
 737#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
 738({										\
 739	int __ret;								\
 740	DEFINE_WAIT(__wait);							\
 741	if (exclusive)								\
 742		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
 743	do {									\
 744		__ret = fn(&(wq), &__wait);					\
 745		if (__ret)							\
 746			break;							\
 747	} while (!(condition));							\
 748	__remove_wait_queue(&(wq), &__wait);					\
 749	__set_current_state(TASK_RUNNING);					\
 750	__ret;									\
 
 
 
 
 
 
 
 
 
 
 
 
 
 751})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set; thus, when other processes wait on the same list, further
 * processes are not considered when this process is woken.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set; thus, when other processes wait on the same list, further
 * processes are not considered when this process is woken.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))


#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
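
/*
 * Illustrative sketch (not part of the original header): TASK_KILLABLE waits
 * ignore ordinary signals but still let fatal signals (SIGKILL) terminate
 * the task. Reuses the hypothetical my_static_wq/my_flag names.
 */
static int my_killable_wait(void)
{
	/* 0 on success, -ERESTARTSYS if a fatal signal arrived */
	return wait_event_killable(my_static_wq, my_flag);
}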

#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})


#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, );			\
} while (0)
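
/*
 * Illustrative sketch (not part of the original header): waiting for a
 * spinlock-protected list to become non-empty; the macro drops my_lock
 * around schedule() and retakes it before re-checking. All names are
 * hypothetical; my_static_wq comes from the earlier sketches.
 */
static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_list);

static void my_wait_for_item(void)
{
	spin_lock_irq(&my_lock);
	wait_event_lock_irq(my_static_wq, !list_empty(&my_list), my_lock);
	/* my_lock is held again here and my_list is non-empty */
	spin_unlock_irq(&my_lock);
}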


#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock, cmd);		\
	__ret;									\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock,);		\
	__ret;									\
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies if the
 * @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
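
/*
 * Illustrative sketch (not part of the original header): the open-coded
 * loop that wait_event() expands to, useful when the condition check needs
 * custom locking. Reuses the hypothetical my_static_wq/my_flag names.
 */
static void my_open_coded_wait(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_static_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_flag)
			break;
		schedule();
	}
	finish_wait(&my_static_wq, &wait);
}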

#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)

bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);

#endif /* _LINUX_WAIT_H */

v4.6
 
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02

struct __wait_queue {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @q: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head_t::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq))         if (@cond)
 *        wake_up(wq);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq: wait queue head
 *
 * Returns true if wq has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
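
/*
 * Illustrative sketch (not part of the original header): the usual waker
 * side of the bit-wait API declared above; the barrier orders the bit clear
 * against the lockless waiter check inside wake_up_bit(). MY_BUSY_BIT and
 * my_flags are hypothetical.
 */
static unsigned long my_flags;
#define MY_BUSY_BIT	0

static void my_release(void)
{
	clear_bit(MY_BUSY_BIT, &my_flags);
	smp_mb__after_atomic();			/* order the clear vs. the waiter test */
	wake_up_bit(&my_flags, MY_BUSY_BIT);
}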

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})
 296
 297#define __wait_event(wq, condition)					\
 298	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 299			    schedule())
 300
 301/**
 302 * wait_event - sleep until a condition gets true
 303 * @wq: the waitqueue to wait on
 304 * @condition: a C expression for the event to wait for
 305 *
 306 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 307 * @condition evaluates to true. The @condition is checked each time
 308 * the waitqueue @wq is woken up.
 309 *
 310 * wake_up() has to be called after changing any variable that could
 311 * change the result of the wait condition.
 312 */
 313#define wait_event(wq, condition)					\
 314do {									\
 315	might_sleep();							\
 316	if (condition)							\
 317		break;							\
 318	__wait_event(wq, condition);					\
 319} while (0)
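/*
 * Illustrative sketch (my_wq and my_flag are hypothetical):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_flag;
 *
 *	waiter:				waker:
 *	wait_event(my_wq, my_flag);	my_flag = 1;
 *					wake_up(&my_wq);
 *
 * The waker must set my_flag before calling wake_up(), otherwise the
 * waiter can re-check a stale condition and sleep forever.
 */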
 320
 321#define __io_wait_event(wq, condition)					\
 322	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 323			    io_schedule())
 324
 325/*
 326 * io_wait_event() -- like wait_event() but with io_schedule()
 327 */
 328#define io_wait_event(wq, condition)					\
 329do {									\
 330	might_sleep();							\
 331	if (condition)							\
 332		break;							\
 333	__io_wait_event(wq, condition);					\
 334} while (0)
 335
 336#define __wait_event_freezable(wq, condition)				\
 337	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 338			    schedule(); try_to_freeze())
 339
 340/**
 341 * wait_event_freezable - sleep (or freeze) until a condition gets true
 342 * @wq: the waitqueue to wait on
 343 * @condition: a C expression for the event to wait for
 344 *
 345 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 346 * to system load) until the @condition evaluates to true. The
 347 * @condition is checked each time the waitqueue @wq is woken up.
 348 *
 349 * wake_up() has to be called after changing any variable that could
 350 * change the result of the wait condition.
 351 */
 352#define wait_event_freezable(wq, condition)				\
 353({									\
 354	int __ret = 0;							\
 355	might_sleep();							\
 356	if (!(condition))						\
 357		__ret = __wait_event_freezable(wq, condition);		\
 358	__ret;								\
 359})
 360
 361#define __wait_event_timeout(wq, condition, timeout)			\
 362	___wait_event(wq, ___wait_cond_timeout(condition),		\
 363		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
 364		      __ret = schedule_timeout(__ret))
 365
 366/**
 367 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 368 * @wq: the waitqueue to wait on
 369 * @condition: a C expression for the event to wait for
 370 * @timeout: timeout, in jiffies
 371 *
 372 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 373 * @condition evaluates to true. The @condition is checked each time
 374 * the waitqueue @wq is woken up.
 375 *
 376 * wake_up() has to be called after changing any variable that could
 377 * change the result of the wait condition.
 378 *
 379 * Returns:
 380 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 381 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 382 * or the remaining jiffies (at least 1) if the @condition evaluated
 383 * to %true before the @timeout elapsed.
 384 */
 385#define wait_event_timeout(wq, condition, timeout)			\
 386({									\
 387	long __ret = timeout;						\
 388	might_sleep();							\
 389	if (!___wait_cond_timeout(condition))				\
 390		__ret = __wait_event_timeout(wq, condition, timeout);	\
 391	__ret;								\
 392})
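/*
 * Illustrative sketch (my_wq/my_done hypothetical): bound the wait to
 * one second and distinguish timeout from success:
 *
 *	long left = wait_event_timeout(my_wq, my_done, HZ);
 *	if (!left)
 *		pr_warn("my_done not signalled within 1s\n");
 */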
 393
 394#define __wait_event_freezable_timeout(wq, condition, timeout)		\
 395	___wait_event(wq, ___wait_cond_timeout(condition),		\
 396		      TASK_INTERRUPTIBLE, 0, timeout,			\
 397		      __ret = schedule_timeout(__ret); try_to_freeze())
 398
 399/*
 400 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 401 * increasing load and is freezable.
 402 */
 403#define wait_event_freezable_timeout(wq, condition, timeout)		\
 404({									\
 405	long __ret = timeout;						\
 406	might_sleep();							\
 407	if (!___wait_cond_timeout(condition))				\
 408		__ret = __wait_event_freezable_timeout(wq, condition, timeout);	\
 409	__ret;								\
 410})
 411
 412#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
 413	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
 414			    cmd1; schedule(); cmd2)
 415/*
 416 * Just like wait_event_cmd(), except it sets the exclusive flag
 417 */
 418#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
 419do {									\
 420	if (condition)							\
 421		break;							\
 422	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
 423} while (0)
 424
 425#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
 426	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 427			    cmd1; schedule(); cmd2)
 428
 429/**
 430 * wait_event_cmd - sleep until a condition gets true
 431 * @wq: the waitqueue to wait on
 432 * @condition: a C expression for the event to wait for
 433 * @cmd1: the command to execute before sleep
 434 * @cmd2: the command to execute after sleep
 435 *
 436 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 437 * @condition evaluates to true. The @condition is checked each time
 438 * the waitqueue @wq is woken up.
 439 *
 440 * wake_up() has to be called after changing any variable that could
 441 * change the result of the wait condition.
 442 */
 443#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
 444do {									\
 445	if (condition)							\
 446		break;							\
 447	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
 448} while (0)
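/*
 * Illustrative sketch (my_wq/my_done/my_mutex hypothetical): drop a
 * mutex across the sleep and retake it before re-checking @condition,
 * the way @cmd1/@cmd2 are typically used:
 *
 *	wait_event_cmd(my_wq, my_done,
 *		       mutex_unlock(&my_mutex),
 *		       mutex_lock(&my_mutex));
 */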
 449
 450#define __wait_event_interruptible(wq, condition)			\
 451	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 452		      schedule())
 453
 454/**
 455 * wait_event_interruptible - sleep until a condition gets true
 456 * @wq: the waitqueue to wait on
 457 * @condition: a C expression for the event to wait for
 458 *
 459 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 460 * @condition evaluates to true or a signal is received.
 461 * The @condition is checked each time the waitqueue @wq is woken up.
 462 *
 463 * wake_up() has to be called after changing any variable that could
 464 * change the result of the wait condition.
 465 *
 466 * The function will return -ERESTARTSYS if it was interrupted by a
 467 * signal and 0 if @condition evaluated to true.
 468 */
 469#define wait_event_interruptible(wq, condition)				\
 470({									\
 471	int __ret = 0;							\
 472	might_sleep();							\
 473	if (!(condition))						\
 474		__ret = __wait_event_interruptible(wq, condition);	\
 475	__ret;								\
 476})
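/*
 * Illustrative sketch (my_wq/my_done hypothetical): propagate the
 * -ERESTARTSYS so the interrupted syscall can be restarted:
 *
 *	int err = wait_event_interruptible(my_wq, my_done);
 *	if (err)
 *		return err;
 */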
 477
 478#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
 479	___wait_event(wq, ___wait_cond_timeout(condition),		\
 480		      TASK_INTERRUPTIBLE, 0, timeout,			\
 481		      __ret = schedule_timeout(__ret))
 482
 483/**
 484 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 485 * @wq: the waitqueue to wait on
 486 * @condition: a C expression for the event to wait for
 487 * @timeout: timeout, in jiffies
 488 *
 489 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 490 * @condition evaluates to true or a signal is received.
 491 * The @condition is checked each time the waitqueue @wq is woken up.
 492 *
 493 * wake_up() has to be called after changing any variable that could
 494 * change the result of the wait condition.
 495 *
 496 * Returns:
 497 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 498 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 499 * the remaining jiffies (at least 1) if the @condition evaluated
 500 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 501 * interrupted by a signal.
 502 */
 503#define wait_event_interruptible_timeout(wq, condition, timeout)	\
 504({									\
 505	long __ret = timeout;						\
 506	might_sleep();							\
 507	if (!___wait_cond_timeout(condition))				\
 508		__ret = __wait_event_interruptible_timeout(wq,		\
 509						condition, timeout);	\
 510	__ret;								\
 511})
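/*
 * Illustrative sketch (my_wq/my_done hypothetical) of the three-way
 * return convention:
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_done, HZ);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *
 * Negative ret is -ERESTARTSYS (signal), 0 means the timeout elapsed
 * with my_done still false, positive ret is the jiffies remaining.
 */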
 512
 513#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
 514({									\
 515	int __ret = 0;							\
 516	struct hrtimer_sleeper __t;					\
 517									\
 518	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
 519			      HRTIMER_MODE_REL);			\
 520	hrtimer_init_sleeper(&__t, current);				\
 521	if ((timeout).tv64 != KTIME_MAX)				\
 522		hrtimer_start_range_ns(&__t.timer, timeout,		\
 523				       current->timer_slack_ns,		\
 524				       HRTIMER_MODE_REL);		\
 525									\
 526	__ret = ___wait_event(wq, condition, state, 0, 0,		\
 527		if (!__t.task) {					\
 528			__ret = -ETIME;					\
 529			break;						\
 530		}							\
 531		schedule());						\
 532									\
 533	hrtimer_cancel(&__t.timer);					\
 534	destroy_hrtimer_on_stack(&__t.timer);				\
 535	__ret;								\
 536})
 537
 538/**
 539 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 540 * @wq: the waitqueue to wait on
 541 * @condition: a C expression for the event to wait for
 542 * @timeout: timeout, as a ktime_t
 543 *
 544 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 545 * @condition evaluates to true or the timeout elapses.
 546 * The @condition is checked each time the waitqueue @wq is woken up.
 547 *
 548 * wake_up() has to be called after changing any variable that could
 549 * change the result of the wait condition.
 550 *
 551 * The function returns 0 if @condition became true, or -ETIME if the timeout
 552 * elapsed.
 553 */
 554#define wait_event_hrtimeout(wq, condition, timeout)			\
 555({									\
 556	int __ret = 0;							\
 557	might_sleep();							\
 558	if (!(condition))						\
 559		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
 560					       TASK_UNINTERRUPTIBLE);	\
 561	__ret;								\
 562})
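/*
 * Illustrative sketch (my_wq/my_done hypothetical): a high-resolution
 * 5ms bound on an uninterruptible wait:
 *
 *	int err = wait_event_hrtimeout(my_wq, my_done, ms_to_ktime(5));
 *
 * err is 0 if my_done became true, -ETIME if the 5ms elapsed first.
 */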
 563
 564/**
 565 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 566 * @wq: the waitqueue to wait on
 567 * @condition: a C expression for the event to wait for
 568 * @timeout: timeout, as a ktime_t
 569 *
 570 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 571 * @condition evaluates to true or a signal is received.
 572 * The @condition is checked each time the waitqueue @wq is woken up.
 573 *
 574 * wake_up() has to be called after changing any variable that could
 575 * change the result of the wait condition.
 576 *
 577 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 578 * interrupted by a signal, or -ETIME if the timeout elapsed.
 579 */
 580#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
 581({									\
 582	long __ret = 0;							\
 583	might_sleep();							\
 584	if (!(condition))						\
 585		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
 586					       TASK_INTERRUPTIBLE);	\
 587	__ret;								\
 588})
 589
 590#define __wait_event_interruptible_exclusive(wq, condition)		\
 591	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
 592		      schedule())
 593
 594#define wait_event_interruptible_exclusive(wq, condition)		\
 595({									\
 596	int __ret = 0;							\
 597	might_sleep();							\
 598	if (!(condition))						\
 599		__ret = __wait_event_interruptible_exclusive(wq, condition);\
 600	__ret;								\
 601})
 602
 603
 604#define __wait_event_freezable_exclusive(wq, condition)			\
 605	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
 606			schedule(); try_to_freeze())
 607
 608#define wait_event_freezable_exclusive(wq, condition)			\
 609({									\
 610	int __ret = 0;							\
 611	might_sleep();							\
 612	if (!(condition))						\
 613		__ret = __wait_event_freezable_exclusive(wq, condition);\
 614	__ret;								\
 615})
 616
 617
 618#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
 619({									\
 620	int __ret = 0;							\
 621	DEFINE_WAIT(__wait);						\
 622	if (exclusive)							\
 623		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
 624	do {								\
 625		if (likely(list_empty(&__wait.task_list)))		\
 626			__add_wait_queue_tail(&(wq), &__wait);		\
 627		set_current_state(TASK_INTERRUPTIBLE);			\
 628		if (signal_pending(current)) {				\
 629			__ret = -ERESTARTSYS;				\
 630			break;						\
 631		}							\
 632		if (irq)						\
 633			spin_unlock_irq(&(wq).lock);			\
 634		else							\
 635			spin_unlock(&(wq).lock);			\
 636		schedule();						\
 637		if (irq)						\
 638			spin_lock_irq(&(wq).lock);			\
 639		else							\
 640			spin_lock(&(wq).lock);				\
 641	} while (!(condition));						\
 642	__remove_wait_queue(&(wq), &__wait);				\
 643	__set_current_state(TASK_RUNNING);				\
 644	__ret;								\
 645})
 646
 647
 648/**
 649 * wait_event_interruptible_locked - sleep until a condition gets true
 650 * @wq: the waitqueue to wait on
 651 * @condition: a C expression for the event to wait for
 652 *
 653 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 654 * @condition evaluates to true or a signal is received.
 655 * The @condition is checked each time the waitqueue @wq is woken up.
 656 *
 657 * It must be called with wq.lock held.  This spinlock is unlocked
 658 * while sleeping, but the @condition test is done while the lock is
 659 * held, and the lock is held again when this macro exits.
 660 *
 661 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 662 * functions which must match the way they are locked/unlocked outside
 663 * of this macro.
 664 *
 665 * wake_up_locked() has to be called after changing any variable that could
 666 * change the result of the wait condition.
 667 *
 668 * The function will return -ERESTARTSYS if it was interrupted by a
 669 * signal and 0 if @condition evaluated to true.
 670 */
 671#define wait_event_interruptible_locked(wq, condition)			\
 672	((condition)							\
 673	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
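/*
 * Illustrative sketch (my_wq/my_count hypothetical): the condition is
 * checked and consumed under wq.lock, paired with a locked wakeup:
 *
 *	spin_lock(&my_wq.lock);
 *	err = wait_event_interruptible_locked(my_wq, my_count > 0);
 *	if (!err)
 *		my_count--;
 *	spin_unlock(&my_wq.lock);
 *
 * and on the producer side:
 *
 *	spin_lock(&my_wq.lock);
 *	my_count++;
 *	wake_up_locked(&my_wq);
 *	spin_unlock(&my_wq.lock);
 */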
 674
 675/**
 676 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 677 * @wq: the waitqueue to wait on
 678 * @condition: a C expression for the event to wait for
 679 *
 680 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 681 * @condition evaluates to true or a signal is received.
 682 * The @condition is checked each time the waitqueue @wq is woken up.
 683 *
 684 * It must be called with wq.lock held.  This spinlock is unlocked
 685 * while sleeping, but the @condition test is done while the lock is
 686 * held, and the lock is held again when this macro exits.
 687 *
 688 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 689 * functions which must match the way they are locked/unlocked outside
 690 * of this macro.
 691 *
 692 * wake_up_locked() has to be called after changing any variable that could
 693 * change the result of the wait condition.
 694 *
 695 * The function will return -ERESTARTSYS if it was interrupted by a
 696 * signal and 0 if @condition evaluated to true.
 697 */
 698#define wait_event_interruptible_locked_irq(wq, condition)		\
 699	((condition)							\
 700	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
 701
 702/**
 703 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 704 * @wq: the waitqueue to wait on
 705 * @condition: a C expression for the event to wait for
 706 *
 707 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 708 * @condition evaluates to true or a signal is received.
 709 * The @condition is checked each time the waitqueue @wq is woken up.
 710 *
 711 * It must be called with wq.lock held.  This spinlock is unlocked
 712 * while sleeping, but the @condition test is done while the lock is
 713 * held, and the lock is held again when this macro exits.
 714 *
 715 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 716 * functions which must match the way they are locked/unlocked outside
 717 * of this macro.
 718 *
 719 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 720 * set, so if this process is woken while other processes are waiting
 721 * on the list, no further exclusive waiters are considered for wakeup.
 722 *
 723 * wake_up_locked() has to be called after changing any variable that could
 724 * change the result of the wait condition.
 725 *
 726 * The function will return -ERESTARTSYS if it was interrupted by a
 727 * signal and 0 if @condition evaluated to true.
 728 */
 729#define wait_event_interruptible_exclusive_locked(wq, condition)	\
 730	((condition)							\
 731	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
 732
 733/**
 734 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 735 * @wq: the waitqueue to wait on
 736 * @condition: a C expression for the event to wait for
 737 *
 738 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 739 * @condition evaluates to true or a signal is received.
 740 * The @condition is checked each time the waitqueue @wq is woken up.
 741 *
 742 * It must be called with wq.lock held.  This spinlock is unlocked
 743 * while sleeping, but the @condition test is done while the lock is
 744 * held, and the lock is held again when this macro exits.
 745 *
 746 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 747 * functions which must match the way they are locked/unlocked outside
 748 * of this macro.
 749 *
 750 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 751 * set, so if this process is woken while other processes are waiting
 752 * on the list, no further exclusive waiters are considered for wakeup.
 753 *
 754 * wake_up_locked() has to be called after changing any variable that could
 755 * change the result of the wait condition.
 756 *
 757 * The function will return -ERESTARTSYS if it was interrupted by a
 758 * signal and 0 if @condition evaluated to true.
 759 */
 760#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
 761	((condition)							\
 762	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
 763
 764
 765#define __wait_event_killable(wq, condition)				\
 766	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
 767
 768/**
 769 * wait_event_killable - sleep until a condition gets true
 770 * @wq: the waitqueue to wait on
 771 * @condition: a C expression for the event to wait for
 772 *
 773 * The process is put to sleep (TASK_KILLABLE) until the
 774 * @condition evaluates to true or a fatal signal is received.
 775 * The @condition is checked each time the waitqueue @wq is woken up.
 776 *
 777 * wake_up() has to be called after changing any variable that could
 778 * change the result of the wait condition.
 779 *
 780 * The function will return -ERESTARTSYS if it was interrupted by a
 781 * fatal signal and 0 if @condition evaluated to true.
 782 */
 783#define wait_event_killable(wq, condition)				\
 784({									\
 785	int __ret = 0;							\
 786	might_sleep();							\
 787	if (!(condition))						\
 788		__ret = __wait_event_killable(wq, condition);		\
 789	__ret;								\
 790})
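/*
 * Illustrative sketch (my_wq/my_done hypothetical): like the
 * interruptible variant, but only a fatal signal (e.g. SIGKILL) can
 * abort the wait:
 *
 *	int err = wait_event_killable(my_wq, my_done);
 *	if (err)
 *		return err;
 */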
 791
 792
 793#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
 794	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 795			    spin_unlock_irq(&lock);			\
 796			    cmd;					\
 797			    schedule();					\
 798			    spin_lock_irq(&lock))
 799
 800/**
 801 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 802 *			     condition is checked under the lock. This
 803 *			     is expected to be called with the lock
 804 *			     taken.
 805 * @wq: the waitqueue to wait on
 806 * @condition: a C expression for the event to wait for
 807 * @lock: a locked spinlock_t, which will be released before cmd
 808 *	  and schedule() and reacquired afterwards.
 809 * @cmd: a command which is invoked outside the critical section before
 810 *	 sleep
 811 *
 812 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 813 * @condition evaluates to true. The @condition is checked each time
 814 * the waitqueue @wq is woken up.
 815 *
 816 * wake_up() has to be called after changing any variable that could
 817 * change the result of the wait condition.
 818 *
 819 * This is supposed to be called while holding the lock. The lock is
 820 * dropped before invoking the cmd and going to sleep and is reacquired
 821 * afterwards.
 822 */
 823#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
 824do {									\
 825	if (condition)							\
 826		break;							\
 827	__wait_event_lock_irq(wq, condition, lock, cmd);		\
 828} while (0)
 829
 830/**
 831 * wait_event_lock_irq - sleep until a condition gets true. The
 832 *			 condition is checked under the lock. This
 833 *			 is expected to be called with the lock
 834 *			 taken.
 835 * @wq: the waitqueue to wait on
 836 * @condition: a C expression for the event to wait for
 837 * @lock: a locked spinlock_t, which will be released before schedule()
 838 *	  and reacquired afterwards.
 839 *
 840 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 841 * @condition evaluates to true. The @condition is checked each time
 842 * the waitqueue @wq is woken up.
 843 *
 844 * wake_up() has to be called after changing any variable that could
 845 * change the result of the wait condition.
 846 *
 847 * This is supposed to be called while holding the lock. The lock is
 848 * dropped before going to sleep and is reacquired afterwards.
 849 */
 850#define wait_event_lock_irq(wq, condition, lock)			\
 851do {									\
 852	if (condition)							\
 853		break;							\
 854	__wait_event_lock_irq(wq, condition, lock, );			\
 855} while (0)
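/*
 * Illustrative sketch (my_lock/my_wq/my_list and struct my_item are
 * hypothetical): wait for a list to become non-empty, with @condition
 * evaluated under the irq-disabling spinlock:
 *
 *	struct my_item *item;
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, !list_empty(&my_list), my_lock);
 *	item = list_first_entry(&my_list, struct my_item, node);
 *	list_del(&item->node);
 *	spin_unlock_irq(&my_lock);
 */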
 856
 857
 858#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
 859	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 860		      spin_unlock_irq(&lock);				\
 861		      cmd;						\
 862		      schedule();					\
 863		      spin_lock_irq(&lock))
 864
 865/**
 866 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 867 *		The condition is checked under the lock. This is expected to
 868 *		be called with the lock taken.
 869 * @wq: the waitqueue to wait on
 870 * @condition: a C expression for the event to wait for
 871 * @lock: a locked spinlock_t, which will be released before cmd and
 872 *	  schedule() and reacquired afterwards.
 873 * @cmd: a command which is invoked outside the critical section before
 874 *	 sleep
 875 *
 876 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 877 * @condition evaluates to true or a signal is received. The @condition is
 878 * checked each time the waitqueue @wq is woken up.
 879 *
 880 * wake_up() has to be called after changing any variable that could
 881 * change the result of the wait condition.
 882 *
 883 * This is supposed to be called while holding the lock. The lock is
 884 * dropped before invoking the cmd and going to sleep and is reacquired
 885 * afterwards.
 886 *
 887 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 888 * and 0 if @condition evaluated to true.
 889 */
 890#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
 891({									\
 892	int __ret = 0;							\
 893	if (!(condition))						\
 894		__ret = __wait_event_interruptible_lock_irq(wq,		\
 895						condition, lock, cmd);	\
 896	__ret;								\
 897})
 898
 899/**
 900 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 901 *		The condition is checked under the lock. This is expected
 902 *		to be called with the lock taken.
 903 * @wq: the waitqueue to wait on
 904 * @condition: a C expression for the event to wait for
 905 * @lock: a locked spinlock_t, which will be released before schedule()
 906 *	  and reacquired afterwards.
 907 *
 908 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 909 * @condition evaluates to true or a signal is received. The @condition is
 910 * checked each time the waitqueue @wq is woken up.
 911 *
 912 * wake_up() has to be called after changing any variable that could
 913 * change the result of the wait condition.
 914 *
 915 * This is supposed to be called while holding the lock. The lock is
 916 * dropped before going to sleep and is reacquired afterwards.
 917 *
 918 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 919 * and 0 if @condition evaluated to true.
 920 */
 921#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
 922({									\
 923	int __ret = 0;							\
 924	if (!(condition))						\
 925		__ret = __wait_event_interruptible_lock_irq(wq,		\
 926						condition, lock,);	\
 927	__ret;								\
 928})
 929
 930#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
 931						    lock, timeout)	\
 932	___wait_event(wq, ___wait_cond_timeout(condition),		\
 933		      TASK_INTERRUPTIBLE, 0, timeout,			\
 934		      spin_unlock_irq(&lock);				\
 935		      __ret = schedule_timeout(__ret);			\
 936		      spin_lock_irq(&lock))
 937
 938/**
 939 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 940 *		true or a timeout elapses. The condition is checked under
 941 *		the lock. This is expected to be called with the lock taken.
 942 * @wq: the waitqueue to wait on
 943 * @condition: a C expression for the event to wait for
 944 * @lock: a locked spinlock_t, which will be released before schedule()
 945 *	  and reacquired afterwards.
 946 * @timeout: timeout, in jiffies
 947 *
 948 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 949 * @condition evaluates to true or a signal is received. The @condition is
 950 * checked each time the waitqueue @wq is woken up.
 951 *
 952 * wake_up() has to be called after changing any variable that could
 953 * change the result of the wait condition.
 954 *
 955 * This is supposed to be called while holding the lock. The lock is
 956 * dropped before going to sleep and is reacquired afterwards.
 957 *
 958 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 959 * was interrupted by a signal, and the remaining jiffies (at least 1)
 960 * if the @condition evaluated to true before the @timeout elapsed.
 961 */
 962#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
 963						  timeout)		\
 964({									\
 965	long __ret = timeout;						\
 966	if (!___wait_cond_timeout(condition))				\
 967		__ret = __wait_event_interruptible_lock_irq_timeout(	\
 968					wq, condition, lock, timeout);	\
 969	__ret;								\
 970})
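/*
 * Illustrative sketch (my_lock/my_wq/my_done hypothetical):
 *
 *	spin_lock_irq(&my_lock);
 *	ret = wait_event_interruptible_lock_irq_timeout(my_wq, my_done,
 *							my_lock, HZ);
 *	spin_unlock_irq(&my_lock);
 *
 * ret follows the convention documented above: 0 on timeout,
 * -ERESTARTSYS on a signal, remaining jiffies otherwise.
 */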
 971
 972/*
 973 * Waitqueues which are removed from the waitqueue_head at wakeup time
 974 */
 975void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
 976void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
 977long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
 978void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
 979void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
 980long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
 981int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 982int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 983int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 984
 985#define DEFINE_WAIT_FUNC(name, function)				\
 986	wait_queue_t name = {						\
 987		.private	= current,				\
 988		.func		= function,				\
 989		.task_list	= LIST_HEAD_INIT((name).task_list),	\
 990	}
 991
 992#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
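/*
 * Illustrative sketch (my_wq/my_done hypothetical): the open-coded
 * equivalent of wait_event(), built from DEFINE_WAIT(),
 * prepare_to_wait() and finish_wait():
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_done)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */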
 993
 994#define DEFINE_WAIT_BIT(name, word, bit)				\
 995	struct wait_bit_queue name = {					\
 996		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
 997		.wait	= {						\
 998			.private	= current,			\
 999			.func		= wake_bit_function,		\
1000			.task_list	=				\
1001				LIST_HEAD_INIT((name).wait.task_list),	\
1002		},							\
1003	}
1004
1005#define init_wait(wait)							\
1006	do {								\
1007		(wait)->private = current;				\
1008		(wait)->func = autoremove_wake_function;		\
1009		INIT_LIST_HEAD(&(wait)->task_list);			\
1010		(wait)->flags = 0;					\
1011	} while (0)
1012
1013
1014extern int bit_wait(struct wait_bit_key *, int);
1015extern int bit_wait_io(struct wait_bit_key *, int);
1016extern int bit_wait_timeout(struct wait_bit_key *, int);
1017extern int bit_wait_io_timeout(struct wait_bit_key *, int);
1018
1019/**
1020 * wait_on_bit - wait for a bit to be cleared
1021 * @word: the word being waited on, a kernel virtual address
1022 * @bit: the bit of the word being waited on
1023 * @mode: the task state to sleep in
1024 *
1025 * There is a standard hashed waitqueue table for generic use. This
1026 * is the part of the hashtable's accessor API that waits on a bit.
1027 * For instance, threads waiting for a bitflag to clear would each
1028 * call wait_on_bit() on it.
1029 * Use wait_on_bit() where one is waiting for the bit to clear
1030 * but has no intention of setting it.
1031 * Returned value will be zero if the bit was cleared, or non-zero
1032 * if the process received a signal and the mode permitted wakeup
1033 * on that signal.
1034 */
1035static inline int
1036wait_on_bit(unsigned long *word, int bit, unsigned mode)
1037{
1038	might_sleep();
1039	if (!test_bit(bit, word))
1040		return 0;
1041	return out_of_line_wait_on_bit(word, bit,
1042				       bit_wait,
1043				       mode);
1044}
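/*
 * Illustrative sketch (my_flags/MY_BUSY hypothetical): sleep until
 * another context clears the "busy" bit:
 *
 *	if (wait_on_bit(&my_flags, MY_BUSY, TASK_INTERRUPTIBLE))
 *		return -EINTR;
 *
 * where the non-zero return means a signal arrived first.
 */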
1045
1046/**
1047 * wait_on_bit_io - wait for a bit to be cleared
1048 * @word: the word being waited on, a kernel virtual address
1049 * @bit: the bit of the word being waited on
1050 * @mode: the task state to sleep in
1051 *
1052 * Use the standard hashed waitqueue table to wait for a bit
1053 * to be cleared.  This is similar to wait_on_bit(), but calls
1054 * io_schedule() instead of schedule() for the actual waiting.
1055 *
1056 * Returned value will be zero if the bit was cleared, or non-zero
1057 * if the process received a signal and the mode permitted wakeup
1058 * on that signal.
1059 */
1060static inline int
1061wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1062{
1063	might_sleep();
1064	if (!test_bit(bit, word))
1065		return 0;
1066	return out_of_line_wait_on_bit(word, bit,
1067				       bit_wait_io,
1068				       mode);
1069}
1070
1071/**
1072 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1073 * @word: the word being waited on, a kernel virtual address
1074 * @bit: the bit of the word being waited on
1075 * @mode: the task state to sleep in
1076 * @timeout: timeout, in jiffies
1077 *
1078 * Use the standard hashed waitqueue table to wait for a bit
1079 * to be cleared. This is similar to wait_on_bit(), except also takes a
1080 * timeout parameter.
1081 *
1082 * Returned value will be zero if the bit was cleared before the
1083 * @timeout elapsed, or non-zero if the @timeout elapsed or the
1084 * process received a signal and the mode permitted wakeup on that signal.
1085 */
1086static inline int
1087wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1088		    unsigned long timeout)
1089{
1090	might_sleep();
1091	if (!test_bit(bit, word))
1092		return 0;
1093	return out_of_line_wait_on_bit_timeout(word, bit,
1094					       bit_wait_timeout,
1095					       mode, timeout);
1096}
1097
1098/**
1099 * wait_on_bit_action - wait for a bit to be cleared
1100 * @word: the word being waited on, a kernel virtual address
1101 * @bit: the bit of the word being waited on
1102 * @action: the function used to sleep, which may take special actions
1103 * @mode: the task state to sleep in
1104 *
1105 * Use the standard hashed waitqueue table to wait for a bit
1106 * to be cleared, and allow the waiting action to be specified.
1107 * This is like wait_on_bit() but allows fine control of how the waiting
1108 * is done.
1109 *
1110 * Returned value will be zero if the bit was cleared, or non-zero
1111 * if the process received a signal and the mode permitted wakeup
1112 * on that signal.
1113 */
1114static inline int
1115wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1116		   unsigned mode)
1117{
1118	might_sleep();
1119	if (!test_bit(bit, word))
1120		return 0;
1121	return out_of_line_wait_on_bit(word, bit, action, mode);
1122}
1123
1124/**
1125 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1126 * @word: the word being waited on, a kernel virtual address
1127 * @bit: the bit of the word being waited on
1128 * @mode: the task state to sleep in
1129 *
1130 * There is a standard hashed waitqueue table for generic use. This
1131 * is the part of the hashtable's accessor API that waits on a bit
1132 * when one intends to set it, for instance, trying to lock bitflags.
1133 * For instance, if one were to have waiters trying to set a bitflag
1134 * and waiting for it to clear before setting it, one would call
1135 * wait_on_bit_lock() in threads waiting to be able to set the bit.
1136 * One uses wait_on_bit_lock() where one is waiting for the bit to
1137 * clear with the intention of setting it, and when done, clearing it.
1138 *
1139 * Returns zero if the bit was (eventually) found to be clear and was
1140 * set.  Returns non-zero if a signal was delivered to the process and
1141 * the @mode allows that signal to wake the process.
1142 */
1143static inline int
1144wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1145{
1146	might_sleep();
1147	if (!test_and_set_bit(bit, word))
1148		return 0;
1149	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1150}
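/*
 * Illustrative sketch (my_flags/MY_LOCK hypothetical): use a bit as a
 * lock, with the matching clear-then-wake unlock sequence:
 *
 *	if (wait_on_bit_lock(&my_flags, MY_LOCK, TASK_KILLABLE))
 *		return -EINTR;
 *	... critical section, the MY_LOCK bit is ours ...
 *	clear_bit(MY_LOCK, &my_flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&my_flags, MY_LOCK);
 */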
1151
1152/**
1153 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1154 * @word: the word being waited on, a kernel virtual address
1155 * @bit: the bit of the word being waited on
1156 * @mode: the task state to sleep in
1157 *
1158 * Use the standard hashed waitqueue table to wait for a bit
1159 * to be cleared and then to atomically set it.  This is similar
1160 * to wait_on_bit(), but calls io_schedule() instead of schedule()
1161 * for the actual waiting.
1162 *
1163 * Returns zero if the bit was (eventually) found to be clear and was
1164 * set.  Returns non-zero if a signal was delivered to the process and
1165 * the @mode allows that signal to wake the process.
1166 */
1167static inline int
1168wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1169{
1170	might_sleep();
1171	if (!test_and_set_bit(bit, word))
1172		return 0;
1173	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1174}
1175
1176/**
1177 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1178 * @word: the word being waited on, a kernel virtual address
1179 * @bit: the bit of the word being waited on
1180 * @action: the function used to sleep, which may take special actions
1181 * @mode: the task state to sleep in
1182 *
1183 * Use the standard hashed waitqueue table to wait for a bit
1184 * to be cleared and then to set it, and allow the waiting action
1185 * to be specified.
1186 * This is like wait_on_bit_lock() but allows fine control of how the
1187 * waiting is done.
1188 *
1189 * Returns zero if the bit was (eventually) found to be clear and was
1190 * set.  Returns non-zero if a signal was delivered to the process and
1191 * the @mode allows that signal to wake the process.
1192 */
1193static inline int
1194wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1195			unsigned mode)
1196{
1197	might_sleep();
1198	if (!test_and_set_bit(bit, word))
1199		return 0;
1200	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1201}
1202
1203/**
1204 * wait_on_atomic_t - Wait for an atomic_t to become 0
1205 * @val: The atomic value being waited on, a kernel virtual address
1206 * @action: the function used to sleep, which may take special actions
1207 * @mode: the task state to sleep in
1208 *
1209 * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
1210 * the purpose of getting a waitqueue, but we set the key to a bit number
1211 * outside of the target 'word'.
1212 */
1213static inline
1214int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1215{
1216	might_sleep();
1217	if (atomic_read(val) == 0)
1218		return 0;
1219	return out_of_line_wait_on_atomic_t(val, action, mode);
1220}
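/*
 * Illustrative sketch (my_refs and my_wait() hypothetical): wait for a
 * refcount-like atomic_t to reach zero; the task state is already set
 * by the wait machinery, so the action only needs to sleep:
 *
 *	static int my_wait(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	wait_on_atomic_t(&my_refs, my_wait, TASK_UNINTERRUPTIBLE);
 *
 * and whoever drops the last reference must wake the table:
 *
 *	if (atomic_dec_and_test(&my_refs))
 *		wake_up_atomic_t(&my_refs);
 */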
1221
1222#endif /* _LINUX_WAIT_H */