/* include/linux/wait.h — Linux v5.14.15.
 * (Source-browser page boilerplate removed from this extraction.) */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_WAIT_H
   3#define _LINUX_WAIT_H
   4/*
   5 * Linux wait queue related types and methods
   6 */
   7#include <linux/list.h>
   8#include <linux/stddef.h>
   9#include <linux/spinlock.h>
  10
  11#include <asm/current.h>
  12#include <uapi/linux/wait.h>
  13
  14typedef struct wait_queue_entry wait_queue_entry_t;
  15
  16typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
  17int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
  18
  19/* wait_queue_entry::flags */
  20#define WQ_FLAG_EXCLUSIVE	0x01
  21#define WQ_FLAG_WOKEN		0x02
  22#define WQ_FLAG_BOOKMARK	0x04
  23#define WQ_FLAG_CUSTOM		0x08
  24#define WQ_FLAG_DONE		0x10
  25#define WQ_FLAG_PRIORITY	0x20
  26
  27/*
  28 * A single wait-queue entry structure:
  29 */
   30struct wait_queue_entry {
   31	unsigned int		flags;		/* WQ_FLAG_* bits (exclusive, priority, ...) */
   32	void			*private;	/* opaque waker data; a task_struct * when set up by init_waitqueue_entry() */
   33	wait_queue_func_t	func;		/* wake callback, e.g. default_wake_function */
   34	struct list_head	entry;		/* link in wait_queue_head::head */
   35};
  36
   37struct wait_queue_head {
   38	spinlock_t		lock;		/* serializes queue modifications (see the *_locked wakeup variants) */
   39	struct list_head	head;		/* list of queued wait_queue_entry::entry */
   40};
   41typedef struct wait_queue_head wait_queue_head_t;
  42
  43struct task_struct;
  44
  45/*
  46 * Macros for declaration and initialisaton of the datatypes
  47 */
  48
  49#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
  50	.private	= tsk,							\
  51	.func		= default_wake_function,				\
  52	.entry		= { NULL, NULL } }
  53
  54#define DECLARE_WAITQUEUE(name, tsk)						\
  55	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
  56
  57#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
  58	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
  59	.head		= { &(name).head, &(name).head } }
  60
  61#define DECLARE_WAIT_QUEUE_HEAD(name) \
  62	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
  63
  64extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
  65
  66#define init_waitqueue_head(wq_head)						\
  67	do {									\
  68		static struct lock_class_key __key;				\
  69										\
  70		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
  71	} while (0)
  72
  73#ifdef CONFIG_LOCKDEP
  74# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
  75	({ init_waitqueue_head(&name); name; })
  76# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
  77	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
  78#else
  79# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
  80#endif
  81
  82static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
  83{
  84	wq_entry->flags		= 0;
  85	wq_entry->private	= p;
  86	wq_entry->func		= default_wake_function;
  87}
  88
  89static inline void
  90init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
  91{
  92	wq_entry->flags		= 0;
  93	wq_entry->private	= NULL;
  94	wq_entry->func		= func;
  95}
  96
  97/**
  98 * waitqueue_active -- locklessly test for waiters on the queue
  99 * @wq_head: the waitqueue to test for waiters
 100 *
 101 * returns true if the wait list is not empty
 102 *
 103 * NOTE: this function is lockless and requires care, incorrect usage _will_
 104 * lead to sporadic and non-obvious failure.
 105 *
 106 * Use either while holding wait_queue_head::lock or when used for wakeups
 107 * with an extra smp_mb() like::
 108 *
 109 *      CPU0 - waker                    CPU1 - waiter
 110 *
 111 *                                      for (;;) {
 112 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 113 *      smp_mb();                         // smp_mb() from set_current_state()
 114 *      if (waitqueue_active(wq_head))         if (@cond)
 115 *        wake_up(wq_head);                      break;
 116 *                                        schedule();
 117 *                                      }
 118 *                                      finish_wait(&wq_head, &wait);
 119 *
 120 * Because without the explicit smp_mb() it's possible for the
 121 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 122 * observe an empty wait list while the waiter might not observe @cond.
 123 *
 124 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 125 * which (when the lock is uncontended) are of roughly equal cost.
 126 */
 127static inline int waitqueue_active(struct wait_queue_head *wq_head)
 128{
 129	return !list_empty(&wq_head->head);
 130}
 131
 132/**
 133 * wq_has_single_sleeper - check if there is only one sleeper
 134 * @wq_head: wait queue head
 135 *
 136 * Returns true of wq_head has only one sleeper on the list.
 137 *
 138 * Please refer to the comment for waitqueue_active.
 139 */
 140static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
 141{
 142	return list_is_singular(&wq_head->head);
 143}
 144
 145/**
 146 * wq_has_sleeper - check if there are any waiting processes
 147 * @wq_head: wait queue head
 148 *
 149 * Returns true if wq_head has waiting processes
 150 *
 151 * Please refer to the comment for waitqueue_active.
 152 */
 153static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
 154{
 155	/*
 156	 * We need to be sure we are in sync with the
 157	 * add_wait_queue modifications to the wait queue.
 158	 *
 159	 * This memory barrier should be paired with one on the
 160	 * waiting side.
 161	 */
 162	smp_mb();
 163	return waitqueue_active(wq_head);
 164}
 165
 166extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 167extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 168extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 169extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 170
 171static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 172{
 173	struct list_head *head = &wq_head->head;
 174	struct wait_queue_entry *wq;
 175
 176	list_for_each_entry(wq, &wq_head->head, entry) {
 177		if (!(wq->flags & WQ_FLAG_PRIORITY))
 178			break;
 179		head = &wq->entry;
 180	}
 181	list_add(&wq_entry->entry, head);
 182}
 183
 184/*
 185 * Used for wake-one threads:
 186 */
 187static inline void
 188__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 189{
 190	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 191	__add_wait_queue(wq_head, wq_entry);
 192}
 193
 194static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 195{
 196	list_add_tail(&wq_entry->entry, &wq_head->head);
 197}
 198
 199static inline void
 200__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 201{
 202	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 203	__add_wait_queue_entry_tail(wq_head, wq_entry);
 204}
 205
 206static inline void
 207__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 208{
 209	list_del(&wq_entry->entry);
 210}
 211
 212void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
 
 213void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 214void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
 215		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
 216void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 217void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 218void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 219void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 
 220
 221#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 222#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
 223#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
 224#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
 225#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
 226
 227#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 228#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 229#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
 230#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
 231
 232/*
 233 * Wakeup macros to be used to report events to the targets.
 234 */
 235#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
 236#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
 237#define wake_up_poll(x, m)							\
 238	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
 
 
 239#define wake_up_locked_poll(x, m)						\
 240	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
 241#define wake_up_interruptible_poll(x, m)					\
 242	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
 243#define wake_up_interruptible_sync_poll(x, m)					\
 244	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 245#define wake_up_interruptible_sync_poll_locked(x, m)				\
 246	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 247
/*
 * ___wait_cond_timeout(condition) - condition wrapper for the *_timeout
 * wait_event variants.  Relies on the enclosing macro's __ret holding the
 * remaining jiffies: if @condition became true just as the timeout ran out
 * (__ret == 0), __ret is bumped to 1 so callers can distinguish "condition
 * met" from "timed out".  Evaluates true when waiting should stop —
 * condition satisfied or time exhausted.
 */
  248#define ___wait_cond_timeout(condition)						\
  249({										\
  250	bool __cond = (condition);						\
  251	if (__cond && !__ret)							\
  252		__ret = 1;							\
  253	__cond || !__ret;							\
  254})
  255
/*
 * ___wait_is_interruptible(state) - true when a sleep in @state can be
 * broken by a signal (TASK_INTERRUPTIBLE or TASK_KILLABLE), or when
 * @state is not a compile-time constant and must be assumed interruptible.
 */
  256#define ___wait_is_interruptible(state)						\
  257	(!__builtin_constant_p(state) ||					\
  258		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\
  259
 260extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
 261
 262/*
 263 * The below macro ___wait_event() has an explicit shadow of the __ret
 264 * variable when used from the wait_event_*() macros.
 265 *
 266 * This is so that both can use the ___wait_cond_timeout() construct
 267 * to wrap the condition.
 268 *
 269 * The type inconsistency of the wait_event_*() __ret variable is also
 270 * on purpose; we use long where we can return timeout values and int
 271 * otherwise.
 272 */
 273
 274#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
 275({										\
 276	__label__ __out;							\
 277	struct wait_queue_entry __wq_entry;					\
 278	long __ret = ret;	/* explicit shadow */				\
 279										\
 280	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
 281	for (;;) {								\
 282		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
 283										\
 284		if (condition)							\
 285			break;							\
 286										\
 287		if (___wait_is_interruptible(state) && __int) {			\
 288			__ret = __int;						\
 289			goto __out;						\
 290		}								\
 291										\
 292		cmd;								\
 293	}									\
 294	finish_wait(&wq_head, &__wq_entry);					\
 295__out:	__ret;									\
 296})
 297
 298#define __wait_event(wq_head, condition)					\
 299	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 300			    schedule())
 301
 302/**
 303 * wait_event - sleep until a condition gets true
 304 * @wq_head: the waitqueue to wait on
 305 * @condition: a C expression for the event to wait for
 306 *
 307 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 308 * @condition evaluates to true. The @condition is checked each time
 309 * the waitqueue @wq_head is woken up.
 310 *
 311 * wake_up() has to be called after changing any variable that could
 312 * change the result of the wait condition.
 313 */
 314#define wait_event(wq_head, condition)						\
 315do {										\
 316	might_sleep();								\
 317	if (condition)								\
 318		break;								\
 319	__wait_event(wq_head, condition);					\
 320} while (0)
 321
 322#define __io_wait_event(wq_head, condition)					\
 323	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 324			    io_schedule())
 325
 326/*
 327 * io_wait_event() -- like wait_event() but with io_schedule()
 328 */
 329#define io_wait_event(wq_head, condition)					\
 330do {										\
 331	might_sleep();								\
 332	if (condition)								\
 333		break;								\
 334	__io_wait_event(wq_head, condition);					\
 335} while (0)
 336
 337#define __wait_event_freezable(wq_head, condition)				\
 338	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 339			    freezable_schedule())
 340
 341/**
 342 * wait_event_freezable - sleep (or freeze) until a condition gets true
 343 * @wq_head: the waitqueue to wait on
 344 * @condition: a C expression for the event to wait for
 345 *
 346 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 347 * to system load) until the @condition evaluates to true. The
 348 * @condition is checked each time the waitqueue @wq_head is woken up.
 349 *
 350 * wake_up() has to be called after changing any variable that could
 351 * change the result of the wait condition.
 352 */
 353#define wait_event_freezable(wq_head, condition)				\
 354({										\
 355	int __ret = 0;								\
 356	might_sleep();								\
 357	if (!(condition))							\
 358		__ret = __wait_event_freezable(wq_head, condition);		\
 359	__ret;									\
 360})
 361
 362#define __wait_event_timeout(wq_head, condition, timeout)			\
 363	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 364		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
 365		      __ret = schedule_timeout(__ret))
 366
 367/**
 368 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 369 * @wq_head: the waitqueue to wait on
 370 * @condition: a C expression for the event to wait for
 371 * @timeout: timeout, in jiffies
 372 *
 373 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 374 * @condition evaluates to true. The @condition is checked each time
 375 * the waitqueue @wq_head is woken up.
 376 *
 377 * wake_up() has to be called after changing any variable that could
 378 * change the result of the wait condition.
 379 *
 380 * Returns:
 381 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 382 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 383 * or the remaining jiffies (at least 1) if the @condition evaluated
 384 * to %true before the @timeout elapsed.
 385 */
 386#define wait_event_timeout(wq_head, condition, timeout)				\
 387({										\
 388	long __ret = timeout;							\
 389	might_sleep();								\
 390	if (!___wait_cond_timeout(condition))					\
 391		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
 392	__ret;									\
 393})
 394
 395#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
 396	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 397		      TASK_INTERRUPTIBLE, 0, timeout,				\
 398		      __ret = freezable_schedule_timeout(__ret))
 399
 400/*
 401 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 402 * increasing load and is freezable.
 403 */
 404#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
 405({										\
 406	long __ret = timeout;							\
 407	might_sleep();								\
 408	if (!___wait_cond_timeout(condition))					\
 409		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
 410	__ret;									\
 411})
 412
 413#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
 414	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
 415			    cmd1; schedule(); cmd2)
 416/*
 417 * Just like wait_event_cmd(), except it sets exclusive flag
 418 */
 419#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
 420do {										\
 421	if (condition)								\
 422		break;								\
 423	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
 424} while (0)
 425
 426#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
 427	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 428			    cmd1; schedule(); cmd2)
 429
 430/**
 431 * wait_event_cmd - sleep until a condition gets true
 432 * @wq_head: the waitqueue to wait on
 433 * @condition: a C expression for the event to wait for
 434 * @cmd1: the command will be executed before sleep
 435 * @cmd2: the command will be executed after sleep
 436 *
 437 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 438 * @condition evaluates to true. The @condition is checked each time
 439 * the waitqueue @wq_head is woken up.
 440 *
 441 * wake_up() has to be called after changing any variable that could
 442 * change the result of the wait condition.
 443 */
 444#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
 445do {										\
 446	if (condition)								\
 447		break;								\
 448	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
 449} while (0)
 450
 451#define __wait_event_interruptible(wq_head, condition)				\
 452	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 453		      schedule())
 454
 455/**
 456 * wait_event_interruptible - sleep until a condition gets true
 457 * @wq_head: the waitqueue to wait on
 458 * @condition: a C expression for the event to wait for
 459 *
 460 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 461 * @condition evaluates to true or a signal is received.
 462 * The @condition is checked each time the waitqueue @wq_head is woken up.
 463 *
 464 * wake_up() has to be called after changing any variable that could
 465 * change the result of the wait condition.
 466 *
 467 * The function will return -ERESTARTSYS if it was interrupted by a
 468 * signal and 0 if @condition evaluated to true.
 469 */
 470#define wait_event_interruptible(wq_head, condition)				\
 471({										\
 472	int __ret = 0;								\
 473	might_sleep();								\
 474	if (!(condition))							\
 475		__ret = __wait_event_interruptible(wq_head, condition);		\
 476	__ret;									\
 477})
 478
 479#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
 480	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 481		      TASK_INTERRUPTIBLE, 0, timeout,				\
 482		      __ret = schedule_timeout(__ret))
 483
 484/**
 485 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 486 * @wq_head: the waitqueue to wait on
 487 * @condition: a C expression for the event to wait for
 488 * @timeout: timeout, in jiffies
 489 *
 490 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 491 * @condition evaluates to true or a signal is received.
 492 * The @condition is checked each time the waitqueue @wq_head is woken up.
 493 *
 494 * wake_up() has to be called after changing any variable that could
 495 * change the result of the wait condition.
 496 *
 497 * Returns:
 498 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 499 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 500 * the remaining jiffies (at least 1) if the @condition evaluated
 501 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 502 * interrupted by a signal.
 503 */
 504#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
 505({										\
 506	long __ret = timeout;							\
 507	might_sleep();								\
 508	if (!___wait_cond_timeout(condition))					\
 509		__ret = __wait_event_interruptible_timeout(wq_head,		\
 510						condition, timeout);		\
 511	__ret;									\
 512})
 513
 514#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
 515({										\
 516	int __ret = 0;								\
 517	struct hrtimer_sleeper __t;						\
 518										\
 519	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
 520				      HRTIMER_MODE_REL);			\
 521	if ((timeout) != KTIME_MAX)						\
 522		hrtimer_start_range_ns(&__t.timer, timeout,			\
 523				       current->timer_slack_ns,			\
 524				       HRTIMER_MODE_REL);			\
 
 525										\
 526	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
 527		if (!__t.task) {						\
 528			__ret = -ETIME;						\
 529			break;							\
 530		}								\
 531		schedule());							\
 532										\
 533	hrtimer_cancel(&__t.timer);						\
 534	destroy_hrtimer_on_stack(&__t.timer);					\
 535	__ret;									\
 536})
 537
 538/**
 539 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 540 * @wq_head: the waitqueue to wait on
 541 * @condition: a C expression for the event to wait for
 542 * @timeout: timeout, as a ktime_t
 543 *
 544 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 545 * @condition evaluates to true or a signal is received.
 546 * The @condition is checked each time the waitqueue @wq_head is woken up.
 547 *
 548 * wake_up() has to be called after changing any variable that could
 549 * change the result of the wait condition.
 550 *
 551 * The function returns 0 if @condition became true, or -ETIME if the timeout
 552 * elapsed.
 553 */
 554#define wait_event_hrtimeout(wq_head, condition, timeout)			\
 555({										\
 556	int __ret = 0;								\
 557	might_sleep();								\
 558	if (!(condition))							\
 559		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
 560					       TASK_UNINTERRUPTIBLE);		\
 561	__ret;									\
 562})
 563
 564/**
 565 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 566 * @wq: the waitqueue to wait on
 567 * @condition: a C expression for the event to wait for
 568 * @timeout: timeout, as a ktime_t
 569 *
 570 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 571 * @condition evaluates to true or a signal is received.
 572 * The @condition is checked each time the waitqueue @wq is woken up.
 573 *
 574 * wake_up() has to be called after changing any variable that could
 575 * change the result of the wait condition.
 576 *
 577 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 578 * interrupted by a signal, or -ETIME if the timeout elapsed.
 579 */
 580#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
 581({										\
 582	long __ret = 0;								\
 583	might_sleep();								\
 584	if (!(condition))							\
 585		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
 586					       TASK_INTERRUPTIBLE);		\
 587	__ret;									\
 588})
 589
 590#define __wait_event_interruptible_exclusive(wq, condition)			\
 591	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
 592		      schedule())
 593
 594#define wait_event_interruptible_exclusive(wq, condition)			\
 595({										\
 596	int __ret = 0;								\
 597	might_sleep();								\
 598	if (!(condition))							\
 599		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
 600	__ret;									\
 601})
 602
 603#define __wait_event_killable_exclusive(wq, condition)				\
 604	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
 605		      schedule())
 606
 607#define wait_event_killable_exclusive(wq, condition)				\
 608({										\
 609	int __ret = 0;								\
 610	might_sleep();								\
 611	if (!(condition))							\
 612		__ret = __wait_event_killable_exclusive(wq, condition);		\
 613	__ret;									\
 614})
 615
 616
 617#define __wait_event_freezable_exclusive(wq, condition)				\
 618	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
 619			freezable_schedule())
 620
 621#define wait_event_freezable_exclusive(wq, condition)				\
 622({										\
 623	int __ret = 0;								\
 624	might_sleep();								\
 625	if (!(condition))							\
 626		__ret = __wait_event_freezable_exclusive(wq, condition);	\
 627	__ret;									\
 628})
 629
 630/**
 631 * wait_event_idle - wait for a condition without contributing to system load
 632 * @wq_head: the waitqueue to wait on
 633 * @condition: a C expression for the event to wait for
 634 *
 635 * The process is put to sleep (TASK_IDLE) until the
 636 * @condition evaluates to true.
 637 * The @condition is checked each time the waitqueue @wq_head is woken up.
 638 *
 639 * wake_up() has to be called after changing any variable that could
 640 * change the result of the wait condition.
 641 *
 642 */
 643#define wait_event_idle(wq_head, condition)					\
 644do {										\
 645	might_sleep();								\
 646	if (!(condition))							\
 647		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
 648} while (0)
 649
 650/**
 651 * wait_event_idle_exclusive - wait for a condition with contributing to system load
 652 * @wq_head: the waitqueue to wait on
 653 * @condition: a C expression for the event to wait for
 654 *
 655 * The process is put to sleep (TASK_IDLE) until the
 656 * @condition evaluates to true.
 657 * The @condition is checked each time the waitqueue @wq_head is woken up.
 658 *
 659 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 660 * set thus if other processes wait on the same list, when this
 661 * process is woken further processes are not considered.
 662 *
 663 * wake_up() has to be called after changing any variable that could
 664 * change the result of the wait condition.
 665 *
 666 */
 667#define wait_event_idle_exclusive(wq_head, condition)				\
 668do {										\
 669	might_sleep();								\
 670	if (!(condition))							\
 671		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
 672} while (0)
 673
 674#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
 675	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 676		      TASK_IDLE, 0, timeout,					\
 677		      __ret = schedule_timeout(__ret))
 678
 679/**
 680 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 681 * @wq_head: the waitqueue to wait on
 682 * @condition: a C expression for the event to wait for
 683 * @timeout: timeout, in jiffies
 684 *
 685 * The process is put to sleep (TASK_IDLE) until the
 686 * @condition evaluates to true. The @condition is checked each time
 687 * the waitqueue @wq_head is woken up.
 688 *
 689 * wake_up() has to be called after changing any variable that could
 690 * change the result of the wait condition.
 691 *
 692 * Returns:
 693 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 694 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 695 * or the remaining jiffies (at least 1) if the @condition evaluated
 696 * to %true before the @timeout elapsed.
 697 */
 698#define wait_event_idle_timeout(wq_head, condition, timeout)			\
 699({										\
 700	long __ret = timeout;							\
 701	might_sleep();								\
 702	if (!___wait_cond_timeout(condition))					\
 703		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
 704	__ret;									\
 705})
 706
 707#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
 708	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 709		      TASK_IDLE, 1, timeout,					\
 710		      __ret = schedule_timeout(__ret))
 711
 712/**
 713 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 714 * @wq_head: the waitqueue to wait on
 715 * @condition: a C expression for the event to wait for
 716 * @timeout: timeout, in jiffies
 717 *
 718 * The process is put to sleep (TASK_IDLE) until the
 719 * @condition evaluates to true. The @condition is checked each time
 720 * the waitqueue @wq_head is woken up.
 721 *
 722 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 723 * set thus if other processes wait on the same list, when this
 724 * process is woken further processes are not considered.
 725 *
 726 * wake_up() has to be called after changing any variable that could
 727 * change the result of the wait condition.
 728 *
 729 * Returns:
 730 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 731 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 732 * or the remaining jiffies (at least 1) if the @condition evaluated
 733 * to %true before the @timeout elapsed.
 734 */
 735#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
 736({										\
 737	long __ret = timeout;							\
 738	might_sleep();								\
 739	if (!___wait_cond_timeout(condition))					\
 740		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
 741	__ret;									\
 742})
 743
 744extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
 745extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
 746
 747#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
 748({										\
 749	int __ret;								\
 750	DEFINE_WAIT(__wait);							\
 751	if (exclusive)								\
 752		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
 753	do {									\
 754		__ret = fn(&(wq), &__wait);					\
 755		if (__ret)							\
 756			break;							\
 757	} while (!(condition));							\
 758	__remove_wait_queue(&(wq), &__wait);					\
 759	__set_current_state(TASK_RUNNING);					\
 760	__ret;									\
 761})
 762
 763
 764/**
 765 * wait_event_interruptible_locked - sleep until a condition gets true
 766 * @wq: the waitqueue to wait on
 767 * @condition: a C expression for the event to wait for
 768 *
 769 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 770 * @condition evaluates to true or a signal is received.
 771 * The @condition is checked each time the waitqueue @wq is woken up.
 772 *
 773 * It must be called with wq.lock being held.  This spinlock is
 774 * unlocked while sleeping but @condition testing is done while lock
 775 * is held and when this macro exits the lock is held.
 776 *
 777 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 778 * functions which must match the way they are locked/unlocked outside
 779 * of this macro.
 780 *
 781 * wake_up_locked() has to be called after changing any variable that could
 782 * change the result of the wait condition.
 783 *
 784 * The function will return -ERESTARTSYS if it was interrupted by a
 785 * signal and 0 if @condition evaluated to true.
 786 */
 787#define wait_event_interruptible_locked(wq, condition)				\
 788	((condition)								\
 789	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
 790
 791/**
 792 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 793 * @wq: the waitqueue to wait on
 794 * @condition: a C expression for the event to wait for
 795 *
 796 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 797 * @condition evaluates to true or a signal is received.
 798 * The @condition is checked each time the waitqueue @wq is woken up.
 799 *
 800 * It must be called with wq.lock being held.  This spinlock is
 801 * unlocked while sleeping but @condition testing is done while lock
 802 * is held and when this macro exits the lock is held.
 803 *
 804 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 805 * functions which must match the way they are locked/unlocked outside
 806 * of this macro.
 807 *
 808 * wake_up_locked() has to be called after changing any variable that could
 809 * change the result of the wait condition.
 810 *
 811 * The function will return -ERESTARTSYS if it was interrupted by a
 812 * signal and 0 if @condition evaluated to true.
 813 */
 814#define wait_event_interruptible_locked_irq(wq, condition)			\
 815	((condition)								\
 816	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
 817
 818/**
 819 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 820 * @wq: the waitqueue to wait on
 821 * @condition: a C expression for the event to wait for
 822 *
 823 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 824 * @condition evaluates to true or a signal is received.
 825 * The @condition is checked each time the waitqueue @wq is woken up.
 826 *
 827 * It must be called with wq.lock being held.  This spinlock is
 828 * unlocked while sleeping but @condition testing is done while lock
 829 * is held and when this macro exits the lock is held.
 830 *
 831 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 832 * functions which must match the way they are locked/unlocked outside
 833 * of this macro.
 834 *
 835 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 836 * set, so that if this process is woken up while other processes wait
 837 * on the list, further (exclusive) waiters are not considered.
 838 *
 839 * wake_up_locked() has to be called after changing any variable that could
 840 * change the result of the wait condition.
 841 *
 842 * The function will return -ERESTARTSYS if it was interrupted by a
 843 * signal and 0 if @condition evaluated to true.
 844 */
 845#define wait_event_interruptible_exclusive_locked(wq, condition)		\
 846	((condition)								\
 847	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
 848
 849/**
 850 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 851 * @wq: the waitqueue to wait on
 852 * @condition: a C expression for the event to wait for
 853 *
 854 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 855 * @condition evaluates to true or a signal is received.
 856 * The @condition is checked each time the waitqueue @wq is woken up.
 857 *
 858 * It must be called with wq.lock being held.  This spinlock is
 859 * unlocked while sleeping but @condition testing is done while lock
 860 * is held and when this macro exits the lock is held.
 861 *
 862 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 863 * functions which must match the way they are locked/unlocked outside
 864 * of this macro.
 865 *
 866 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 867 * set, so that if this process is woken up while other processes wait
 868 * on the list, further (exclusive) waiters are not considered.
 869 *
 870 * wake_up_locked() has to be called after changing any variable that could
 871 * change the result of the wait condition.
 872 *
 873 * The function will return -ERESTARTSYS if it was interrupted by a
 874 * signal and 0 if @condition evaluated to true.
 875 */
 876#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
 877	((condition)								\
 878	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
 879
 880
 881#define __wait_event_killable(wq, condition)					\
 882	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
 883
 884/**
 885 * wait_event_killable - sleep until a condition gets true
 886 * @wq_head: the waitqueue to wait on
 887 * @condition: a C expression for the event to wait for
 888 *
 889 * The process is put to sleep (TASK_KILLABLE) until the
 890 * @condition evaluates to true or a kill signal is received.
 891 * The @condition is checked each time the waitqueue @wq_head is woken up.
 892 *
 893 * wake_up() has to be called after changing any variable that could
 894 * change the result of the wait condition.
 895 *
 896 * The function will return -ERESTARTSYS if it was interrupted by a
 897 * signal and 0 if @condition evaluated to true.
 898 */
 899#define wait_event_killable(wq_head, condition)					\
 900({										\
 901	int __ret = 0;								\
 902	might_sleep();								\
 903	if (!(condition))							\
 904		__ret = __wait_event_killable(wq_head, condition);		\
 905	__ret;									\
 906})
 907
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 908#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
 909	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 910		      TASK_KILLABLE, 0, timeout,				\
 911		      __ret = schedule_timeout(__ret))
 912
 913/**
 914 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 915 * @wq_head: the waitqueue to wait on
 916 * @condition: a C expression for the event to wait for
 917 * @timeout: timeout, in jiffies
 918 *
 919 * The process is put to sleep (TASK_KILLABLE) until the
 920 * @condition evaluates to true or a kill signal is received.
 921 * The @condition is checked each time the waitqueue @wq_head is woken up.
 922 *
 923 * wake_up() has to be called after changing any variable that could
 924 * change the result of the wait condition.
 925 *
 926 * Returns:
 927 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 928 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 929 * the remaining jiffies (at least 1) if the @condition evaluated
 930 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 931 * interrupted by a kill signal.
 932 *
 933 * Only kill signals interrupt this process.
 934 */
 935#define wait_event_killable_timeout(wq_head, condition, timeout)		\
 936({										\
 937	long __ret = timeout;							\
 938	might_sleep();								\
 939	if (!___wait_cond_timeout(condition))					\
 940		__ret = __wait_event_killable_timeout(wq_head,			\
 941						condition, timeout);		\
 942	__ret;									\
 943})
 944
 945
 946#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
 947	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 948			    spin_unlock_irq(&lock);				\
 949			    cmd;						\
 950			    schedule();						\
 951			    spin_lock_irq(&lock))
 952
 953/**
 954 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 955 *			     condition is checked under the lock. This
 956 *			     is expected to be called with the lock
 957 *			     taken.
 958 * @wq_head: the waitqueue to wait on
 959 * @condition: a C expression for the event to wait for
 960 * @lock: a locked spinlock_t, which will be released before cmd
 961 *	  and schedule() and reacquired afterwards.
 962 * @cmd: a command which is invoked outside the critical section before
 963 *	 sleep
 964 *
 965 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 966 * @condition evaluates to true. The @condition is checked each time
 967 * the waitqueue @wq_head is woken up.
 968 *
 969 * wake_up() has to be called after changing any variable that could
 970 * change the result of the wait condition.
 971 *
 972 * This is supposed to be called while holding the lock. The lock is
 973 * dropped before invoking the cmd and going to sleep and is reacquired
 974 * afterwards.
 975 */
 976#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
 977do {										\
 978	if (condition)								\
 979		break;								\
 980	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
 981} while (0)
 982
 983/**
 984 * wait_event_lock_irq - sleep until a condition gets true. The
 985 *			 condition is checked under the lock. This
 986 *			 is expected to be called with the lock
 987 *			 taken.
 988 * @wq_head: the waitqueue to wait on
 989 * @condition: a C expression for the event to wait for
 990 * @lock: a locked spinlock_t, which will be released before schedule()
 991 *	  and reacquired afterwards.
 992 *
 993 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 994 * @condition evaluates to true. The @condition is checked each time
 995 * the waitqueue @wq_head is woken up.
 996 *
 997 * wake_up() has to be called after changing any variable that could
 998 * change the result of the wait condition.
 999 *
1000 * This is supposed to be called while holding the lock. The lock is
1001 * dropped before going to sleep and is reacquired afterwards.
1002 */
1003#define wait_event_lock_irq(wq_head, condition, lock)				\
1004do {										\
1005	if (condition)								\
1006		break;								\
1007	__wait_event_lock_irq(wq_head, condition, lock, );			\
1008} while (0)
1009
1010
1011#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
1012	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
1013		      spin_unlock_irq(&lock);					\
1014		      cmd;							\
1015		      schedule();						\
1016		      spin_lock_irq(&lock))
1017
1018/**
1019 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
1020 *		The condition is checked under the lock. This is expected to
1021 *		be called with the lock taken.
1022 * @wq_head: the waitqueue to wait on
1023 * @condition: a C expression for the event to wait for
1024 * @lock: a locked spinlock_t, which will be released before cmd and
1025 *	  schedule() and reacquired afterwards.
1026 * @cmd: a command which is invoked outside the critical section before
1027 *	 sleep
1028 *
1029 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1030 * @condition evaluates to true or a signal is received. The @condition is
1031 * checked each time the waitqueue @wq_head is woken up.
1032 *
1033 * wake_up() has to be called after changing any variable that could
1034 * change the result of the wait condition.
1035 *
1036 * This is supposed to be called while holding the lock. The lock is
1037 * dropped before invoking the cmd and going to sleep and is reacquired
1038 * afterwards.
1039 *
1040 * The macro will return -ERESTARTSYS if it was interrupted by a signal
1041 * and 0 if @condition evaluated to true.
1042 */
1043#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
1044({										\
1045	int __ret = 0;								\
1046	if (!(condition))							\
1047		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1048						condition, lock, cmd);		\
1049	__ret;									\
1050})
1051
1052/**
1053 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
1054 *		The condition is checked under the lock. This is expected
1055 *		to be called with the lock taken.
1056 * @wq_head: the waitqueue to wait on
1057 * @condition: a C expression for the event to wait for
1058 * @lock: a locked spinlock_t, which will be released before schedule()
1059 *	  and reacquired afterwards.
1060 *
1061 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1062 * @condition evaluates to true or a signal is received. The @condition is
1063 * checked each time the waitqueue @wq_head is woken up.
1064 *
1065 * wake_up() has to be called after changing any variable that could
1066 * change the result of the wait condition.
1067 *
1068 * This is supposed to be called while holding the lock. The lock is
1069 * dropped before going to sleep and is reacquired afterwards.
1070 *
1071 * The macro will return -ERESTARTSYS if it was interrupted by a signal
1072 * and 0 if @condition evaluated to true.
1073 */
1074#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
1075({										\
1076	int __ret = 0;								\
1077	if (!(condition))							\
1078		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1079						condition, lock,);		\
1080	__ret;									\
1081})
1082
1083#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
1084	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
1085		      state, 0, timeout,					\
1086		      spin_unlock_irq(&lock);					\
1087		      __ret = schedule_timeout(__ret);				\
1088		      spin_lock_irq(&lock));
1089
1090/**
1091 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
1092 *		true or a timeout elapses. The condition is checked under
1093 *		the lock. This is expected to be called with the lock taken.
1094 * @wq_head: the waitqueue to wait on
1095 * @condition: a C expression for the event to wait for
1096 * @lock: a locked spinlock_t, which will be released before schedule()
1097 *	  and reacquired afterwards.
1098 * @timeout: timeout, in jiffies
1099 *
1100 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1101 * @condition evaluates to true or a signal is received. The @condition is
1102 * checked each time the waitqueue @wq_head is woken up.
1103 *
1104 * wake_up() has to be called after changing any variable that could
1105 * change the result of the wait condition.
1106 *
1107 * This is supposed to be called while holding the lock. The lock is
1108 * dropped before going to sleep and is reacquired afterwards.
1109 *
1110 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1111 * was interrupted by a signal, and the remaining jiffies otherwise
1112 * if the condition evaluated to true before the timeout elapsed.
1113 */
1114#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
1115						  timeout)			\
1116({										\
1117	long __ret = timeout;							\
1118	if (!___wait_cond_timeout(condition))					\
1119		__ret = __wait_event_lock_irq_timeout(				\
1120					wq_head, condition, lock, timeout,	\
1121					TASK_INTERRUPTIBLE);			\
1122	__ret;									\
1123})
1124
    /**
     * wait_event_lock_irq_timeout - sleep (TASK_UNINTERRUPTIBLE) until a
     *		condition gets true or a timeout elapses. The condition is
     *		checked under the lock. This is expected to be called with
     *		the lock taken.
     * @wq_head: the waitqueue to wait on
     * @condition: a C expression for the event to wait for
     * @lock: a locked spinlock_t, which will be released before schedule()
     *	  and reacquired afterwards (spin_lock_irq()/spin_unlock_irq())
     * @timeout: timeout, in jiffies
     *
     * Returns:
     * 0 if the @condition evaluated to %false after the @timeout elapsed,
     * 1 if the @condition evaluated to %true after the @timeout elapsed,
     * or the remaining jiffies (at least 1) if the @condition evaluated
     * to %true before the @timeout elapsed.
     */
1125#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
1126({										\
1127	long __ret = timeout;							\
1128	if (!___wait_cond_timeout(condition))					\
1129		__ret = __wait_event_lock_irq_timeout(				\
1130					wq_head, condition, lock, timeout,	\
1131					TASK_UNINTERRUPTIBLE);			\
1132	__ret;									\
1133})
1134
1135/*
1136 * Waitqueues which are removed from the waitqueue_head at wakeup time
1137 */
1138void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1139bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1140long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1141void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1142long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
1143int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1144int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1145
1146#define DEFINE_WAIT_FUNC(name, function)					\
1147	struct wait_queue_entry name = {					\
1148		.private	= current,					\
1149		.func		= function,					\
1150		.entry		= LIST_HEAD_INIT((name).entry),			\
1151	}
1152
1153#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1154
    /*
     * init_wait - initialize @wait for the current task, using
     * autoremove_wake_function as the wake callback, with an empty
     * list linkage and all flags cleared.
     */
1155#define init_wait(wait)								\
1156	do {									\
1157		(wait)->private = current;					\
1158		(wait)->func = autoremove_wake_function;			\
1159		INIT_LIST_HEAD(&(wait)->entry);					\
1160		(wait)->flags = 0;						\
1161	} while (0)
1162
1163bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
 
1164
1165#endif /* _LINUX_WAIT_H */
v6.9.4
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_WAIT_H
   3#define _LINUX_WAIT_H
   4/*
   5 * Linux wait queue related types and methods
   6 */
   7#include <linux/list.h>
   8#include <linux/stddef.h>
   9#include <linux/spinlock.h>
  10
  11#include <asm/current.h>
 
  12
  13typedef struct wait_queue_entry wait_queue_entry_t;
  14
  15typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
  16int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
  17
  18/* wait_queue_entry::flags */
  19#define WQ_FLAG_EXCLUSIVE	0x01
  20#define WQ_FLAG_WOKEN		0x02
  21#define WQ_FLAG_CUSTOM		0x04
  22#define WQ_FLAG_DONE		0x08
  23#define WQ_FLAG_PRIORITY	0x10
 
  24
  25/*
  26 * A single wait-queue entry structure:
  27 */
  28struct wait_queue_entry {
  29	unsigned int		flags;
  30	void			*private;
  31	wait_queue_func_t	func;
  32	struct list_head	entry;
  33};
  34
  35struct wait_queue_head {
  36	spinlock_t		lock;
  37	struct list_head	head;
  38};
  39typedef struct wait_queue_head wait_queue_head_t;
  40
  41struct task_struct;
  42
  43/*
  44 * Macros for declaration and initialisation of the datatypes
  45 */
  46
  47#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
  48	.private	= tsk,							\
  49	.func		= default_wake_function,				\
  50	.entry		= { NULL, NULL } }
  51
  52#define DECLARE_WAITQUEUE(name, tsk)						\
  53	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
  54
  55#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
  56	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
  57	.head		= LIST_HEAD_INIT(name.head) }
  58
  59#define DECLARE_WAIT_QUEUE_HEAD(name) \
  60	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
  61
  62extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
  63
  64#define init_waitqueue_head(wq_head)						\
  65	do {									\
  66		static struct lock_class_key __key;				\
  67										\
  68		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
  69	} while (0)
  70
  71#ifdef CONFIG_LOCKDEP
  72# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
  73	({ init_waitqueue_head(&name); name; })
  74# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
  75	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
  76#else
  77# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
  78#endif
  79
  80static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
  81{
  82	wq_entry->flags		= 0;
  83	wq_entry->private	= p;
  84	wq_entry->func		= default_wake_function;
  85}
  86
  87static inline void
  88init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
  89{
  90	wq_entry->flags		= 0;
  91	wq_entry->private	= NULL;
  92	wq_entry->func		= func;
  93}
  94
  95/**
  96 * waitqueue_active -- locklessly test for waiters on the queue
  97 * @wq_head: the waitqueue to test for waiters
  98 *
  99 * returns true if the wait list is not empty
 100 *
 101 * NOTE: this function is lockless and requires care, incorrect usage _will_
 102 * lead to sporadic and non-obvious failure.
 103 *
 104 * Use either while holding wait_queue_head::lock or when used for wakeups
 105 * with an extra smp_mb() like::
 106 *
 107 *      CPU0 - waker                    CPU1 - waiter
 108 *
 109 *                                      for (;;) {
 110 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 111 *      smp_mb();                         // smp_mb() from set_current_state()
 112 *      if (waitqueue_active(wq_head))         if (@cond)
 113 *        wake_up(wq_head);                      break;
 114 *                                        schedule();
 115 *                                      }
 116 *                                      finish_wait(&wq_head, &wait);
 117 *
 118 * Because without the explicit smp_mb() it's possible for the
 119 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 120 * observe an empty wait list while the waiter might not observe @cond.
 121 *
 122 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 123 * which (when the lock is uncontended) are of roughly equal cost.
 124 */
 125static inline int waitqueue_active(struct wait_queue_head *wq_head)
 126{
 127	return !list_empty(&wq_head->head);
 128}
 129
 130/**
 131 * wq_has_single_sleeper - check if there is only one sleeper
 132 * @wq_head: wait queue head
 133 *
 134 * Returns true if wq_head has only one sleeper on the list.
 135 *
 136 * Please refer to the comment for waitqueue_active.
 137 */
 138static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
 139{
 140	return list_is_singular(&wq_head->head);
 141}
 142
 143/**
 144 * wq_has_sleeper - check if there are any waiting processes
 145 * @wq_head: wait queue head
 146 *
 147 * Returns true if wq_head has waiting processes
 148 *
 149 * Please refer to the comment for waitqueue_active.
 150 */
 151static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
 152{
 153	/*
 154	 * We need to be sure we are in sync with the
 155	 * add_wait_queue modifications to the wait queue.
 156	 *
 157	 * This memory barrier should be paired with one on the
 158	 * waiting side.
 159	 */
 160	smp_mb();
 161	return waitqueue_active(wq_head);
 162}
 163
 164extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 165extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 166extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 167extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 168
    /*
     * Add @wq_entry near the head of the queue, but after any entries that
     * have WQ_FLAG_PRIORITY set, so that priority waiters always remain in
     * front of ordinary ones.
     * NOTE(review): presumably the caller holds wq_head->lock (this is the
     * unlocked "__" variant; the locked one is add_wait_queue()) -- confirm
     * at call sites.
     */
  169static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
  170{
  171	struct list_head *head = &wq_head->head;
  172	struct wait_queue_entry *wq;
  173
  174	list_for_each_entry(wq, &wq_head->head, entry) {
  175		if (!(wq->flags & WQ_FLAG_PRIORITY))
  176			break;
  177		head = &wq->entry;
  178	}
  179	list_add(&wq_entry->entry, head);
  180}
 181
 182/*
 183 * Used for wake-one threads:
 184 */
 185static inline void
 186__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 187{
 188	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 189	__add_wait_queue(wq_head, wq_entry);
 190}
 191
 192static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 193{
 194	list_add_tail(&wq_entry->entry, &wq_head->head);
 195}
 196
 197static inline void
 198__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 199{
 200	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 201	__add_wait_queue_entry_tail(wq_head, wq_entry);
 202}
 203
 204static inline void
 205__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 206{
 207	list_del(&wq_entry->entry);
 208}
 209
 210int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
 211void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 212void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 
 
 213void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 214void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 215void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 216void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 217void __wake_up_pollfree(struct wait_queue_head *wq_head);
 218
 219#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 220#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
 221#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
 222#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
 223#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
 224
 225#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 226#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
 227#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
 228#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
 229
 230/*
 231 * Wakeup macros to be used to report events to the targets.
 232 */
 233#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
 234#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
 235#define wake_up_poll(x, m)							\
 236	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
 237#define wake_up_poll_on_current_cpu(x, m)					\
 238	__wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m))
 239#define wake_up_locked_poll(x, m)						\
 240	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
 241#define wake_up_interruptible_poll(x, m)					\
 242	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
 243#define wake_up_interruptible_sync_poll(x, m)					\
 244	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 245#define wake_up_interruptible_sync_poll_locked(x, m)				\
 246	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 247
 248/**
 249 * wake_up_pollfree - signal that a polled waitqueue is going away
 250 * @wq_head: the wait queue head
 251 *
 252 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 253 * lifetime is tied to a task rather than to the 'struct file' being polled,
 254 * this function must be called before the waitqueue is freed so that
 255 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 256 *
 257 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 258 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 259 */
 260static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
 261{
 262	/*
 263	 * For performance reasons, we don't always take the queue lock here.
 264	 * Therefore, we might race with someone removing the last entry from
 265	 * the queue, and proceed while they still hold the queue lock.
 266	 * However, rcu_read_lock() is required to be held in such cases, so we
 267	 * can safely proceed with an RCU-delayed free.
 268	 */
 269	if (waitqueue_active(wq_head))
 270		__wake_up_pollfree(wq_head);
 271}
 272
 273#define ___wait_cond_timeout(condition)						\
 274({										\
 275	bool __cond = (condition);						\
 276	if (__cond && !__ret)							\
 277		__ret = 1;							\
 278	__cond || !__ret;							\
 279})
 280
 281#define ___wait_is_interruptible(state)						\
 282	(!__builtin_constant_p(state) ||					\
 283	 (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
 284
 285extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
 286
 287/*
 288 * The below macro ___wait_event() has an explicit shadow of the __ret
 289 * variable when used from the wait_event_*() macros.
 290 *
 291 * This is so that both can use the ___wait_cond_timeout() construct
 292 * to wrap the condition.
 293 *
 294 * The type inconsistency of the wait_event_*() __ret variable is also
 295 * on purpose; we use long where we can return timeout values and int
 296 * otherwise.
 297 */
 298
 299#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
 300({										\
 301	__label__ __out;							\
 302	struct wait_queue_entry __wq_entry;					\
 303	long __ret = ret;	/* explicit shadow */				\
 304										\
 305	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
 306	for (;;) {								\
 307		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
 308										\
 309		if (condition)							\
 310			break;							\
 311										\
 312		if (___wait_is_interruptible(state) && __int) {			\
 313			__ret = __int;						\
 314			goto __out;						\
 315		}								\
 316										\
 317		cmd;								\
 318	}									\
 319	finish_wait(&wq_head, &__wq_entry);					\
 320__out:	__ret;									\
 321})
 322
 323#define __wait_event(wq_head, condition)					\
 324	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 325			    schedule())
 326
 327/**
 328 * wait_event - sleep until a condition gets true
 329 * @wq_head: the waitqueue to wait on
 330 * @condition: a C expression for the event to wait for
 331 *
 332 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 333 * @condition evaluates to true. The @condition is checked each time
 334 * the waitqueue @wq_head is woken up.
 335 *
 336 * wake_up() has to be called after changing any variable that could
 337 * change the result of the wait condition.
 338 */
 339#define wait_event(wq_head, condition)						\
 340do {										\
 341	might_sleep();								\
 342	if (condition)								\
 343		break;								\
 344	__wait_event(wq_head, condition);					\
 345} while (0)
 346
 347#define __io_wait_event(wq_head, condition)					\
 348	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 349			    io_schedule())
 350
 351/*
 352 * io_wait_event() -- like wait_event() but with io_schedule()
 353 */
 354#define io_wait_event(wq_head, condition)					\
 355do {										\
 356	might_sleep();								\
 357	if (condition)								\
 358		break;								\
 359	__io_wait_event(wq_head, condition);					\
 360} while (0)
 361
 362#define __wait_event_freezable(wq_head, condition)				\
 363	___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE),	\
 364			0, 0, schedule())
 365
 366/**
 367 * wait_event_freezable - sleep (or freeze) until a condition gets true
 368 * @wq_head: the waitqueue to wait on
 369 * @condition: a C expression for the event to wait for
 370 *
 371 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 372 * to system load) until the @condition evaluates to true. The
 373 * @condition is checked each time the waitqueue @wq_head is woken up.
 374 *
 375 * wake_up() has to be called after changing any variable that could
 376 * change the result of the wait condition.
 377 */
 378#define wait_event_freezable(wq_head, condition)				\
 379({										\
 380	int __ret = 0;								\
 381	might_sleep();								\
 382	if (!(condition))							\
 383		__ret = __wait_event_freezable(wq_head, condition);		\
 384	__ret;									\
 385})
 386
 387#define __wait_event_timeout(wq_head, condition, timeout)			\
 388	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 389		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
 390		      __ret = schedule_timeout(__ret))
 391
 392/**
 393 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 394 * @wq_head: the waitqueue to wait on
 395 * @condition: a C expression for the event to wait for
 396 * @timeout: timeout, in jiffies
 397 *
 398 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 399 * @condition evaluates to true. The @condition is checked each time
 400 * the waitqueue @wq_head is woken up.
 401 *
 402 * wake_up() has to be called after changing any variable that could
 403 * change the result of the wait condition.
 404 *
 405 * Returns:
 406 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 407 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 408 * or the remaining jiffies (at least 1) if the @condition evaluated
 409 * to %true before the @timeout elapsed.
 410 */
 411#define wait_event_timeout(wq_head, condition, timeout)				\
 412({										\
 413	long __ret = timeout;							\
 414	might_sleep();								\
 415	if (!___wait_cond_timeout(condition))					\
 416		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
 417	__ret;									\
 418})
 419
 420#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
 421	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 422		      (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout,		\
 423		      __ret = schedule_timeout(__ret))
 424
 425/*
 426 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 427 * increasing load and is freezable.
 428 */
 429#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
 430({										\
 431	long __ret = timeout;							\
 432	might_sleep();								\
 433	if (!___wait_cond_timeout(condition))					\
 434		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
 435	__ret;									\
 436})
 437
 438#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
 439	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
 440			    cmd1; schedule(); cmd2)
 441/*
 442 * Just like wait_event_cmd(), except it sets exclusive flag
 443 */
 444#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
 445do {										\
 446	if (condition)								\
 447		break;								\
 448	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
 449} while (0)
 450
 451#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
 452	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 453			    cmd1; schedule(); cmd2)
 454
 455/**
 456 * wait_event_cmd - sleep until a condition gets true
 457 * @wq_head: the waitqueue to wait on
 458 * @condition: a C expression for the event to wait for
 459 * @cmd1: the command will be executed before sleep
 460 * @cmd2: the command will be executed after sleep
 461 *
 462 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 463 * @condition evaluates to true. The @condition is checked each time
 464 * the waitqueue @wq_head is woken up.
 465 *
 466 * wake_up() has to be called after changing any variable that could
 467 * change the result of the wait condition.
 468 */
/* Fast path: do nothing when @condition is already true; see kernel-doc above. */
 469#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
 470do {										\
 471	if (condition)								\
 472		break;								\
 473	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
 474} while (0)
 475
/* Interruptible wait: non-exclusive, no timeout. */
 476#define __wait_event_interruptible(wq_head, condition)				\
 477	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 478		      schedule())
 479
 480/**
 481 * wait_event_interruptible - sleep until a condition gets true
 482 * @wq_head: the waitqueue to wait on
 483 * @condition: a C expression for the event to wait for
 484 *
 485 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 486 * @condition evaluates to true or a signal is received.
 487 * The @condition is checked each time the waitqueue @wq_head is woken up.
 488 *
 489 * wake_up() has to be called after changing any variable that could
 490 * change the result of the wait condition.
 491 *
 492 * The function will return -ERESTARTSYS if it was interrupted by a
 493 * signal and 0 if @condition evaluated to true.
 494 */
/* Fast path: return 0 without sleeping when @condition is already true. */
 495#define wait_event_interruptible(wq_head, condition)				\
 496({										\
 497	int __ret = 0;								\
 498	might_sleep();								\
 499	if (!(condition))							\
 500		__ret = __wait_event_interruptible(wq_head, condition);		\
 501	__ret;									\
 502})
 503
/*
 * NOTE: __ret here is the accumulator declared inside ___wait_event()
 * (definition not visible in this chunk); schedule_timeout() folds the
 * remaining jiffies back into it each loop iteration.
 */
 504#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
 505	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 506		      TASK_INTERRUPTIBLE, 0, timeout,				\
 507		      __ret = schedule_timeout(__ret))
 508
 509/**
 510 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 511 * @wq_head: the waitqueue to wait on
 512 * @condition: a C expression for the event to wait for
 513 * @timeout: timeout, in jiffies
 514 *
 515 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 516 * @condition evaluates to true or a signal is received.
 517 * The @condition is checked each time the waitqueue @wq_head is woken up.
 518 *
 519 * wake_up() has to be called after changing any variable that could
 520 * change the result of the wait condition.
 521 *
 522 * Returns:
 523 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 524 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 525 * the remaining jiffies (at least 1) if the @condition evaluated
 526 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 527 * interrupted by a signal.
 528 */
/* Fast path: return the full @timeout when the condition already holds. */
 529#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
 530({										\
 531	long __ret = timeout;							\
 532	might_sleep();								\
 533	if (!___wait_cond_timeout(condition))					\
 534		__ret = __wait_event_interruptible_timeout(wq_head,		\
 535						condition, timeout);		\
 536	__ret;									\
 537})
 538
/*
 * High-resolution timed wait.  A @timeout of KTIME_MAX means "no timer
 * armed" (wait indefinitely).  The sleeper's expiry is expected to clear
 * __t.task; the loop body detects that and converts it to -ETIME.  The
 * timer is always cancelled and the on-stack sleeper destroyed on exit.
 */
 539#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
 540({										\
 541	int __ret = 0;								\
 542	struct hrtimer_sleeper __t;						\
 543										\
 544	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
 545				      HRTIMER_MODE_REL);			\
 546	if ((timeout) != KTIME_MAX) {						\
 547		hrtimer_set_expires_range_ns(&__t.timer, timeout,		\
 548					current->timer_slack_ns);		\
 549		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);		\
 550	}									\
 551										\
 552	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
 553		if (!__t.task) {						\
 554			__ret = -ETIME;						\
 555			break;							\
 556		}								\
 557		schedule());							\
 558										\
 559	hrtimer_cancel(&__t.timer);						\
 560	destroy_hrtimer_on_stack(&__t.timer);					\
 561	__ret;									\
 562})
 563
 564/**
 565 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 566 * @wq_head: the waitqueue to wait on
 567 * @condition: a C expression for the event to wait for
 568 * @timeout: timeout, as a ktime_t
 569 *
 570 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 571 * @condition evaluates to true (the sleep is TASK_UNINTERRUPTIBLE, so it
 572 * is not interruptible by signals).
 572 * The @condition is checked each time the waitqueue @wq_head is woken up.
 573 *
 574 * wake_up() has to be called after changing any variable that could
 575 * change the result of the wait condition.
 576 *
 577 * The function returns 0 if @condition became true, or -ETIME if the timeout
 578 * elapsed.
 579 */
/* Uninterruptible flavour: returns 0 (condition true) or -ETIME only. */
 580#define wait_event_hrtimeout(wq_head, condition, timeout)			\
 581({										\
 582	int __ret = 0;								\
 583	might_sleep();								\
 584	if (!(condition))							\
 585		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
 586					       TASK_UNINTERRUPTIBLE);		\
 587	__ret;									\
 588})
 589
 590/**
 591 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 592 * @wq: the waitqueue to wait on
 593 * @condition: a C expression for the event to wait for
 594 * @timeout: timeout, as a ktime_t
 595 *
 596 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 597 * @condition evaluates to true or a signal is received.
 598 * The @condition is checked each time the waitqueue @wq is woken up.
 599 *
 600 * wake_up() has to be called after changing any variable that could
 601 * change the result of the wait condition.
 602 *
 603 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 604 * interrupted by a signal, or -ETIME if the timeout elapsed.
 605 */
/* Interruptible flavour: 0, -ERESTARTSYS or -ETIME (see kernel-doc above). */
 606#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
 607({										\
 608	long __ret = 0;								\
 609	might_sleep();								\
 610	if (!(condition))							\
 611		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
 612					       TASK_INTERRUPTIBLE);		\
 613	__ret;									\
 614})
 615
/* Exclusive interruptible wait: queued with WQ_FLAG_EXCLUSIVE (the '1' argument). */
 616#define __wait_event_interruptible_exclusive(wq, condition)			\
 617	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
 618		      schedule())
 619
/* Fast path: 0 when @condition already true; otherwise -ERESTARTSYS on signal. */
 620#define wait_event_interruptible_exclusive(wq, condition)			\
 621({										\
 622	int __ret = 0;								\
 623	might_sleep();								\
 624	if (!(condition))							\
 625		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
 626	__ret;									\
 627})
 628
/* Exclusive wait in TASK_KILLABLE: same shape as the interruptible variant above. */
 629#define __wait_event_killable_exclusive(wq, condition)				\
 630	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
 631		      schedule())
 632
 633#define wait_event_killable_exclusive(wq, condition)				\
 634({										\
 635	int __ret = 0;								\
 636	might_sleep();								\
 637	if (!(condition))							\
 638		__ret = __wait_event_killable_exclusive(wq, condition);		\
 639	__ret;									\
 640})
 641
 642
/* Exclusive, freezable, interruptible wait (TASK_INTERRUPTIBLE|TASK_FREEZABLE). */
 643#define __wait_event_freezable_exclusive(wq, condition)				\
 644	___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\
 645			schedule())
 646
 647#define wait_event_freezable_exclusive(wq, condition)				\
 648({										\
 649	int __ret = 0;								\
 650	might_sleep();								\
 651	if (!(condition))							\
 652		__ret = __wait_event_freezable_exclusive(wq, condition);	\
 653	__ret;									\
 654})
 655
 656/**
 657 * wait_event_idle - wait for a condition without contributing to system load
 658 * @wq_head: the waitqueue to wait on
 659 * @condition: a C expression for the event to wait for
 660 *
 661 * The process is put to sleep (TASK_IDLE) until the
 662 * @condition evaluates to true.
 663 * The @condition is checked each time the waitqueue @wq_head is woken up.
 664 *
 665 * wake_up() has to be called after changing any variable that could
 666 * change the result of the wait condition.
 667 *
 668 */
/* No return value; TASK_IDLE avoids contributing to load average (kernel-doc above). */
 669#define wait_event_idle(wq_head, condition)					\
 670do {										\
 671	might_sleep();								\
 672	if (!(condition))							\
 673		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
 674} while (0)
 675
 676/**
 677 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 678 * @wq_head: the waitqueue to wait on
 679 * @condition: a C expression for the event to wait for
 680 *
 681 * The process is put to sleep (TASK_IDLE) until the
 682 * @condition evaluates to true.
 683 * The @condition is checked each time the waitqueue @wq_head is woken up.
 684 *
 685 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 686 * set thus if other processes wait on the same list, when this
 687 * process is woken further processes are not considered.
 688 *
 689 * wake_up() has to be called after changing any variable that could
 690 * change the result of the wait condition.
 691 *
 692 */
/* Exclusive (WQ_FLAG_EXCLUSIVE — the '1' argument) variant of wait_event_idle(). */
 693#define wait_event_idle_exclusive(wq_head, condition)				\
 694do {										\
 695	might_sleep();								\
 696	if (!(condition))							\
 697		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
 698} while (0)
 699
/* Timed TASK_IDLE wait; __ret lives inside ___wait_event() and tracks remaining jiffies. */
 700#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
 701	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 702		      TASK_IDLE, 0, timeout,					\
 703		      __ret = schedule_timeout(__ret))
 704
 705/**
 706 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 707 * @wq_head: the waitqueue to wait on
 708 * @condition: a C expression for the event to wait for
 709 * @timeout: timeout, in jiffies
 710 *
 711 * The process is put to sleep (TASK_IDLE) until the
 712 * @condition evaluates to true. The @condition is checked each time
 713 * the waitqueue @wq_head is woken up.
 714 *
 715 * wake_up() has to be called after changing any variable that could
 716 * change the result of the wait condition.
 717 *
 718 * Returns:
 719 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 720 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 721 * or the remaining jiffies (at least 1) if the @condition evaluated
 722 * to %true before the @timeout elapsed.
 723 */
/* Fast path: return the full @timeout when the condition already holds. */
 724#define wait_event_idle_timeout(wq_head, condition, timeout)			\
 725({										\
 726	long __ret = timeout;							\
 727	might_sleep();								\
 728	if (!___wait_cond_timeout(condition))					\
 729		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
 730	__ret;									\
 731})
 732
/* Exclusive (WQ_FLAG_EXCLUSIVE) timed TASK_IDLE wait. */
 733#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
 734	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 735		      TASK_IDLE, 1, timeout,					\
 736		      __ret = schedule_timeout(__ret))
 737
 738/**
 739 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 740 * @wq_head: the waitqueue to wait on
 741 * @condition: a C expression for the event to wait for
 742 * @timeout: timeout, in jiffies
 743 *
 744 * The process is put to sleep (TASK_IDLE) until the
 745 * @condition evaluates to true. The @condition is checked each time
 746 * the waitqueue @wq_head is woken up.
 747 *
 748 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 749 * set thus if other processes wait on the same list, when this
 750 * process is woken further processes are not considered.
 751 *
 752 * wake_up() has to be called after changing any variable that could
 753 * change the result of the wait condition.
 754 *
 755 * Returns:
 756 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 757 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 758 * or the remaining jiffies (at least 1) if the @condition evaluated
 759 * to %true before the @timeout elapsed.
 760 */
/* Fast path: return the full @timeout when the condition already holds. */
 761#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
 762({										\
 763	long __ret = timeout;							\
 764	might_sleep();								\
 765	if (!___wait_cond_timeout(condition))					\
 766		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
 767	__ret;									\
 768})
 769
/*
 * Locked-wait core.  @fn (do_wait_intr or do_wait_intr_irq) is called with
 * wq.lock held; a nonzero return (-ERESTARTSYS per the kernel-doc below)
 * aborts the loop, 0 means re-check @condition.  The exact sleep/unlock
 * behaviour lives in the fn implementations — not visible in this header.
 */
 770extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
 771extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
 772
 773#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
 774({										\
 775	int __ret;								\
 776	DEFINE_WAIT(__wait);							\
 777	if (exclusive)								\
 778		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
 779	do {									\
 780		__ret = fn(&(wq), &__wait);					\
 781		if (__ret)							\
 782			break;							\
 783	} while (!(condition));							\
 784	__remove_wait_queue(&(wq), &__wait);					\
 785	__set_current_state(TASK_RUNNING);					\
 786	__ret;									\
 787})
 788
 789
 790/**
 791 * wait_event_interruptible_locked - sleep until a condition gets true
 792 * @wq: the waitqueue to wait on
 793 * @condition: a C expression for the event to wait for
 794 *
 795 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 796 * @condition evaluates to true or a signal is received.
 797 * The @condition is checked each time the waitqueue @wq is woken up.
 798 *
 799 * It must be called with wq.lock being held.  This spinlock is
 800 * unlocked while sleeping but @condition testing is done while lock
 801 * is held and when this macro exits the lock is held.
 802 *
 803 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 804 * functions which must match the way they are locked/unlocked outside
 805 * of this macro.
 806 *
 807 * wake_up_locked() has to be called after changing any variable that could
 808 * change the result of the wait condition.
 809 *
 810 * The function will return -ERESTARTSYS if it was interrupted by a
 811 * signal and 0 if @condition evaluated to true.
 812 */
/* Non-exclusive, plain spin_lock()-style locking; returns 0 if condition already true. */
 813#define wait_event_interruptible_locked(wq, condition)				\
 814	((condition)								\
 815	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
 816
 817/**
 818 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 819 * @wq: the waitqueue to wait on
 820 * @condition: a C expression for the event to wait for
 821 *
 822 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 823 * @condition evaluates to true or a signal is received.
 824 * The @condition is checked each time the waitqueue @wq is woken up.
 825 *
 826 * It must be called with wq.lock being held.  This spinlock is
 827 * unlocked while sleeping but @condition testing is done while lock
 828 * is held and when this macro exits the lock is held.
 829 *
 830 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 831 * functions which must match the way they are locked/unlocked outside
 832 * of this macro.
 833 *
 834 * wake_up_locked() has to be called after changing any variable that could
 835 * change the result of the wait condition.
 836 *
 837 * The function will return -ERESTARTSYS if it was interrupted by a
 838 * signal and 0 if @condition evaluated to true.
 839 */
/* Same as above but for spin_lock_irq()-style locking (do_wait_intr_irq). */
 840#define wait_event_interruptible_locked_irq(wq, condition)			\
 841	((condition)								\
 842	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
 843
 844/**
 845 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 846 * @wq: the waitqueue to wait on
 847 * @condition: a C expression for the event to wait for
 848 *
 849 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 850 * @condition evaluates to true or a signal is received.
 851 * The @condition is checked each time the waitqueue @wq is woken up.
 852 *
 853 * It must be called with wq.lock being held.  This spinlock is
 854 * unlocked while sleeping but @condition testing is done while lock
 855 * is held and when this macro exits the lock is held.
 856 *
 857 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 858 * functions which must match the way they are locked/unlocked outside
 859 * of this macro.
 860 *
 861 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 862 * set thus if other processes wait on the same list, when this
 863 * process is woken further processes are not considered.
 864 *
 865 * wake_up_locked() has to be called after changing any variable that could
 866 * change the result of the wait condition.
 867 *
 868 * The function will return -ERESTARTSYS if it was interrupted by a
 869 * signal and 0 if @condition evaluated to true.
 870 */
/* Exclusive variant (the '1' argument sets WQ_FLAG_EXCLUSIVE). */
 871#define wait_event_interruptible_exclusive_locked(wq, condition)		\
 872	((condition)								\
 873	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
 874
 875/**
 876 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 877 * @wq: the waitqueue to wait on
 878 * @condition: a C expression for the event to wait for
 879 *
 880 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 881 * @condition evaluates to true or a signal is received.
 882 * The @condition is checked each time the waitqueue @wq is woken up.
 883 *
 884 * It must be called with wq.lock being held.  This spinlock is
 885 * unlocked while sleeping but @condition testing is done while lock
 886 * is held and when this macro exits the lock is held.
 887 *
 888 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 889 * functions which must match the way they are locked/unlocked outside
 890 * of this macro.
 891 *
 892 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 893 * set thus if other processes wait on the same list, when this
 894 * process is woken further processes are not considered.
 895 *
 896 * wake_up_locked() has to be called after changing any variable that could
 897 * change the result of the wait condition.
 898 *
 899 * The function will return -ERESTARTSYS if it was interrupted by a
 900 * signal and 0 if @condition evaluated to true.
 901 */
/* Exclusive + spin_lock_irq()-style locking variant. */
 902#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
 903	((condition)								\
 904	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
 905
 906
/* TASK_KILLABLE wait: per the kernel-doc below, only (fatal) signals interrupt it. */
 907#define __wait_event_killable(wq, condition)					\
 908	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
 909
 910/**
 911 * wait_event_killable - sleep until a condition gets true
 912 * @wq_head: the waitqueue to wait on
 913 * @condition: a C expression for the event to wait for
 914 *
 915 * The process is put to sleep (TASK_KILLABLE) until the
 916 * @condition evaluates to true or a signal is received.
 917 * The @condition is checked each time the waitqueue @wq_head is woken up.
 918 *
 919 * wake_up() has to be called after changing any variable that could
 920 * change the result of the wait condition.
 921 *
 922 * The function will return -ERESTARTSYS if it was interrupted by a
 923 * signal and 0 if @condition evaluated to true.
 924 */
/* Fast path: return 0 without sleeping when @condition is already true. */
 925#define wait_event_killable(wq_head, condition)					\
 926({										\
 927	int __ret = 0;								\
 928	might_sleep();								\
 929	if (!(condition))							\
 930		__ret = __wait_event_killable(wq_head, condition);		\
 931	__ret;									\
 932})
 933
/* Generic form: caller-supplied @state selects sleep state and signal behaviour. */
 934#define __wait_event_state(wq, condition, state)				\
 935	___wait_event(wq, condition, state, 0, 0, schedule())
 936
 937/**
 938 * wait_event_state - sleep until a condition gets true
 939 * @wq_head: the waitqueue to wait on
 940 * @condition: a C expression for the event to wait for
 941 * @state: state to sleep in
 942 *
 943 * The process is put to sleep (@state) until the @condition evaluates to true
 944 * or a signal is received (when allowed by @state).  The @condition is checked
 945 * each time the waitqueue @wq_head is woken up.
 946 *
 947 * wake_up() has to be called after changing any variable that could
 948 * change the result of the wait condition.
 949 *
 950 * The function will return -ERESTARTSYS if it was interrupted by a signal
 951 * (when allowed by @state) and 0 if @condition evaluated to true.
 952 */
/* Fast path: return 0 without sleeping when @condition is already true. */
 953#define wait_event_state(wq_head, condition, state)				\
 954({										\
 955	int __ret = 0;								\
 956	might_sleep();								\
 957	if (!(condition))							\
 958		__ret = __wait_event_state(wq_head, condition, state);		\
 959	__ret;									\
 960})
 961
/* Timed TASK_KILLABLE wait; __ret (inside ___wait_event) tracks remaining jiffies. */
 962#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
 963	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
 964		      TASK_KILLABLE, 0, timeout,				\
 965		      __ret = schedule_timeout(__ret))
 966
 967/**
 968 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 969 * @wq_head: the waitqueue to wait on
 970 * @condition: a C expression for the event to wait for
 971 * @timeout: timeout, in jiffies
 972 *
 973 * The process is put to sleep (TASK_KILLABLE) until the
 974 * @condition evaluates to true or a kill signal is received.
 975 * The @condition is checked each time the waitqueue @wq_head is woken up.
 976 *
 977 * wake_up() has to be called after changing any variable that could
 978 * change the result of the wait condition.
 979 *
 980 * Returns:
 981 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 982 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 983 * the remaining jiffies (at least 1) if the @condition evaluated
 984 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 985 * interrupted by a kill signal.
 986 *
 987 * Only kill signals interrupt this process.
 988 */
/* Fast path: return the full @timeout when the condition already holds. */
 989#define wait_event_killable_timeout(wq_head, condition, timeout)		\
 990({										\
 991	long __ret = timeout;							\
 992	might_sleep();								\
 993	if (!___wait_cond_timeout(condition))					\
 994		__ret = __wait_event_killable_timeout(wq_head,			\
 995						condition, timeout);		\
 996	__ret;									\
 997})
 998
 999
/* Drops @lock (spin_unlock_irq) around @cmd and schedule(), then reacquires it. */
1000#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
1001	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
1002			    spin_unlock_irq(&lock);				\
1003			    cmd;						\
1004			    schedule();						\
1005			    spin_lock_irq(&lock))
1006
1007/**
1008 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
1009 *			     condition is checked under the lock. This
1010 *			     is expected to be called with the lock
1011 *			     taken.
1012 * @wq_head: the waitqueue to wait on
1013 * @condition: a C expression for the event to wait for
1014 * @lock: a locked spinlock_t, which will be released before cmd
1015 *	  and schedule() and reacquired afterwards.
1016 * @cmd: a command which is invoked outside the critical section before
1017 *	 sleep
1018 *
1019 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
1020 * @condition evaluates to true. The @condition is checked each time
1021 * the waitqueue @wq_head is woken up.
1022 *
1023 * wake_up() has to be called after changing any variable that could
1024 * change the result of the wait condition.
1025 *
1026 * This is supposed to be called while holding the lock. The lock is
1027 * dropped before invoking the cmd and going to sleep and is reacquired
1028 * afterwards.
1029 */
/* Fast path: do nothing when @condition is already true (checked under @lock). */
1030#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
1031do {										\
1032	if (condition)								\
1033		break;								\
1034	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
1035} while (0)
1036
1037/**
1038 * wait_event_lock_irq - sleep until a condition gets true. The
1039 *			 condition is checked under the lock. This
1040 *			 is expected to be called with the lock
1041 *			 taken.
1042 * @wq_head: the waitqueue to wait on
1043 * @condition: a C expression for the event to wait for
1044 * @lock: a locked spinlock_t, which will be released before schedule()
1045 *	  and reacquired afterwards.
1046 *
1047 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
1048 * @condition evaluates to true. The @condition is checked each time
1049 * the waitqueue @wq_head is woken up.
1050 *
1051 * wake_up() has to be called after changing any variable that could
1052 * change the result of the wait condition.
1053 *
1054 * This is supposed to be called while holding the lock. The lock is
1055 * dropped before going to sleep and is reacquired afterwards.
1056 */
/* Same as wait_event_lock_irq_cmd() with an empty @cmd argument. */
1057#define wait_event_lock_irq(wq_head, condition, lock)				\
1058do {										\
1059	if (condition)								\
1060		break;								\
1061	__wait_event_lock_irq(wq_head, condition, lock, );			\
1062} while (0)
1063
1064
/* Interruptible form: drops @lock around @cmd and schedule(), then reacquires it. */
1065#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
1066	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
1067		      spin_unlock_irq(&lock);					\
1068		      cmd;							\
1069		      schedule();						\
1070		      spin_lock_irq(&lock))
1071
1072/**
1073 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
1074 *		The condition is checked under the lock. This is expected to
1075 *		be called with the lock taken.
1076 * @wq_head: the waitqueue to wait on
1077 * @condition: a C expression for the event to wait for
1078 * @lock: a locked spinlock_t, which will be released before cmd and
1079 *	  schedule() and reacquired afterwards.
1080 * @cmd: a command which is invoked outside the critical section before
1081 *	 sleep
1082 *
1083 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1084 * @condition evaluates to true or a signal is received. The @condition is
1085 * checked each time the waitqueue @wq_head is woken up.
1086 *
1087 * wake_up() has to be called after changing any variable that could
1088 * change the result of the wait condition.
1089 *
1090 * This is supposed to be called while holding the lock. The lock is
1091 * dropped before invoking the cmd and going to sleep and is reacquired
1092 * afterwards.
1093 *
1094 * The macro will return -ERESTARTSYS if it was interrupted by a signal
1095 * and 0 if @condition evaluated to true.
1096 */
/*
 * Fast path: return 0 when @condition already holds (checked under @lock).
 * NOTE(review): unlike wait_event_interruptible(), there is no might_sleep()
 * annotation here — confirm that is intentional for the locked variants.
 */
1097#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
1098({										\
1099	int __ret = 0;								\
1100	if (!(condition))							\
1101		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1102						condition, lock, cmd);		\
1103	__ret;									\
1104})
1105
1106/**
1107 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
1108 *		The condition is checked under the lock. This is expected
1109 *		to be called with the lock taken.
1110 * @wq_head: the waitqueue to wait on
1111 * @condition: a C expression for the event to wait for
1112 * @lock: a locked spinlock_t, which will be released before schedule()
1113 *	  and reacquired afterwards.
1114 *
1115 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1116 * @condition evaluates to true or signal is received. The @condition is
1117 * checked each time the waitqueue @wq_head is woken up.
1118 *
1119 * wake_up() has to be called after changing any variable that could
1120 * change the result of the wait condition.
1121 *
1122 * This is supposed to be called while holding the lock. The lock is
1123 * dropped before going to sleep and is reacquired afterwards.
1124 *
1125 * The macro will return -ERESTARTSYS if it was interrupted by a signal
1126 * and 0 if @condition evaluated to true.
1127 */
/* Same as wait_event_interruptible_lock_irq_cmd() with an empty @cmd argument. */
1128#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
1129({										\
1130	int __ret = 0;								\
1131	if (!(condition))							\
1132		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1133						condition, lock,);		\
1134	__ret;									\
1135})
1136
/*
 * Timed wait that drops @lock around schedule_timeout() and reacquires it.
 * The stray trailing ';' that previously terminated this definition has been
 * dropped: it made every expansion end in ';', so a use such as
 * "if (x) ret = __wait_event_lock_irq_timeout(...); else ..." expanded to a
 * double semicolon and produced an "else without a previous if" error.
 */
1137#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
1138	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
1139		      state, 0, timeout,					\
1140		      spin_unlock_irq(&lock);					\
1141		      __ret = schedule_timeout(__ret);				\
1142		      spin_lock_irq(&lock))
1143
1144/**
1145 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
1146 *		true or a timeout elapses. The condition is checked under
1147 *		the lock. This is expected to be called with the lock taken.
1148 * @wq_head: the waitqueue to wait on
1149 * @condition: a C expression for the event to wait for
1150 * @lock: a locked spinlock_t, which will be released before schedule()
1151 *	  and reacquired afterwards.
1152 * @timeout: timeout, in jiffies
1153 *
1154 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1155 * @condition evaluates to true or signal is received. The @condition is
1156 * checked each time the waitqueue @wq_head is woken up.
1157 *
1158 * wake_up() has to be called after changing any variable that could
1159 * change the result of the wait condition.
1160 *
1161 * This is supposed to be called while holding the lock. The lock is
1162 * dropped before going to sleep and is reacquired afterwards.
1163 *
1164 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1165 * was interrupted by a signal, and the remaining jiffies otherwise
1166 * if the condition evaluated to true before the timeout elapsed.
1167 */
/* Fast path: return the full @timeout when the condition already holds (under @lock). */
1168#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
1169						  timeout)			\
1170({										\
1171	long __ret = timeout;							\
1172	if (!___wait_cond_timeout(condition))					\
1173		__ret = __wait_event_lock_irq_timeout(				\
1174					wq_head, condition, lock, timeout,	\
1175					TASK_INTERRUPTIBLE);			\
1176	__ret;									\
1177})
1178
/* Uninterruptible counterpart of wait_event_interruptible_lock_irq_timeout(). */
1179#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
1180({										\
1181	long __ret = timeout;							\
1182	if (!___wait_cond_timeout(condition))					\
1183		__ret = __wait_event_lock_irq_timeout(				\
1184					wq_head, condition, lock, timeout,	\
1185					TASK_UNINTERRUPTIBLE);			\
1186	__ret;									\
1187})
1188
1189/*
1190 * Waitqueues which are removed from the waitqueue_head at wakeup time
1191 */
/* Core prepare/finish wait primitives and the default autoremove/woken callbacks. */
1192void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1193bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1194long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1195void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1196long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
1197int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1198int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1199
/* On-stack wait entry bound to the current task, woken via @function. */
1200#define DEFINE_WAIT_FUNC(name, function)					\
1201	struct wait_queue_entry name = {					\
1202		.private	= current,					\
1203		.func		= function,					\
1204		.entry		= LIST_HEAD_INIT((name).entry),			\
1205	}
1206
/* Common case: wake callback removes the entry from the queue (autoremove). */
1207#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1208
/* Runtime (re)initialisation of a wait entry: same fields as DEFINE_WAIT, plus flags = 0. */
1209#define init_wait(wait)								\
1210	do {									\
1211		(wait)->private = current;					\
1212		(wait)->func = autoremove_wake_function;			\
1213		INIT_LIST_HEAD(&(wait)->entry);					\
1214		(wait)->flags = 0;						\
1215	} while (0)
1216
/*
 * Invoke @func(p, arg) on task @p and propagate its return value.
 * NOTE(review): any locking guarantees around @func are provided by the
 * implementation, which is not visible in this header — confirm there.
 */
1217typedef int (*task_call_f)(struct task_struct *p, void *arg);
1218extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
1219
1220#endif /* _LINUX_WAIT_H */