v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is "no color", used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
	 * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * doesn't participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

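As a quick usage sketch (my_poll_fn/my_poll are made-up names, not part of this header), a driver can declare a work item statically with these macros and arm it later from init code:

static void my_poll_fn(struct work_struct *work)
{
	pr_info("periodic poll ran\n");	/* runs in process context */
}

static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);

/* somewhere in init/probe code: run my_poll_fn() roughly 1s from now */
/*	schedule_delayed_work(&my_poll, HZ);	*/
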
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

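For dynamically allocated objects, a common pattern (sketched here with a hypothetical struct) is to embed the work item, INIT_WORK() it once, and recover the owner in the handler via container_of():

struct my_dev {				/* illustrative driver state */
	struct work_struct reset_work;
	int reset_count;
};

static void my_reset_fn(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev, reset_work);

	md->reset_count++;		/* runs later, in process context */
}

static int my_probe(struct my_dev *md)
{
	INIT_WORK(&md->reset_work, my_reset_fn);
	return 0;
}
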
#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

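A plausible rcu_work sketch (the object and names are invented for illustration): defer freeing an RCU-protected object to process context, after a grace period has elapsed:

struct my_obj {
	struct rcu_work rwork;
	/* ... payload ... */
};

static void my_obj_free_fn(struct work_struct *work)
{
	struct my_obj *obj = container_of(to_rcu_work(work),
					  struct my_obj, rwork);

	kfree(obj);			/* grace period already elapsed */
}

static void my_obj_release(struct my_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, my_obj_free_fn);
	queue_rcu_work(system_wq, &obj->rwork);
}
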
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are
	 * sub-optimal in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified as
	 * contributing significantly to power consumption are marked with
	 * this flag, and enabling the power_efficient mode leads to
	 * noticeable power saving at the cost of a small performance
	 * disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

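As a rough rule of thumb (sketch; handler names are hypothetical): short, bounded work can ride system_wq via schedule_work(), while long-running or CPU-agnostic work is better queued on system_unbound_wq:

static void quick_fn(struct work_struct *work)	{ /* short, bounded */ }
static void chunky_fn(struct work_struct *work)	{ /* may run long */ }

static DECLARE_WORK(quick_work, quick_fn);
static DECLARE_WORK(chunky_work, chunky_fn);

static void kick_both(void)
{
	schedule_work(&quick_work);			/* on system_wq */
	queue_work(system_unbound_wq, &chunky_work);	/* any CPU */
}
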
/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...);
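
A typical allocation sketch (the workqueue name and flag choice are illustrative only; always check for %NULL):

static struct workqueue_struct *my_wq;

static int my_init(void)
{
	my_wq = alloc_workqueue("my_drv/%d", WQ_UNBOUND | WQ_FREEZABLE, 0, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_exit(void)
{
	destroy_workqueue(my_wq);	/* drains and frees the workqueue */
}
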
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

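Sketch of where ordering matters (hypothetical name): if every queued item must finish before the next one starts, an ordered workqueue gives strict FIFO, one-at-a-time execution:

static struct workqueue_struct *ev_wq;

static int ev_init(void)
{
	ev_wq = alloc_ordered_workqueue("my_events", 0);
	return ev_wq ? 0 : -ENOMEM;
}
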
#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

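One common debounce sketch built on these primitives (all names hypothetical): mod_delayed_work() re-arms the deadline on every event, so the handler fires once, 100ms after the last event in a burst:

static struct delayed_work debounce_work;  /* INIT_DELAYED_WORK()'d elsewhere */

static void on_input_event(void)
{
	/* re-arms the timer if pending; queues afresh if idle */
	mod_delayed_work(system_wq, &debounce_work, msecs_to_jiffies(100));
}
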
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

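Concretely (sketch; struct msg is invented): the guarantee above means plain stores made before a successful queue_work() are visible to the handler, so no extra barriers are needed for this kind of handoff:

struct msg {
	struct work_struct work;
	int payload;
};

static void msg_fn(struct work_struct *work)
{
	struct msg *m = container_of(work, struct msg, work);

	pr_info("payload=%d\n", m->payload);	/* guaranteed to see 42 */
}

static void send(struct msg *m)
{
	m->payload = 42;			/* store first ... */
	queue_work(system_wq, &m->work);	/* ... then publish */
}
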
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

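Following the advice above, a module-teardown sketch (reusing the hypothetical my_poll/my_work items from earlier sketches) cancels its own items instead of flushing the whole global workqueue:

static void my_module_exit(void)
{
	cancel_delayed_work_sync(&my_poll);	/* kills timer, waits for fn */
	cancel_work_sync(&my_work);		/* waits if currently running */
}
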
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

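A short work_on_cpu() sketch (the purpose is invented; the target CPU must be online, and work_on_cpu_safe() is the variant that holds off CPU hotplug while it runs): fn executes synchronously on the chosen CPU and its return value is passed back:

static long where_am_i(void *arg)
{
	return raw_smp_processor_id();	/* runs on the target CPU */
}

static void demo(void)
{
	long cpu = work_on_cpu_safe(1, where_am_i, NULL);

	pr_info("ran on cpu %ld\n", cpu);
}
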
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);

#endif
v3.5.6
 
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is "no color", used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 8 bits off of cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),	\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

#define DECLARE_DEFERRED_WORK(n, f)				\
	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

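A sketch of what PREPARE_WORK() allows in this era (fields and handlers invented; the macro was removed from later kernels): retargeting an already-initialized, non-pending work item at a different handler between queueings:

struct my_dev {
	struct work_struct work;
};

static void stage1_fn(struct work_struct *work) { /* ... */ }
static void stage2_fn(struct work_struct *work) { /* ... */ }

static void advance(struct my_dev *dev, struct workqueue_struct *wq)
{
	/* only safe while the item is not pending */
	PREPARE_WORK(&dev->work, stage2_fn);
	queue_work(wq, &dev->work);
}
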
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
 * it's freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_nrt_freezable_wq;

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(fmt))				\
		__lock_name = (fmt);				\
	else							\
		__lock_name = #fmt;				\
								\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      &__key, __lock_name, ##args);	\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)	\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)		\
	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline bool cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline bool __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

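Sketch of the caveat above (names hypothetical): cancel_delayed_work() only guarantees the handler won't start if the timer had not fired yet; pair it with the sync variant when the handler must be fully quiesced:

static void my_stop(struct delayed_work *dwork)
{
	if (!cancel_delayed_work(dwork))
		/* timer already fired: work may be queued or running */
		cancel_delayed_work_sync(dwork);
}
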
#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif