v3.15
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002		Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010		SUSE Linux Products GmbH
  15 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  16 *
   17 * This is the generic async execution mechanism.  Work items are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There are two worker pools for each CPU (one for
  20 * normal work items and the other for high priority ones) and some extra
  21 * pools for workqueues which are not bound to any specific CPU - the
  22 * number of these backing pools is dynamic.
  23 *
  24 * Please read Documentation/workqueue.txt for details.
  25 */
  26
  27#include <linux/export.h>
  28#include <linux/kernel.h>
  29#include <linux/sched.h>
  30#include <linux/init.h>
  31#include <linux/signal.h>
  32#include <linux/completion.h>
  33#include <linux/workqueue.h>
  34#include <linux/slab.h>
  35#include <linux/cpu.h>
  36#include <linux/notifier.h>
  37#include <linux/kthread.h>
  38#include <linux/hardirq.h>
  39#include <linux/mempolicy.h>
  40#include <linux/freezer.h>
  41#include <linux/kallsyms.h>
  42#include <linux/debug_locks.h>
  43#include <linux/lockdep.h>
  44#include <linux/idr.h>
  45#include <linux/jhash.h>
  46#include <linux/hashtable.h>
  47#include <linux/rculist.h>
  48#include <linux/nodemask.h>
  49#include <linux/moduleparam.h>
  50#include <linux/uaccess.h>
  51
  52#include "workqueue_internal.h"
  53
  54enum {
  55	/*
  56	 * worker_pool flags
  57	 *
  58	 * A bound pool is either associated or disassociated with its CPU.
  59	 * While associated (!DISASSOCIATED), all workers are bound to the
  60	 * CPU and none has %WORKER_UNBOUND set and concurrency management
  61	 * is in effect.
  62	 *
  63	 * While DISASSOCIATED, the cpu may be offline and all workers have
  64	 * %WORKER_UNBOUND set and concurrency management disabled, and may
  65	 * be executing on any CPU.  The pool behaves as an unbound one.
  66	 *
  67	 * Note that DISASSOCIATED should be flipped only while holding
  68	 * manager_mutex to avoid changing binding state while
  69	 * create_worker() is in progress.
  70	 */
  71	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
  72	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
  73	POOL_FREEZING		= 1 << 3,	/* freeze in progress */
  74
  75	/* worker flags */
  76	WORKER_STARTED		= 1 << 0,	/* started */
  77	WORKER_DIE		= 1 << 1,	/* die die die */
  78	WORKER_IDLE		= 1 << 2,	/* is idle */
  79	WORKER_PREP		= 1 << 3,	/* preparing to run works */
  80	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
  81	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
  82	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */
  83
  84	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
  85				  WORKER_UNBOUND | WORKER_REBOUND,
  86
  87	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */
  88
  89	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
  90	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
  91
  92	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
  93	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
  94
  95	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  96						/* call for help after 10ms
  97						   (min two ticks) */
  98	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
   99	CREATE_COOLDOWN		= HZ,		/* time to breathe after a failure */
 
 100
 101	/*
 102	 * Rescue workers are used only on emergencies and shared by
 103	 * all cpus.  Give -20.
 104	 */
 105	RESCUER_NICE_LEVEL	= -20,
 106	HIGHPRI_NICE_LEVEL	= -20,
 107
 108	WQ_NAME_LEN		= 24,
 109};
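/*
 * Editor's note (not part of the original file): a worked example of the
 * "10ms, min two ticks" rule for MAYDAY_INITIAL_TIMEOUT above.  With
 * HZ=1000, HZ/100 = 10 >= 2, so the timeout is 10 jiffies = 10ms.  With
 * HZ=100, HZ/100 = 1 < 2, so the 2-jiffy minimum (20ms) is used instead.
 */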
 110
 111/*
 112 * Structure fields follow one of the following exclusion rules.
 113 *
 114 * I: Modifiable by initialization/destruction paths and read-only for
 115 *    everyone else.
 116 *
 117 * P: Preemption protected.  Disabling preemption is enough and should
 118 *    only be modified and accessed from the local cpu.
 119 *
 120 * L: pool->lock protected.  Access with pool->lock held.
 121 *
 122 * X: During normal operation, modification requires pool->lock and should
 123 *    be done only from local cpu.  Either disabling preemption on local
 124 *    cpu or grabbing pool->lock is enough for read access.  If
 125 *    POOL_DISASSOCIATED is set, it's identical to L.
 126 *
 127 * MG: pool->manager_mutex and pool->lock protected.  Writes require both
 128 *     locks.  Reads can happen under either lock.
 129 *
 130 * PL: wq_pool_mutex protected.
 131 *
 132 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 133 *
 134 * WQ: wq->mutex protected.
 135 *
 136 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 137 *
 138 * MD: wq_mayday_lock protected.
 139 */
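/*
 * Editor's note (not part of the original file): as a concrete reading of
 * the legend above, worker_pool->worklist is annotated "L:" below and is
 * only touched with pool->lock held, while worker_pool->id is "I:" and is
 * written only on the initialization/destruction paths.
 */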
 140
 141/* struct worker is defined in workqueue_internal.h */
 142
 143struct worker_pool {
 144	spinlock_t		lock;		/* the pool lock */
 145	int			cpu;		/* I: the associated cpu */
 146	int			node;		/* I: the associated node ID */
 147	int			id;		/* I: pool ID */
 148	unsigned int		flags;		/* X: flags */
 149
 150	struct list_head	worklist;	/* L: list of pending works */
 151	int			nr_workers;	/* L: total number of workers */
 
 152
 153	/* nr_idle includes the ones off idle_list for rebinding */
 154	int			nr_idle;	/* L: currently idle ones */
 155
 
 156	struct list_head	idle_list;	/* X: list of idle workers */
 157	struct timer_list	idle_timer;	/* L: worker idle timeout */
 158	struct timer_list	mayday_timer;	/* L: SOS timer for workers */
 159
  160	/* a worker is either on busy_hash or idle_list, or the manager */
 161	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 162						/* L: hash of busy workers */
 163
 164	/* see manage_workers() for details on the two manager mutexes */
 165	struct mutex		manager_arb;	/* manager arbitration */
 166	struct mutex		manager_mutex;	/* manager exclusion */
 167	struct idr		worker_idr;	/* MG: worker IDs and iteration */
 168
 169	struct workqueue_attrs	*attrs;		/* I: worker attributes */
 170	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
 171	int			refcnt;		/* PL: refcnt for unbound pools */
 172
 173	/*
 174	 * The current concurrency level.  As it's likely to be accessed
 175	 * from other CPUs during try_to_wake_up(), put it in a separate
 176	 * cacheline.
 177	 */
 178	atomic_t		nr_running ____cacheline_aligned_in_smp;
 179
 180	/*
 181	 * Destruction of pool is sched-RCU protected to allow dereferences
 182	 * from get_work_pool().
 183	 */
 184	struct rcu_head		rcu;
 185} ____cacheline_aligned_in_smp;
 186
 187/*
 188 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 189 * of work_struct->data are used for flags and the remaining high bits
 190 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 191 * number of flag bits.
 192 */
 193struct pool_workqueue {
 194	struct worker_pool	*pool;		/* I: the associated pool */
 195	struct workqueue_struct *wq;		/* I: the owning workqueue */
 196	int			work_color;	/* L: current color */
 197	int			flush_color;	/* L: flushing color */
 198	int			refcnt;		/* L: reference count */
 199	int			nr_in_flight[WORK_NR_COLORS];
 200						/* L: nr of in_flight works */
 201	int			nr_active;	/* L: nr of active works */
 202	int			max_active;	/* L: max active works */
 203	struct list_head	delayed_works;	/* L: delayed works */
 204	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 205	struct list_head	mayday_node;	/* MD: node on wq->maydays */
 206
 207	/*
 208	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
 209	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
 210	 * itself is also sched-RCU protected so that the first pwq can be
 211	 * determined without grabbing wq->mutex.
 212	 */
 213	struct work_struct	unbound_release_work;
 214	struct rcu_head		rcu;
 215} __aligned(1 << WORK_STRUCT_FLAG_BITS);
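/*
 * Editor's note (not part of the original file): because pool_workqueues
 * are aligned to 1 << WORK_STRUCT_FLAG_BITS bytes, the low
 * WORK_STRUCT_FLAG_BITS bits of a pwq address are always zero.  A queued
 * work item can therefore carry both the pwq pointer and the WORK_STRUCT_*
 * flag bits in the single work->data word; see set_work_pwq() and
 * get_work_pwq() below.
 */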
 216
 217/*
 218 * Structure used to wait for workqueue flush.
 219 */
 220struct wq_flusher {
 221	struct list_head	list;		/* WQ: list of flushers */
 222	int			flush_color;	/* WQ: flush color waiting for */
 223	struct completion	done;		/* flush completion */
 224};
 225
 226struct wq_device;
 227
 228/*
 229 * The externally visible workqueue.  It relays the issued work items to
 230 * the appropriate worker_pool through its pool_workqueues.
 231 */
 232struct workqueue_struct {
 233	struct list_head	pwqs;		/* WR: all pwqs of this wq */
 234	struct list_head	list;		/* PL: list of all workqueues */
 235
 236	struct mutex		mutex;		/* protects this wq */
 237	int			work_color;	/* WQ: current work color */
 238	int			flush_color;	/* WQ: current flush color */
 239	atomic_t		nr_pwqs_to_flush; /* flush in progress */
 240	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
 241	struct list_head	flusher_queue;	/* WQ: flush waiters */
 242	struct list_head	flusher_overflow; /* WQ: flush overflow list */
 243
 244	struct list_head	maydays;	/* MD: pwqs requesting rescue */
 245	struct worker		*rescuer;	/* I: rescue worker */
 246
 247	int			nr_drainers;	/* WQ: drain in progress */
 248	int			saved_max_active; /* WQ: saved pwq max_active */
 249
 250	struct workqueue_attrs	*unbound_attrs;	/* WQ: only for unbound wqs */
 251	struct pool_workqueue	*dfl_pwq;	/* WQ: only for unbound wqs */
 252
 253#ifdef CONFIG_SYSFS
 254	struct wq_device	*wq_dev;	/* I: for sysfs interface */
 255#endif
 256#ifdef CONFIG_LOCKDEP
 257	struct lockdep_map	lockdep_map;
 258#endif
 259	char			name[WQ_NAME_LEN]; /* I: workqueue name */
 260
 261	/* hot fields used during command issue, aligned to cacheline */
 262	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
 263	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
 264	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */
 265};
 266
 267static struct kmem_cache *pwq_cache;
 268
 269static int wq_numa_tbl_len;		/* highest possible NUMA node id + 1 */
 270static cpumask_var_t *wq_numa_possible_cpumask;
 271					/* possible CPUs of each node */
 272
 273static bool wq_disable_numa;
 274module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 275
 276/* see the comment above the definition of WQ_POWER_EFFICIENT */
 277#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
 278static bool wq_power_efficient = true;
 279#else
 280static bool wq_power_efficient;
 281#endif
 282
 283module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 284
 285static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
 286
 287/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
 288static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 289
 290static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 291static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 292
 293static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
 294static bool workqueue_freezing;		/* PL: have wqs started freezing? */
 295
 296/* the per-cpu worker pools */
 297static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
 298				     cpu_worker_pools);
 299
 300static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */
 301
 302/* PL: hash of all unbound pools keyed by pool->attrs */
 303static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 304
 305/* I: attributes used when instantiating standard unbound pools on demand */
 306static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 307
 308/* I: attributes used when instantiating ordered pools on demand */
 309static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 310
 311struct workqueue_struct *system_wq __read_mostly;
 312EXPORT_SYMBOL(system_wq);
 313struct workqueue_struct *system_highpri_wq __read_mostly;
 314EXPORT_SYMBOL_GPL(system_highpri_wq);
 315struct workqueue_struct *system_long_wq __read_mostly;
 316EXPORT_SYMBOL_GPL(system_long_wq);
 317struct workqueue_struct *system_unbound_wq __read_mostly;
 318EXPORT_SYMBOL_GPL(system_unbound_wq);
 319struct workqueue_struct *system_freezable_wq __read_mostly;
 320EXPORT_SYMBOL_GPL(system_freezable_wq);
 321struct workqueue_struct *system_power_efficient_wq __read_mostly;
 322EXPORT_SYMBOL_GPL(system_power_efficient_wq);
 323struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 324EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 325
 326static int worker_thread(void *__worker);
 327static void copy_workqueue_attrs(struct workqueue_attrs *to,
 328				 const struct workqueue_attrs *from);
 329
 330#define CREATE_TRACE_POINTS
 331#include <trace/events/workqueue.h>
 332
 333#define assert_rcu_or_pool_mutex()					\
 334	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
 335			   lockdep_is_held(&wq_pool_mutex),		\
 336			   "sched RCU or wq_pool_mutex should be held")
 337
 338#define assert_rcu_or_wq_mutex(wq)					\
 339	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
 340			   lockdep_is_held(&wq->mutex),			\
 341			   "sched RCU or wq->mutex should be held")
 342
 343#ifdef CONFIG_LOCKDEP
 344#define assert_manager_or_pool_lock(pool)				\
 345	WARN_ONCE(debug_locks &&					\
 346		  !lockdep_is_held(&(pool)->manager_mutex) &&		\
 347		  !lockdep_is_held(&(pool)->lock),			\
 348		  "pool->manager_mutex or ->lock should be held")
 349#else
 350#define assert_manager_or_pool_lock(pool)	do { } while (0)
 351#endif
 352
 353#define for_each_cpu_worker_pool(pool, cpu)				\
 354	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
 355	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
 356	     (pool)++)
 357
 358/**
 359 * for_each_pool - iterate through all worker_pools in the system
 360 * @pool: iteration cursor
 361 * @pi: integer used for iteration
 362 *
 363 * This must be called either with wq_pool_mutex held or sched RCU read
 364 * locked.  If the pool needs to be used beyond the locking in effect, the
 365 * caller is responsible for guaranteeing that the pool stays online.
 366 *
 367 * The if/else clause exists only for the lockdep assertion and can be
 368 * ignored.
 369 */
 370#define for_each_pool(pool, pi)						\
 371	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
 372		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
 373		else
 374
 375/**
 376 * for_each_pool_worker - iterate through all workers of a worker_pool
 377 * @worker: iteration cursor
 378 * @wi: integer used for iteration
 379 * @pool: worker_pool to iterate workers of
 380 *
 381 * This must be called with either @pool->manager_mutex or ->lock held.
 382 *
 383 * The if/else clause exists only for the lockdep assertion and can be
 384 * ignored.
 385 */
 386#define for_each_pool_worker(worker, wi, pool)				\
 387	idr_for_each_entry(&(pool)->worker_idr, (worker), (wi))		\
 388		if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
 389		else
 390
 391/**
 392 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 393 * @pwq: iteration cursor
 394 * @wq: the target workqueue
 395 *
 396 * This must be called either with wq->mutex held or sched RCU read locked.
 397 * If the pwq needs to be used beyond the locking in effect, the caller is
 398 * responsible for guaranteeing that the pwq stays online.
 399 *
 400 * The if/else clause exists only for the lockdep assertion and can be
 401 * ignored.
 402 */
 403#define for_each_pwq(pwq, wq)						\
 404	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
 405		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
 406		else
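/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * caller of for_each_pwq() under sched-RCU, which satisfies
 * assert_rcu_or_wq_mutex().  The function name show_pwqs() is hypothetical.
 */
#if 0
static void show_pwqs(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	rcu_read_lock_sched();
	for_each_pwq(pwq, wq)
		pr_info("%s: pwq on pool %d\n", wq->name, pwq->pool->id);
	rcu_read_unlock_sched();
}
#endif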
 407
 408#ifdef CONFIG_DEBUG_OBJECTS_WORK
 409
 410static struct debug_obj_descr work_debug_descr;
 411
 412static void *work_debug_hint(void *addr)
 413{
 414	return ((struct work_struct *) addr)->func;
 415}
 416
 417/*
 418 * fixup_init is called when:
 419 * - an active object is initialized
 420 */
 421static int work_fixup_init(void *addr, enum debug_obj_state state)
 422{
 423	struct work_struct *work = addr;
 424
 425	switch (state) {
 426	case ODEBUG_STATE_ACTIVE:
 427		cancel_work_sync(work);
 428		debug_object_init(work, &work_debug_descr);
 429		return 1;
 430	default:
 431		return 0;
 432	}
 433}
 434
 435/*
 436 * fixup_activate is called when:
 437 * - an active object is activated
 438 * - an unknown object is activated (might be a statically initialized object)
 439 */
 440static int work_fixup_activate(void *addr, enum debug_obj_state state)
 441{
 442	struct work_struct *work = addr;
 443
 444	switch (state) {
 445
 446	case ODEBUG_STATE_NOTAVAILABLE:
 447		/*
 448		 * This is not really a fixup. The work struct was
 449		 * statically initialized. We just make sure that it
 450		 * is tracked in the object tracker.
 451		 */
 452		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
 453			debug_object_init(work, &work_debug_descr);
 454			debug_object_activate(work, &work_debug_descr);
 455			return 0;
 456		}
 457		WARN_ON_ONCE(1);
 458		return 0;
 459
 460	case ODEBUG_STATE_ACTIVE:
 461		WARN_ON(1);
 462
 463	default:
 464		return 0;
 465	}
 466}
 467
 468/*
 469 * fixup_free is called when:
 470 * - an active object is freed
 471 */
 472static int work_fixup_free(void *addr, enum debug_obj_state state)
 473{
 474	struct work_struct *work = addr;
 475
 476	switch (state) {
 477	case ODEBUG_STATE_ACTIVE:
 478		cancel_work_sync(work);
 479		debug_object_free(work, &work_debug_descr);
 480		return 1;
 481	default:
 482		return 0;
 483	}
 484}
 485
 486static struct debug_obj_descr work_debug_descr = {
 487	.name		= "work_struct",
 488	.debug_hint	= work_debug_hint,
 489	.fixup_init	= work_fixup_init,
 490	.fixup_activate	= work_fixup_activate,
 491	.fixup_free	= work_fixup_free,
 492};
 493
 494static inline void debug_work_activate(struct work_struct *work)
 495{
 496	debug_object_activate(work, &work_debug_descr);
 497}
 498
 499static inline void debug_work_deactivate(struct work_struct *work)
 500{
 501	debug_object_deactivate(work, &work_debug_descr);
 502}
 503
 504void __init_work(struct work_struct *work, int onstack)
 505{
 506	if (onstack)
 507		debug_object_init_on_stack(work, &work_debug_descr);
 508	else
 509		debug_object_init(work, &work_debug_descr);
 510}
 511EXPORT_SYMBOL_GPL(__init_work);
 512
 513void destroy_work_on_stack(struct work_struct *work)
 514{
 515	debug_object_free(work, &work_debug_descr);
 516}
 517EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 518
 519void destroy_delayed_work_on_stack(struct delayed_work *work)
 520{
 521	destroy_timer_on_stack(&work->timer);
 522	debug_object_free(&work->work, &work_debug_descr);
 523}
 524EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
 525
 526#else
 527static inline void debug_work_activate(struct work_struct *work) { }
 528static inline void debug_work_deactivate(struct work_struct *work) { }
 529#endif
 530
 531/**
  532 * worker_pool_assign_id - allocate ID and assign it to @pool
 533 * @pool: the pool pointer of interest
 534 *
 535 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 536 * successfully, -errno on failure.
 537 */
 538static int worker_pool_assign_id(struct worker_pool *pool)
 539{
 540	int ret;
 541
 542	lockdep_assert_held(&wq_pool_mutex);
 543
 544	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
 545			GFP_KERNEL);
 546	if (ret >= 0) {
 547		pool->id = ret;
 548		return 0;
 549	}
 550	return ret;
 
 551}
 552
 553/**
 554 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 555 * @wq: the target workqueue
 556 * @node: the node ID
 557 *
  558 * This must be called either with wq->mutex held or sched RCU read locked.
 559 * If the pwq needs to be used beyond the locking in effect, the caller is
 560 * responsible for guaranteeing that the pwq stays online.
 561 *
 562 * Return: The unbound pool_workqueue for @node.
 563 */
 564static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 565						  int node)
 566{
 567	assert_rcu_or_wq_mutex(wq);
 568	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 569}
 570
 571static unsigned int work_color_to_flags(int color)
 572{
 573	return color << WORK_STRUCT_COLOR_SHIFT;
 574}
 575
 576static int get_work_color(struct work_struct *work)
 577{
 578	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 579		((1 << WORK_STRUCT_COLOR_BITS) - 1);
 580}
 581
 582static int work_next_color(int color)
 583{
 584	return (color + 1) % WORK_NR_COLORS;
 585}
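/*
 * Editor's note (not part of the original file): work colors simply cycle
 * 0, 1, ..., WORK_NR_COLORS - 1, 0, ... via work_next_color().  A work item
 * queued while its pwq is at color C gets C stored in its data word through
 * work_color_to_flags(), and get_work_color() recovers it when the item
 * leaves the queue so pwq_dec_nr_in_flight() can account it against the
 * right color during flushing.
 */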
 586
 587/*
 588 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 589 * contain the pointer to the queued pwq.  Once execution starts, the flag
 590 * is cleared and the high bits contain OFFQ flags and pool ID.
 591 *
 592 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 593 * and clear_work_data() can be used to set the pwq, pool or clear
 594 * work->data.  These functions should only be called while the work is
 595 * owned - ie. while the PENDING bit is set.
 596 *
 597 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 598 * corresponding to a work.  Pool is available once the work has been
 599 * queued anywhere after initialization until it is sync canceled.  pwq is
 600 * available only while the work item is queued.
 601 *
 602 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 603 * canceled.  While being canceled, a work item may have its PENDING set
 604 * but stay off timer and worklist for arbitrarily long and nobody should
 605 * try to steal the PENDING bit.
 606 */
 607static inline void set_work_data(struct work_struct *work, unsigned long data,
 608				 unsigned long flags)
 609{
 610	WARN_ON_ONCE(!work_pending(work));
 611	atomic_long_set(&work->data, data | flags | work_static(work));
 612}
 613
 614static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
 
 615			 unsigned long extra_flags)
 616{
 617	set_work_data(work, (unsigned long)pwq,
 618		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
 619}
 620
 621static void set_work_pool_and_keep_pending(struct work_struct *work,
 622					   int pool_id)
 623{
 624	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
 625		      WORK_STRUCT_PENDING);
 626}
 627
 628static void set_work_pool_and_clear_pending(struct work_struct *work,
 629					    int pool_id)
 630{
 631	/*
 632	 * The following wmb is paired with the implied mb in
 633	 * test_and_set_bit(PENDING) and ensures all updates to @work made
 634	 * here are visible to and precede any updates by the next PENDING
 635	 * owner.
 636	 */
 637	smp_wmb();
 638	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 639}
 640
 641static void clear_work_data(struct work_struct *work)
 642{
 643	smp_wmb();	/* see set_work_pool_and_clear_pending() */
 644	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 645}
 646
 647static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 648{
 649	unsigned long data = atomic_long_read(&work->data);
 650
 651	if (data & WORK_STRUCT_PWQ)
 652		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 653	else
 654		return NULL;
 655}
 656
 657/**
 658 * get_work_pool - return the worker_pool a given work was associated with
 659 * @work: the work item of interest
 660 *
 661 * Pools are created and destroyed under wq_pool_mutex, and allows read
 662 * access under sched-RCU read lock.  As such, this function should be
 663 * called under wq_pool_mutex or with preemption disabled.
 664 *
 665 * All fields of the returned pool are accessible as long as the above
 666 * mentioned locking is in effect.  If the returned pool needs to be used
 667 * beyond the critical section, the caller is responsible for ensuring the
 668 * returned pool is and stays online.
 669 *
 670 * Return: The worker_pool @work was last associated with.  %NULL if none.
 671 */
 672static struct worker_pool *get_work_pool(struct work_struct *work)
 673{
 674	unsigned long data = atomic_long_read(&work->data);
 675	int pool_id;
 676
 677	assert_rcu_or_pool_mutex();
 678
 679	if (data & WORK_STRUCT_PWQ)
 680		return ((struct pool_workqueue *)
 681			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 682
 683	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 684	if (pool_id == WORK_OFFQ_POOL_NONE)
 685		return NULL;
 686
 687	return idr_find(&worker_pool_idr, pool_id);
 688}
 689
 690/**
 691 * get_work_pool_id - return the worker pool ID a given work is associated with
 692 * @work: the work item of interest
 693 *
 694 * Return: The worker_pool ID @work was last associated with.
 695 * %WORK_OFFQ_POOL_NONE if none.
 696 */
 697static int get_work_pool_id(struct work_struct *work)
 698{
 699	unsigned long data = atomic_long_read(&work->data);
 700
 701	if (data & WORK_STRUCT_PWQ)
 702		return ((struct pool_workqueue *)
 703			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
 704
 705	return data >> WORK_OFFQ_POOL_SHIFT;
 706}
 707
 708static void mark_work_canceling(struct work_struct *work)
 709{
 710	unsigned long pool_id = get_work_pool_id(work);
 711
 712	pool_id <<= WORK_OFFQ_POOL_SHIFT;
 713	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 714}
 715
 716static bool work_is_canceling(struct work_struct *work)
 717{
 718	unsigned long data = atomic_long_read(&work->data);
 719
 720	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 721}
 722
 723/*
 724 * Policy functions.  These define the policies on how the global worker
 725 * pools are managed.  Unless noted otherwise, these functions assume that
 726 * they're being called with pool->lock held.
 727 */
 728
 729static bool __need_more_worker(struct worker_pool *pool)
 730{
 731	return !atomic_read(&pool->nr_running);
 
 732}
 733
 734/*
 735 * Need to wake up a worker?  Called from anything but currently
 736 * running workers.
 737 *
 738 * Note that, because unbound workers never contribute to nr_running, this
 739 * function will always return %true for unbound pools as long as the
 740 * worklist isn't empty.
 741 */
 742static bool need_more_worker(struct worker_pool *pool)
 743{
 744	return !list_empty(&pool->worklist) && __need_more_worker(pool);
 745}
 746
 747/* Can I start working?  Called from busy but !running workers. */
 748static bool may_start_working(struct worker_pool *pool)
 749{
 750	return pool->nr_idle;
 751}
 752
 753/* Do I need to keep working?  Called from currently running workers. */
 754static bool keep_working(struct worker_pool *pool)
 755{
 756	return !list_empty(&pool->worklist) &&
 757		atomic_read(&pool->nr_running) <= 1;
 758}
 759
 760/* Do we need a new worker?  Called from manager. */
 761static bool need_to_create_worker(struct worker_pool *pool)
 762{
 763	return need_more_worker(pool) && !may_start_working(pool);
 764}
 765
 766/* Do I need to be the manager? */
 767static bool need_to_manage_workers(struct worker_pool *pool)
 768{
 769	return need_to_create_worker(pool) ||
 770		(pool->flags & POOL_MANAGE_WORKERS);
 771}
 772
 773/* Do we have too many workers and should some go away? */
 774static bool too_many_workers(struct worker_pool *pool)
 775{
 776	bool managing = mutex_is_locked(&pool->manager_arb);
 777	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 778	int nr_busy = pool->nr_workers - nr_idle;
 779
 780	/*
 781	 * nr_idle and idle_list may disagree if idle rebinding is in
 782	 * progress.  Never return %true if idle_list is empty.
 783	 */
 784	if (list_empty(&pool->idle_list))
 785		return false;
 786
 787	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 788}
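/*
 * Editor's note (not part of the original file): a worked example of the
 * check above with MAX_IDLE_WORKERS_RATIO == 4, ignoring the manager
 * adjustment.  With nr_workers == 12 and nr_idle == 6, nr_busy == 6 and
 * (6 - 2) * 4 = 16 >= 6, so the pool has too many workers.  With
 * nr_idle == 3, nr_busy == 9 and (3 - 2) * 4 = 4 < 9, so it does not.
 */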
 789
 790/*
 791 * Wake up functions.
 792 */
 793
 794/* Return the first worker.  Safe with preemption disabled */
 795static struct worker *first_worker(struct worker_pool *pool)
 796{
 797	if (unlikely(list_empty(&pool->idle_list)))
 798		return NULL;
 799
 800	return list_first_entry(&pool->idle_list, struct worker, entry);
 801}
 802
 803/**
 804 * wake_up_worker - wake up an idle worker
 805 * @pool: worker pool to wake worker from
 806 *
 807 * Wake up the first idle worker of @pool.
 808 *
 809 * CONTEXT:
 810 * spin_lock_irq(pool->lock).
 811 */
 812static void wake_up_worker(struct worker_pool *pool)
 813{
 814	struct worker *worker = first_worker(pool);
 815
 816	if (likely(worker))
 817		wake_up_process(worker->task);
 818}
 819
 820/**
 821 * wq_worker_waking_up - a worker is waking up
 822 * @task: task waking up
 823 * @cpu: CPU @task is waking up to
 824 *
 825 * This function is called during try_to_wake_up() when a worker is
 826 * being awoken.
 827 *
 828 * CONTEXT:
 829 * spin_lock_irq(rq->lock)
 830 */
 831void wq_worker_waking_up(struct task_struct *task, int cpu)
 832{
 833	struct worker *worker = kthread_data(task);
 834
 835	if (!(worker->flags & WORKER_NOT_RUNNING)) {
 836		WARN_ON_ONCE(worker->pool->cpu != cpu);
 837		atomic_inc(&worker->pool->nr_running);
 838	}
 839}
 840
 841/**
 842 * wq_worker_sleeping - a worker is going to sleep
 843 * @task: task going to sleep
 844 * @cpu: CPU in question, must be the current CPU number
 845 *
 846 * This function is called during schedule() when a busy worker is
  847 * going to sleep.  A worker on the same cpu can be woken up by
  848 * returning a pointer to its task.
 849 *
 850 * CONTEXT:
 851 * spin_lock_irq(rq->lock)
 852 *
 853 * Return:
 854 * Worker task on @cpu to wake up, %NULL if none.
 855 */
 856struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
 
 857{
 858	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 859	struct worker_pool *pool;
 
 860
 861	/*
 862	 * Rescuers, which may not have all the fields set up like normal
 863	 * workers, also reach here, let's not access anything before
 864	 * checking NOT_RUNNING.
 865	 */
 866	if (worker->flags & WORKER_NOT_RUNNING)
 867		return NULL;
 868
 869	pool = worker->pool;
 870
 871	/* this can only happen on the local cpu */
 872	if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
 873		return NULL;
 874
 875	/*
 876	 * The counterpart of the following dec_and_test, implied mb,
 877	 * worklist not empty test sequence is in insert_work().
 878	 * Please read comment there.
 879	 *
 880	 * NOT_RUNNING is clear.  This means that we're bound to and
 881	 * running on the local cpu w/ rq lock held and preemption
  882 * disabled, which in turn means that no one else could be
 883	 * manipulating idle_list, so dereferencing idle_list without pool
 884	 * lock is safe.
 885	 */
 886	if (atomic_dec_and_test(&pool->nr_running) &&
 887	    !list_empty(&pool->worklist))
 888		to_wakeup = first_worker(pool);
 889	return to_wakeup ? to_wakeup->task : NULL;
 890}
 891
 892/**
 893 * worker_set_flags - set worker flags and adjust nr_running accordingly
 894 * @worker: self
 895 * @flags: flags to set
 896 * @wakeup: wakeup an idle worker if necessary
 897 *
 898 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 899 * nr_running becomes zero and @wakeup is %true, an idle worker is
 900 * woken up.
 901 *
 902 * CONTEXT:
 903 * spin_lock_irq(pool->lock)
 904 */
 905static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 906				    bool wakeup)
 907{
 908	struct worker_pool *pool = worker->pool;
 909
 910	WARN_ON_ONCE(worker->task != current);
 911
 912	/*
 913	 * If transitioning into NOT_RUNNING, adjust nr_running and
 914	 * wake up an idle worker as necessary if requested by
 915	 * @wakeup.
 916	 */
 917	if ((flags & WORKER_NOT_RUNNING) &&
 918	    !(worker->flags & WORKER_NOT_RUNNING)) {
 919		if (wakeup) {
 920			if (atomic_dec_and_test(&pool->nr_running) &&
 921			    !list_empty(&pool->worklist))
 922				wake_up_worker(pool);
 923		} else
 924			atomic_dec(&pool->nr_running);
 925	}
 926
 927	worker->flags |= flags;
 928}
 929
 930/**
 931 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 932 * @worker: self
 933 * @flags: flags to clear
 934 *
 935 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 936 *
 937 * CONTEXT:
 938 * spin_lock_irq(pool->lock)
 939 */
 940static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 941{
 942	struct worker_pool *pool = worker->pool;
 943	unsigned int oflags = worker->flags;
 944
 945	WARN_ON_ONCE(worker->task != current);
 946
 947	worker->flags &= ~flags;
 948
 949	/*
 950	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 951	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 952	 * of multiple flags, not a single flag.
 953	 */
 954	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 955		if (!(worker->flags & WORKER_NOT_RUNNING))
 956			atomic_inc(&pool->nr_running);
 957}
 958
 959/**
 960 * find_worker_executing_work - find worker which is executing a work
 961 * @pool: pool of interest
 962 * @work: work to find worker for
 963 *
 964 * Find a worker which is executing @work on @pool by searching
 965 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 966 * to match, its current execution should match the address of @work and
 967 * its work function.  This is to avoid unwanted dependency between
 968 * unrelated work executions through a work item being recycled while still
 969 * being executed.
 970 *
 971 * This is a bit tricky.  A work item may be freed once its execution
 972 * starts and nothing prevents the freed area from being recycled for
 973 * another work item.  If the same work item address ends up being reused
 974 * before the original execution finishes, workqueue will identify the
 975 * recycled work item as currently executing and make it wait until the
 976 * current execution finishes, introducing an unwanted dependency.
 977 *
 978 * This function checks the work item address and work function to avoid
 979 * false positives.  Note that this isn't complete as one may construct a
 980 * work function which can introduce dependency onto itself through a
 981 * recycled work item.  Well, if somebody wants to shoot oneself in the
 982 * foot that badly, there's only so much we can do, and if such deadlock
 983 * actually occurs, it should be easy to locate the culprit work function.
 984 *
 985 * CONTEXT:
 986 * spin_lock_irq(pool->lock).
 987 *
 988 * Return:
 989 * Pointer to worker which is executing @work if found, %NULL
 990 * otherwise.
 991 */
 992static struct worker *find_worker_executing_work(struct worker_pool *pool,
 993						 struct work_struct *work)
 994{
 995	struct worker *worker;
 
 996
 997	hash_for_each_possible(pool->busy_hash, worker, hentry,
 998			       (unsigned long)work)
 999		if (worker->current_work == work &&
1000		    worker->current_func == work->func)
1001			return worker;
1002
1003	return NULL;
1004}
1005
1006/**
1007 * move_linked_works - move linked works to a list
1008 * @work: start of series of works to be scheduled
1009 * @head: target list to append @work to
 1010 * @nextp: out parameter for nested worklist walking
1011 *
1012 * Schedule linked works starting from @work to @head.  Work series to
1013 * be scheduled starts at @work and includes any consecutive work with
1014 * WORK_STRUCT_LINKED set in its predecessor.
1015 *
1016 * If @nextp is not NULL, it's updated to point to the next work of
1017 * the last scheduled work.  This allows move_linked_works() to be
1018 * nested inside outer list_for_each_entry_safe().
1019 *
1020 * CONTEXT:
1021 * spin_lock_irq(pool->lock).
1022 */
1023static void move_linked_works(struct work_struct *work, struct list_head *head,
1024			      struct work_struct **nextp)
1025{
1026	struct work_struct *n;
1027
1028	/*
 1029	 * A linked worklist always ends before the end of the list;
 1030	 * use NULL for the list head.
1031	 */
1032	list_for_each_entry_safe_from(work, n, NULL, entry) {
1033		list_move_tail(&work->entry, head);
1034		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1035			break;
1036	}
1037
1038	/*
1039	 * If we're already inside safe list traversal and have moved
1040	 * multiple works to the scheduled queue, the next position
1041	 * needs to be updated.
1042	 */
1043	if (nextp)
1044		*nextp = n;
1045}
1046
1047/**
1048 * get_pwq - get an extra reference on the specified pool_workqueue
1049 * @pwq: pool_workqueue to get
1050 *
1051 * Obtain an extra reference on @pwq.  The caller should guarantee that
1052 * @pwq has positive refcnt and be holding the matching pool->lock.
1053 */
1054static void get_pwq(struct pool_workqueue *pwq)
1055{
1056	lockdep_assert_held(&pwq->pool->lock);
1057	WARN_ON_ONCE(pwq->refcnt <= 0);
1058	pwq->refcnt++;
1059}
1060
1061/**
1062 * put_pwq - put a pool_workqueue reference
1063 * @pwq: pool_workqueue to put
1064 *
1065 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1066 * destruction.  The caller should be holding the matching pool->lock.
1067 */
1068static void put_pwq(struct pool_workqueue *pwq)
1069{
1070	lockdep_assert_held(&pwq->pool->lock);
1071	if (likely(--pwq->refcnt))
1072		return;
1073	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1074		return;
1075	/*
1076	 * @pwq can't be released under pool->lock, bounce to
1077	 * pwq_unbound_release_workfn().  This never recurses on the same
1078	 * pool->lock as this path is taken only for unbound workqueues and
1079	 * the release work item is scheduled on a per-cpu workqueue.  To
1080	 * avoid lockdep warning, unbound pool->locks are given lockdep
1081	 * subclass of 1 in get_unbound_pool().
1082	 */
1083	schedule_work(&pwq->unbound_release_work);
1084}
1085
1086/**
1087 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1088 * @pwq: pool_workqueue to put (can be %NULL)
1089 *
1090 * put_pwq() with locking.  This function also allows %NULL @pwq.
1091 */
1092static void put_pwq_unlocked(struct pool_workqueue *pwq)
1093{
1094	if (pwq) {
1095		/*
1096		 * As both pwqs and pools are sched-RCU protected, the
1097		 * following lock operations are safe.
1098		 */
1099		spin_lock_irq(&pwq->pool->lock);
1100		put_pwq(pwq);
1101		spin_unlock_irq(&pwq->pool->lock);
1102	}
1103}
1104
1105static void pwq_activate_delayed_work(struct work_struct *work)
1106{
1107	struct pool_workqueue *pwq = get_work_pwq(work);
1108
1109	trace_workqueue_activate_work(work);
1110	move_linked_works(work, &pwq->pool->worklist, NULL);
1111	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1112	pwq->nr_active++;
1113}
1114
1115static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1116{
1117	struct work_struct *work = list_first_entry(&pwq->delayed_works,
1118						    struct work_struct, entry);
1119
1120	pwq_activate_delayed_work(work);
1121}
1122
1123/**
1124 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1125 * @pwq: pwq of interest
1126 * @color: color of work which left the queue
1127 *
1128 * A work either has completed or is removed from pending queue,
1129 * decrement nr_in_flight of its pwq and handle workqueue flushing.
 
1130 *
1131 * CONTEXT:
1132 * spin_lock_irq(pool->lock).
1133 */
1134static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 
1135{
1136	/* uncolored work items don't participate in flushing or nr_active */
1137	if (color == WORK_NO_COLOR)
1138		goto out_put;
1139
1140	pwq->nr_in_flight[color]--;
1141
1142	pwq->nr_active--;
1143	if (!list_empty(&pwq->delayed_works)) {
1144		/* one down, submit a delayed one */
1145		if (pwq->nr_active < pwq->max_active)
1146			pwq_activate_first_delayed(pwq);
1147	}
1148
1149	/* is flush in progress and are we at the flushing tip? */
1150	if (likely(pwq->flush_color != color))
1151		goto out_put;
1152
1153	/* are there still in-flight works? */
1154	if (pwq->nr_in_flight[color])
1155		goto out_put;
1156
1157	/* this pwq is done, clear flush_color */
1158	pwq->flush_color = -1;
1159
1160	/*
1161	 * If this was the last pwq, wake up the first flusher.  It
1162	 * will handle the rest.
1163	 */
1164	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1165		complete(&pwq->wq->first_flusher->done);
1166out_put:
1167	put_pwq(pwq);
1168}
1169
1170/**
1171 * try_to_grab_pending - steal work item from worklist and disable irq
1172 * @work: work item to steal
1173 * @is_dwork: @work is a delayed_work
1174 * @flags: place to store irq state
1175 *
1176 * Try to grab PENDING bit of @work.  This function can handle @work in any
1177 * stable state - idle, on timer or on worklist.
1178 *
1179 * Return:
1180 *  1		if @work was pending and we successfully stole PENDING
1181 *  0		if @work was idle and we claimed PENDING
1182 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
1183 *  -ENOENT	if someone else is canceling @work, this state may persist
1184 *		for arbitrarily long
1185 *
1186 * Note:
1187 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1188 * interrupted while holding PENDING and @work off queue, irq must be
1189 * disabled on entry.  This, combined with delayed_work->timer being
 1190 * irqsafe, ensures that we return -EAGAIN only for a finite, short period of time.
 
1191 *
1192 * On successful return, >= 0, irq is disabled and the caller is
1193 * responsible for releasing it using local_irq_restore(*@flags).
1194 *
1195 * This function is safe to call from any context including IRQ handler.
 
1196 */
1197static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1198			       unsigned long *flags)
1199{
1200	struct worker_pool *pool;
1201	struct pool_workqueue *pwq;
1202
1203	local_irq_save(*flags);
 
1204
1205	/* try to steal the timer if it exists */
1206	if (is_dwork) {
1207		struct delayed_work *dwork = to_delayed_work(work);
1208
1209		/*
1210		 * dwork->timer is irqsafe.  If del_timer() fails, it's
1211		 * guaranteed that the timer is not queued anywhere and not
1212		 * running on the local CPU.
1213		 */
1214		if (likely(del_timer(&dwork->timer)))
1215			return 1;
1216	}
1217
1218	/* try to claim PENDING the normal way */
1219	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1220		return 0;
1221
1222	/*
1223	 * The queueing is in progress, or it is already queued. Try to
1224	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1225	 */
1226	pool = get_work_pool(work);
1227	if (!pool)
1228		goto fail;
1229
1230	spin_lock(&pool->lock);
1231	/*
1232	 * work->data is guaranteed to point to pwq only while the work
1233	 * item is queued on pwq->wq, and both updating work->data to point
1234	 * to pwq on queueing and to pool on dequeueing are done under
1235	 * pwq->pool->lock.  This in turn guarantees that, if work->data
1236	 * points to pwq which is associated with a locked pool, the work
1237	 * item is currently queued on that pool.
1238	 */
1239	pwq = get_work_pwq(work);
1240	if (pwq && pwq->pool == pool) {
1241		debug_work_deactivate(work);
1242
1243		/*
1244		 * A delayed work item cannot be grabbed directly because
1245		 * it might have linked NO_COLOR work items which, if left
1246		 * on the delayed_list, will confuse pwq->nr_active
1247		 * management later on and cause stall.  Make sure the work
1248		 * item is activated before grabbing.
1249		 */
1250		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1251			pwq_activate_delayed_work(work);
1252
1253		list_del_init(&work->entry);
1254		pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
1255
1256		/* work->data points to pwq iff queued, point to pool */
1257		set_work_pool_and_keep_pending(work, pool->id);
1258
1259		spin_unlock(&pool->lock);
1260		return 1;
1261	}
1262	spin_unlock(&pool->lock);
1263fail:
1264	local_irq_restore(*flags);
1265	if (work_is_canceling(work))
1266		return -ENOENT;
1267	cpu_relax();
1268	return -EAGAIN;
1269}
1270
1271/**
1272 * insert_work - insert a work into a pool
1273 * @pwq: pwq @work belongs to
1274 * @work: work to insert
1275 * @head: insertion point
1276 * @extra_flags: extra WORK_STRUCT_* flags to set
1277 *
1278 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1279 * work_struct flags.
1280 *
1281 * CONTEXT:
1282 * spin_lock_irq(pool->lock).
1283 */
1284static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1285			struct list_head *head, unsigned int extra_flags)
 
1286{
1287	struct worker_pool *pool = pwq->pool;
1288
1289	/* we own @work, set data and link */
1290	set_work_pwq(work, pwq, extra_flags);
1291	list_add_tail(&work->entry, head);
1292	get_pwq(pwq);
1293
1294	/*
1295	 * Ensure either wq_worker_sleeping() sees the above
1296	 * list_add_tail() or we see zero nr_running to avoid workers lying
1297	 * around lazily while there are works to be processed.
1298	 */
1299	smp_mb();
1300
1301	if (__need_more_worker(pool))
1302		wake_up_worker(pool);
1303}
1304
1305/*
1306 * Test whether @work is being queued from another work executing on the
1307 * same workqueue.
 
1308 */
1309static bool is_chained_work(struct workqueue_struct *wq)
1310{
1311	struct worker *worker;
 
1312
1313	worker = current_wq_worker();
1314	/*
 1315	 * Return %true iff I'm a worker executing a work item on @wq.  If
1316	 * I'm @worker, it's safe to dereference it without locking.
1317	 */
1318	return worker && worker->current_pwq->wq == wq;
1319}
1320
1321static void __queue_work(int cpu, struct workqueue_struct *wq,
1322			 struct work_struct *work)
1323{
1324	struct pool_workqueue *pwq;
1325	struct worker_pool *last_pool;
1326	struct list_head *worklist;
1327	unsigned int work_flags;
1328	unsigned int req_cpu = cpu;
1329
1330	/*
1331	 * While a work item is PENDING && off queue, a task trying to
1332	 * steal the PENDING will busy-loop waiting for it to either get
1333	 * queued or lose PENDING.  Grabbing PENDING and queueing should
1334	 * happen with IRQ disabled.
1335	 */
1336	WARN_ON_ONCE(!irqs_disabled());
1337
1338	debug_work_activate(work);
1339
1340	/* if draining, only works from the same workqueue are allowed */
1341	if (unlikely(wq->flags & __WQ_DRAINING) &&
1342	    WARN_ON_ONCE(!is_chained_work(wq)))
1343		return;
1344retry:
1345	if (req_cpu == WORK_CPU_UNBOUND)
1346		cpu = raw_smp_processor_id();
1347
1348	/* pwq which will be used unless @work is executing elsewhere */
1349	if (!(wq->flags & WQ_UNBOUND))
1350		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1351	else
1352		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1353
1354	/*
1355	 * If @work was previously on a different pool, it might still be
1356	 * running there, in which case the work needs to be queued on that
1357	 * pool to guarantee non-reentrancy.
1358	 */
1359	last_pool = get_work_pool(work);
1360	if (last_pool && last_pool != pwq->pool) {
1361		struct worker *worker;
1362
1363		spin_lock(&last_pool->lock);
 
1364
1365		worker = find_worker_executing_work(last_pool, work);
1366
1367		if (worker && worker->current_pwq->wq == wq) {
1368			pwq = worker->current_pwq;
1369		} else {
1370			/* meh... not running there, queue here */
1371			spin_unlock(&last_pool->lock);
1372			spin_lock(&pwq->pool->lock);
1373		}
1374	} else {
1375		spin_lock(&pwq->pool->lock);
1376	}
1377
1378	/*
1379	 * pwq is determined and locked.  For unbound pools, we could have
1380	 * raced with pwq release and it could already be dead.  If its
1381	 * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1382	 * without another pwq replacing it in the numa_pwq_tbl or while
1383	 * work items are executing on it, so the retrying is guaranteed to
1384	 * make forward-progress.
1385	 */
1386	if (unlikely(!pwq->refcnt)) {
1387		if (wq->flags & WQ_UNBOUND) {
1388			spin_unlock(&pwq->pool->lock);
1389			cpu_relax();
1390			goto retry;
1391		}
1392		/* oops */
1393		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1394			  wq->name, cpu);
1395	}
1396
1397	/* pwq determined, queue */
1398	trace_workqueue_queue_work(req_cpu, pwq, work);
 
1399
1400	if (WARN_ON(!list_empty(&work->entry))) {
1401		spin_unlock(&pwq->pool->lock);
1402		return;
1403	}
1404
1405	pwq->nr_in_flight[pwq->work_color]++;
1406	work_flags = work_color_to_flags(pwq->work_color);
1407
1408	if (likely(pwq->nr_active < pwq->max_active)) {
1409		trace_workqueue_activate_work(work);
1410		pwq->nr_active++;
1411		worklist = &pwq->pool->worklist;
1412	} else {
1413		work_flags |= WORK_STRUCT_DELAYED;
1414		worklist = &pwq->delayed_works;
1415	}
1416
1417	insert_work(pwq, work, worklist, work_flags);
1418
1419	spin_unlock(&pwq->pool->lock);
1420}
 
1421
1422/**
1423 * queue_work_on - queue work on specific cpu
1424 * @cpu: CPU number to execute work on
1425 * @wq: workqueue to use
1426 * @work: work to queue
1427 *
 1428 * We queue the work to a specific CPU; the caller must ensure it
1429 * can't go away.
1430 *
1431 * Return: %false if @work was already on a queue, %true otherwise.
1432 */
1433bool queue_work_on(int cpu, struct workqueue_struct *wq,
1434		   struct work_struct *work)
1435{
1436	bool ret = false;
1437	unsigned long flags;
1438
1439	local_irq_save(flags);
1440
1441	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1442		__queue_work(cpu, wq, work);
1443		ret = true;
1444	}
1445
1446	local_irq_restore(flags);
1447	return ret;
1448}
1449EXPORT_SYMBOL(queue_work_on);
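/*
 * Editor's illustrative sketch (not part of the original file): typical use
 * of queue_work_on() from driver code.  The names my_work, my_handler and
 * my_setup are hypothetical; the work item must be initialized with
 * INIT_WORK() before it is queued, and the caller must ensure the chosen
 * CPU (here CPU 1) cannot go away.
 */
#if 0
static struct work_struct my_work;

static void my_handler(struct work_struct *work)
{
	/* executes in process context on the chosen CPU's worker pool */
}

static void my_setup(void)
{
	INIT_WORK(&my_work, my_handler);
	if (!queue_work_on(1, system_wq, &my_work))
		pr_debug("my_work was already pending\n");
}
#endif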
1450
1451void delayed_work_timer_fn(unsigned long __data)
1452{
1453	struct delayed_work *dwork = (struct delayed_work *)__data;
 
1454
1455	/* should have been called from irqsafe timer with irq already off */
1456	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
1457}
1458EXPORT_SYMBOL(delayed_work_timer_fn);
1459
1460static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1461				struct delayed_work *dwork, unsigned long delay)
1462{
1463	struct timer_list *timer = &dwork->timer;
1464	struct work_struct *work = &dwork->work;
1465
1466	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1467		     timer->data != (unsigned long)dwork);
1468	WARN_ON_ONCE(timer_pending(timer));
1469	WARN_ON_ONCE(!list_empty(&work->entry));
1470
1471	/*
1472	 * If @delay is 0, queue @dwork->work immediately.  This is for
1473	 * both optimization and correctness.  The earliest @timer can
1474	 * expire is on the closest next tick and delayed_work users depend
1475	 * on that there's no such delay when @delay is 0.
1476	 */
1477	if (!delay) {
1478		__queue_work(cpu, wq, &dwork->work);
1479		return;
1480	}
1481
1482	timer_stats_timer_set_start_info(&dwork->timer);
1483
1484	dwork->wq = wq;
1485	dwork->cpu = cpu;
1486	timer->expires = jiffies + delay;
1487
1488	if (unlikely(cpu != WORK_CPU_UNBOUND))
1489		add_timer_on(timer, cpu);
1490	else
1491		add_timer(timer);
1492}
 
1493
1494/**
1495 * queue_delayed_work_on - queue work on specific CPU after delay
1496 * @cpu: CPU number to execute work on
1497 * @wq: workqueue to use
1498 * @dwork: work to queue
1499 * @delay: number of jiffies to wait before queueing
1500 *
1501 * Return: %false if @work was already on a queue, %true otherwise.  If
1502 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1503 * execution.
1504 */
1505bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1506			   struct delayed_work *dwork, unsigned long delay)
1507{
1508	struct work_struct *work = &dwork->work;
1509	bool ret = false;
1510	unsigned long flags;
1511
1512	/* read the comment in __queue_work() */
1513	local_irq_save(flags);
1514
1515	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1516		__queue_delayed_work(cpu, wq, dwork, delay);
1517		ret = true;
1518	}
1519
1520	local_irq_restore(flags);
1521	return ret;
1522}
1523EXPORT_SYMBOL(queue_delayed_work_on);
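/*
 * Editor's illustrative sketch (not part of the original file): queueing a
 * delayed work item roughly one second into the future.  The names my_dwork,
 * my_timeout_fn and my_arm are hypothetical.
 */
#if 0
static struct delayed_work my_dwork;

static void my_timeout_fn(struct work_struct *work)
{
	/* to_delayed_work(work) would recover &my_dwork here */
}

static void my_arm(void)
{
	INIT_DELAYED_WORK(&my_dwork, my_timeout_fn);
	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork,
			      msecs_to_jiffies(1000));
}
#endif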
1524
1525/**
1526 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1527 * @cpu: CPU number to execute work on
1528 * @wq: workqueue to use
1529 * @dwork: work to queue
1530 * @delay: number of jiffies to wait before queueing
1531 *
1532 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1533 * modify @dwork's timer so that it expires after @delay.  If @delay is
1534 * zero, @work is guaranteed to be scheduled immediately regardless of its
1535 * current state.
1536 *
1537 * Return: %false if @dwork was idle and queued, %true if @dwork was
1538 * pending and its timer was modified.
1539 *
1540 * This function is safe to call from any context including IRQ handler.
1541 * See try_to_grab_pending() for details.
1542 */
1543bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1544			 struct delayed_work *dwork, unsigned long delay)
1545{
1546	unsigned long flags;
1547	int ret;
1548
1549	do {
1550		ret = try_to_grab_pending(&dwork->work, true, &flags);
1551	} while (unlikely(ret == -EAGAIN));
1552
1553	if (likely(ret >= 0)) {
1554		__queue_delayed_work(cpu, wq, dwork, delay);
1555		local_irq_restore(flags);
1556	}
1557
1558	/* -ENOENT from try_to_grab_pending() becomes %true */
1559	return ret;
1560}
1561EXPORT_SYMBOL_GPL(mod_delayed_work_on);
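/*
 * Editor's note (not part of the original file): mod_delayed_work_on() is
 * commonly used to (re)arm a timeout, e.g. pushing an inactivity timer back
 * every time activity is observed.  my_idle_dwork is hypothetical.
 */
#if 0
	/* restart the 5 second idle timeout */
	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_idle_dwork,
			    5 * HZ);
#endif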
1562
1563/**
1564 * worker_enter_idle - enter idle state
1565 * @worker: worker which is entering idle state
1566 *
1567 * @worker is entering idle state.  Update stats and idle timer if
1568 * necessary.
1569 *
1570 * LOCKING:
1571 * spin_lock_irq(pool->lock).
1572 */
1573static void worker_enter_idle(struct worker *worker)
1574{
1575	struct worker_pool *pool = worker->pool;
1576
1577	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1578	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
1579			 (worker->hentry.next || worker->hentry.pprev)))
1580		return;
1581
1582	/* can't use worker_set_flags(), also called from start_worker() */
1583	worker->flags |= WORKER_IDLE;
1584	pool->nr_idle++;
1585	worker->last_active = jiffies;
1586
1587	/* idle_list is LIFO */
1588	list_add(&worker->entry, &pool->idle_list);
1589
1590	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1591		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1592
1593	/*
1594	 * Sanity check nr_running.  Because wq_unbind_fn() releases
1595	 * pool->lock between setting %WORKER_UNBOUND and zapping
1596	 * nr_running, the warning may trigger spuriously.  Check iff
1597	 * unbind is not in progress.
1598	 */
1599	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1600		     pool->nr_workers == pool->nr_idle &&
1601		     atomic_read(&pool->nr_running));
1602}
1603
1604/**
1605 * worker_leave_idle - leave idle state
1606 * @worker: worker which is leaving idle state
1607 *
1608 * @worker is leaving idle state.  Update stats.
1609 *
1610 * LOCKING:
1611 * spin_lock_irq(pool->lock).
1612 */
1613static void worker_leave_idle(struct worker *worker)
1614{
1615	struct worker_pool *pool = worker->pool;
1616
1617	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1618		return;
1619	worker_clr_flags(worker, WORKER_IDLE);
1620	pool->nr_idle--;
1621	list_del_init(&worker->entry);
1622}
1623
1624/**
1625 * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
1626 * @pool: target worker_pool
1627 *
1628 * Bind %current to the cpu of @pool if it is associated and lock @pool.
1629 *
1630 * Works which are scheduled while the cpu is online must at least be
1631 * scheduled to a worker which is bound to the cpu so that if they are
1632 * flushed from cpu callbacks while cpu is going down, they are
1633 * guaranteed to execute on the cpu.
1634 *
1635 * This function is to be used by unbound workers and rescuers to bind
1636 * themselves to the target cpu and may race with cpu going down or
1637 * coming online.  kthread_bind() can't be used because it may put the
 1638 * worker to an already dead cpu and set_cpus_allowed_ptr() can't be used
1639 * verbatim as it's best effort and blocking and pool may be
1640 * [dis]associated in the meantime.
1641 *
 1642 * This function tries set_cpus_allowed_ptr(), locks the pool and verifies the
1643 * binding against %POOL_DISASSOCIATED which is set during
1644 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
1645 * enters idle state or fetches works without dropping lock, it can
1646 * guarantee the scheduling requirement described in the first paragraph.
1647 *
1648 * CONTEXT:
1649 * Might sleep.  Called without any lock but returns with pool->lock
1650 * held.
1651 *
1652 * Return:
1653 * %true if the associated pool is online (@worker is successfully
1654 * bound), %false if offline.
1655 */
1656static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
1657__acquires(&pool->lock)
1658{
1659	while (true) {
1660		/*
1661		 * The following call may fail, succeed or succeed
1662		 * without actually migrating the task to the cpu if
1663		 * it races with cpu hotunplug operation.  Verify
1664		 * against POOL_DISASSOCIATED.
1665		 */
1666		if (!(pool->flags & POOL_DISASSOCIATED))
1667			set_cpus_allowed_ptr(current, pool->attrs->cpumask);
1668
1669		spin_lock_irq(&pool->lock);
1670		if (pool->flags & POOL_DISASSOCIATED)
1671			return false;
1672		if (task_cpu(current) == pool->cpu &&
1673		    cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
1674			return true;
1675		spin_unlock_irq(&pool->lock);
1676
1677		/*
1678		 * We've raced with CPU hot[un]plug.  Give it a breather
1679		 * and retry migration.  cond_resched() is required here;
1680		 * otherwise, we might deadlock against cpu_stop trying to
1681		 * bring down the CPU on non-preemptive kernel.
1682		 */
1683		cpu_relax();
1684		cond_resched();
1685	}
1686}
1687
1688static struct worker *alloc_worker(void)
1689{
1690	struct worker *worker;
1691
1692	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1693	if (worker) {
1694		INIT_LIST_HEAD(&worker->entry);
1695		INIT_LIST_HEAD(&worker->scheduled);
1696		/* on creation a worker is in !idle && prep state */
1697		worker->flags = WORKER_PREP;
1698	}
1699	return worker;
1700}
1701
1702/**
1703 * create_worker - create a new workqueue worker
1704 * @pool: pool the new worker will belong to
1705 *
1706 * Create a new worker which is bound to @pool.  The returned worker
1707 * can be started by calling start_worker() or destroyed using
1708 * destroy_worker().
1709 *
1710 * CONTEXT:
1711 * Might sleep.  Does GFP_KERNEL allocations.
1712 *
1713 * Return:
1714 * Pointer to the newly created worker.
1715 */
1716static struct worker *create_worker(struct worker_pool *pool)
1717{
1718	struct worker *worker = NULL;
1719	int id = -1;
1720	char id_buf[16];
1721
1722	lockdep_assert_held(&pool->manager_mutex);
1723
1724	/*
1725	 * ID is needed to determine kthread name.  Allocate ID first
1726	 * without installing the pointer.
1727	 */
1728	idr_preload(GFP_KERNEL);
1729	spin_lock_irq(&pool->lock);
1730
1731	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
1732
1733	spin_unlock_irq(&pool->lock);
1734	idr_preload_end();
1735	if (id < 0)
1736		goto fail;
1737
1738	worker = alloc_worker();
1739	if (!worker)
1740		goto fail;
1741
1742	worker->pool = pool;
1743	worker->id = id;
1744
1745	if (pool->cpu >= 0)
1746		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1747			 pool->attrs->nice < 0  ? "H" : "");
1748	else
1749		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1750
1751	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1752					      "kworker/%s", id_buf);
1753	if (IS_ERR(worker->task))
1754		goto fail;
1755
1756	set_user_nice(worker->task, pool->attrs->nice);
1757
1758	/* prevent userland from meddling with cpumask of workqueue workers */
1759	worker->task->flags |= PF_NO_SETAFFINITY;
1760
1761	/*
1762	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1763	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
1764	 */
1765	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1766
1767	/*
1768	 * The caller is responsible for ensuring %POOL_DISASSOCIATED
1769	 * remains stable across this function.  See the comments above the
1770	 * flag definition for details.
1771	 */
1772	if (pool->flags & POOL_DISASSOCIATED)
1773		worker->flags |= WORKER_UNBOUND;
1774
1775	/* successful, commit the pointer to idr */
1776	spin_lock_irq(&pool->lock);
1777	idr_replace(&pool->worker_idr, worker, worker->id);
1778	spin_unlock_irq(&pool->lock);
1779
1780	return worker;
1781
1782fail:
1783	if (id >= 0) {
1784		spin_lock_irq(&pool->lock);
1785		idr_remove(&pool->worker_idr, id);
1786		spin_unlock_irq(&pool->lock);
1787	}
1788	kfree(worker);
1789	return NULL;
1790}
1791
1792/**
1793 * start_worker - start a newly created worker
1794 * @worker: worker to start
1795 *
1796 * Make the pool aware of @worker and start it.
1797 *
1798 * CONTEXT:
1799 * spin_lock_irq(pool->lock).
1800 */
1801static void start_worker(struct worker *worker)
1802{
1803	worker->flags |= WORKER_STARTED;
1804	worker->pool->nr_workers++;
1805	worker_enter_idle(worker);
1806	wake_up_process(worker->task);
1807}
1808
1809/**
1810 * create_and_start_worker - create and start a worker for a pool
1811 * @pool: the target pool
1812 *
1813 * Grab the managership of @pool and create and start a new worker for it.
1814 *
1815 * Return: 0 on success. A negative error code otherwise.
1816 */
1817static int create_and_start_worker(struct worker_pool *pool)
1818{
1819	struct worker *worker;
1820
1821	mutex_lock(&pool->manager_mutex);
1822
1823	worker = create_worker(pool);
1824	if (worker) {
1825		spin_lock_irq(&pool->lock);
1826		start_worker(worker);
1827		spin_unlock_irq(&pool->lock);
1828	}
1829
1830	mutex_unlock(&pool->manager_mutex);
1831
1832	return worker ? 0 : -ENOMEM;
1833}
1834
1835/**
1836 * destroy_worker - destroy a workqueue worker
1837 * @worker: worker to be destroyed
1838 *
1839 * Destroy @worker and adjust @pool stats accordingly.
1840 *
1841 * CONTEXT:
1842 * spin_lock_irq(pool->lock) which is released and regrabbed.
1843 */
1844static void destroy_worker(struct worker *worker)
1845{
1846	struct worker_pool *pool = worker->pool;
1847
1848	lockdep_assert_held(&pool->manager_mutex);
1849	lockdep_assert_held(&pool->lock);
1850
1851	/* sanity check frenzy */
1852	if (WARN_ON(worker->current_work) ||
1853	    WARN_ON(!list_empty(&worker->scheduled)))
1854		return;
1855
1856	if (worker->flags & WORKER_STARTED)
1857		pool->nr_workers--;
1858	if (worker->flags & WORKER_IDLE)
1859		pool->nr_idle--;
1860
1861	/*
1862	 * Once WORKER_DIE is set, the kworker may destroy itself at any
1863	 * point.  Pin to ensure the task stays until we're done with it.
1864	 */
1865	get_task_struct(worker->task);
1866
1867	list_del_init(&worker->entry);
1868	worker->flags |= WORKER_DIE;
1869
1870	idr_remove(&pool->worker_idr, worker->id);
1871
1872	spin_unlock_irq(&pool->lock);
1873
1874	kthread_stop(worker->task);
1875	put_task_struct(worker->task);
1876	kfree(worker);
1877
1878	spin_lock_irq(&pool->lock);
1879}
1880
1881static void idle_worker_timeout(unsigned long __pool)
1882{
1883	struct worker_pool *pool = (void *)__pool;
1884
1885	spin_lock_irq(&pool->lock);
1886
1887	if (too_many_workers(pool)) {
1888		struct worker *worker;
1889		unsigned long expires;
1890
1891		/* idle_list is kept in LIFO order, check the last one */
1892		worker = list_entry(pool->idle_list.prev, struct worker, entry);
1893		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1894
1895		if (time_before(jiffies, expires))
1896			mod_timer(&pool->idle_timer, expires);
1897		else {
1898			/* it's been idle for too long, wake up manager */
1899			pool->flags |= POOL_MANAGE_WORKERS;
1900			wake_up_worker(pool);
1901		}
1902	}
1903
1904	spin_unlock_irq(&pool->lock);
1905}
1906
1907static void send_mayday(struct work_struct *work)
1908{
1909	struct pool_workqueue *pwq = get_work_pwq(work);
1910	struct workqueue_struct *wq = pwq->wq;
1911
1912	lockdep_assert_held(&wq_mayday_lock);
1913
1914	if (!wq->rescuer)
1915		return;
1916
1917	/* mayday mayday mayday */
1918	if (list_empty(&pwq->mayday_node)) {
1919		/*
1920		 * If @pwq is for an unbound wq, its base ref may be put at
1921		 * any time due to an attribute change.  Pin @pwq until the
1922		 * rescuer is done with it.
1923		 */
1924		get_pwq(pwq);
1925		list_add_tail(&pwq->mayday_node, &wq->maydays);
1926		wake_up_process(wq->rescuer->task);
1927	}
1928}
1929
1930static void pool_mayday_timeout(unsigned long __pool)
1931{
1932	struct worker_pool *pool = (void *)__pool;
1933	struct work_struct *work;
1934
1935	spin_lock_irq(&wq_mayday_lock);		/* for wq->maydays */
1936	spin_lock(&pool->lock);
1937
1938	if (need_to_create_worker(pool)) {
1939		/*
1940		 * We've been trying to create a new worker but
1941		 * haven't been successful.  We might be hitting an
1942		 * allocation deadlock.  Send distress signals to
1943		 * rescuers.
1944		 */
1945		list_for_each_entry(work, &pool->worklist, entry)
1946			send_mayday(work);
1947	}
1948
1949	spin_unlock(&pool->lock);
1950	spin_unlock_irq(&wq_mayday_lock);
1951
1952	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1953}
1954
1955/**
1956 * maybe_create_worker - create a new worker if necessary
1957 * @pool: pool to create a new worker for
1958 *
1959 * Create a new worker for @pool if necessary.  @pool is guaranteed to
1960 * have at least one idle worker on return from this function.  If
1961 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1962 * sent to all rescuers with works scheduled on @pool to resolve
1963 * possible allocation deadlock.
1964 *
1965 * On return, need_to_create_worker() is guaranteed to be %false and
1966 * may_start_working() %true.
1967 *
1968 * LOCKING:
1969 * spin_lock_irq(pool->lock) which may be released and regrabbed
1970 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1971 * manager.
1972 *
1973 * Return:
1974 * %false if no action was taken and pool->lock stayed locked, %true
1975 * otherwise.
1976 */
1977static bool maybe_create_worker(struct worker_pool *pool)
1978__releases(&pool->lock)
1979__acquires(&pool->lock)
1980{
1981	if (!need_to_create_worker(pool))
1982		return false;
1983restart:
1984	spin_unlock_irq(&pool->lock);
1985
1986	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1987	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1988
1989	while (true) {
1990		struct worker *worker;
1991
1992		worker = create_worker(pool);
1993		if (worker) {
1994			del_timer_sync(&pool->mayday_timer);
1995			spin_lock_irq(&pool->lock);
1996			start_worker(worker);
1997			if (WARN_ON_ONCE(need_to_create_worker(pool)))
1998				goto restart;
1999			return true;
2000		}
2001
2002		if (!need_to_create_worker(pool))
2003			break;
2004
2005		__set_current_state(TASK_INTERRUPTIBLE);
2006		schedule_timeout(CREATE_COOLDOWN);
2007
2008		if (!need_to_create_worker(pool))
2009			break;
2010	}
2011
2012	del_timer_sync(&pool->mayday_timer);
2013	spin_lock_irq(&pool->lock);
2014	if (need_to_create_worker(pool))
2015		goto restart;
2016	return true;
2017}
2018
2019/**
2020 * maybe_destroy_worker - destroy workers which have been idle for a while
2021 * @pool: pool to destroy workers for
2022 *
2023 * Destroy @pool workers which have been idle for longer than
2024 * IDLE_WORKER_TIMEOUT.
2025 *
2026 * LOCKING:
2027 * spin_lock_irq(pool->lock) which may be released and regrabbed
2028 * multiple times.  Called only from manager.
2029 *
2030 * Return:
2031 * %false if no action was taken and pool->lock stayed locked, %true
2032 * otherwise.
2033 */
2034static bool maybe_destroy_workers(struct worker_pool *pool)
2035{
2036	bool ret = false;
2037
2038	while (too_many_workers(pool)) {
2039		struct worker *worker;
2040		unsigned long expires;
2041
2042		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2043		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2044
2045		if (time_before(jiffies, expires)) {
2046			mod_timer(&pool->idle_timer, expires);
2047			break;
2048		}
2049
2050		destroy_worker(worker);
2051		ret = true;
2052	}
2053
2054	return ret;
2055}
2056
2057/**
2058 * manage_workers - manage worker pool
2059 * @worker: self
2060 *
2061 * Assume the manager role and manage the worker pool @worker belongs
2062 * to.  At any given time, there can be only zero or one manager per
2063 * pool.  The exclusion is handled automatically by this function.
2064 *
2065 * The caller can safely start processing works on false return.  On
2066 * true return, it's guaranteed that need_to_create_worker() is false
2067 * and may_start_working() is true.
2068 *
2069 * CONTEXT:
2070 * spin_lock_irq(pool->lock) which may be released and regrabbed
2071 * multiple times.  Does GFP_KERNEL allocations.
2072 *
2073 * Return:
2074 * %false if the pool doesn't need management and the caller can safely start
2075 * processing works, %true indicates that the function released pool->lock
2076 * and reacquired it to perform some management function and that the
2077 * conditions that the caller verified while holding the lock before
2078 * calling the function might no longer be true.
2079 */
2080static bool manage_workers(struct worker *worker)
2081{
2082	struct worker_pool *pool = worker->pool;
2083	bool ret = false;
2084
2085	/*
2086	 * Managership is governed by two mutexes - manager_arb and
2087	 * manager_mutex.  manager_arb handles arbitration of manager role.
2088	 * Anyone who successfully grabs manager_arb wins the arbitration
2089	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
2090	 * failure while holding pool->lock reliably indicates that someone
2091	 * else is managing the pool and the worker which failed trylock
2092	 * can proceed to executing work items.  This means that anyone
2093	 * grabbing manager_arb is responsible for actually performing
2094	 * manager duties.  If manager_arb is grabbed and released without
2095	 * actual management, the pool may stall indefinitely.
2096	 *
2097	 * manager_mutex is used for exclusion of actual management
2098	 * operations.  The holder of manager_mutex can be sure that no
2099	 * management operations, including creation and destruction of
2100	 * workers, will take place until the mutex is released.  Because
2101	 * manager_mutex doesn't interfere with manager role arbitration,
2102	 * it is guaranteed that the pool's management, while it may be
2103	 * delayed, won't be disturbed by someone else grabbing
2104	 * manager_mutex.
2105	 */
2106	if (!mutex_trylock(&pool->manager_arb))
2107		return ret;
2108
2109	/*
2110	 * With manager arbitration won, manager_mutex would be free in
2111	 * most cases.  trylock first without dropping @pool->lock.
2112	 */
2113	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
2114		spin_unlock_irq(&pool->lock);
2115		mutex_lock(&pool->manager_mutex);
2116		spin_lock_irq(&pool->lock);
2117		ret = true;
2118	}
2119
2120	pool->flags &= ~POOL_MANAGE_WORKERS;
2121
2122	/*
2123	 * Destroy and then create so that may_start_working() is true
2124	 * on return.
2125	 */
2126	ret |= maybe_destroy_workers(pool);
2127	ret |= maybe_create_worker(pool);
2128
2129	mutex_unlock(&pool->manager_mutex);
2130	mutex_unlock(&pool->manager_arb);
2131	return ret;
2132}
2133
2134/**
2135 * process_one_work - process single work
2136 * @worker: self
2137 * @work: work to process
2138 *
2139 * Process @work.  This function contains all the logic necessary to
2140 * process a single work including synchronization against and
2141 * interaction with other workers on the same cpu, queueing and
2142 * flushing.  As long as context requirement is met, any worker can
2143 * call this function to process a work.
2144 *
2145 * CONTEXT:
2146 * spin_lock_irq(pool->lock) which is released and regrabbed.
2147 */
2148static void process_one_work(struct worker *worker, struct work_struct *work)
2149__releases(&pool->lock)
2150__acquires(&pool->lock)
2151{
2152	struct pool_workqueue *pwq = get_work_pwq(work);
2153	struct worker_pool *pool = worker->pool;
2154	bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2155	int work_color;
2156	struct worker *collision;
2157#ifdef CONFIG_LOCKDEP
2158	/*
2159	 * It is permissible to free the struct work_struct from
2160	 * inside the function that is called from it, this we need to
2161	 * take into account for lockdep too.  To avoid bogus "held
2162	 * lock freed" warnings as well as problems when looking into
2163	 * work->lockdep_map, make a copy and use that here.
2164	 */
2165	struct lockdep_map lockdep_map;
2166
2167	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2168#endif
2169	/*
2170	 * Ensure we're on the correct CPU.  DISASSOCIATED test is
2171	 * necessary to avoid spurious warnings from rescuers servicing the
2172	 * unbound or a disassociated pool.
2173	 */
2174	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
2175		     !(pool->flags & POOL_DISASSOCIATED) &&
2176		     raw_smp_processor_id() != pool->cpu);
2177
2178	/*
2179	 * A single work shouldn't be executed concurrently by
2180	 * multiple workers on a single cpu.  Check whether anyone is
2181	 * already processing the work.  If so, defer the work to the
2182	 * currently executing one.
2183	 */
2184	collision = find_worker_executing_work(pool, work);
2185	if (unlikely(collision)) {
2186		move_linked_works(work, &collision->scheduled, NULL);
2187		return;
2188	}
2189
2190	/* claim and dequeue */
2191	debug_work_deactivate(work);
2192	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2193	worker->current_work = work;
2194	worker->current_func = work->func;
2195	worker->current_pwq = pwq;
2196	work_color = get_work_color(work);
2197
2198	list_del_init(&work->entry);
2199
2200	/*
2201	 * CPU intensive works don't participate in concurrency
2202	 * management.  They're the scheduler's responsibility.
2203	 */
2204	if (unlikely(cpu_intensive))
2205		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
2206
2207	/*
2208	 * Unbound pool isn't concurrency managed and work items should be
2209	 * executed ASAP.  Wake up another worker if necessary.
2210	 */
2211	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
2212		wake_up_worker(pool);
2213
2214	/*
2215	 * Record the last pool and clear PENDING which should be the last
2216	 * update to @work.  Also, do this inside @pool->lock so that
2217	 * PENDING and queued state changes happen together while IRQ is
2218	 * disabled.
2219	 */
2220	set_work_pool_and_clear_pending(work, pool->id);
2221
2222	spin_unlock_irq(&pool->lock);
2223
2224	lock_map_acquire_read(&pwq->wq->lockdep_map);
2225	lock_map_acquire(&lockdep_map);
2226	trace_workqueue_execute_start(work);
2227	worker->current_func(work);
2228	/*
2229	 * While we must be careful to not use "work" after this, the trace
2230	 * point will only record its address.
2231	 */
2232	trace_workqueue_execute_end(work);
2233	lock_map_release(&lockdep_map);
2234	lock_map_release(&pwq->wq->lockdep_map);
2235
2236	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2237		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2238		       "     last function: %pf\n",
2239		       current->comm, preempt_count(), task_pid_nr(current),
2240		       worker->current_func);
2241		debug_show_held_locks(current);
2242		dump_stack();
2243	}
2244
2245	/*
2246	 * The following prevents a kworker from hogging CPU on !PREEMPT
2247	 * kernels, where a requeueing work item waiting for something to
2248	 * happen could deadlock with stop_machine as such work item could
2249	 * indefinitely requeue itself while all other CPUs are trapped in
2250	 * stop_machine.
2251	 */
2252	cond_resched();
2253
2254	spin_lock_irq(&pool->lock);
2255
2256	/* clear cpu intensive status */
2257	if (unlikely(cpu_intensive))
2258		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2259
2260	/* we're done with it, release */
2261	hash_del(&worker->hentry);
2262	worker->current_work = NULL;
2263	worker->current_func = NULL;
2264	worker->current_pwq = NULL;
2265	worker->desc_valid = false;
2266	pwq_dec_nr_in_flight(pwq, work_color);
2267}
2268
2269/**
2270 * process_scheduled_works - process scheduled works
2271 * @worker: self
2272 *
2273 * Process all scheduled works.  Please note that the scheduled list
2274 * may change while processing a work, so this function repeatedly
2275 * fetches a work from the top and executes it.
2276 *
2277 * CONTEXT:
2278 * spin_lock_irq(pool->lock) which may be released and regrabbed
2279 * multiple times.
2280 */
2281static void process_scheduled_works(struct worker *worker)
2282{
2283	while (!list_empty(&worker->scheduled)) {
2284		struct work_struct *work = list_first_entry(&worker->scheduled,
2285						struct work_struct, entry);
2286		process_one_work(worker, work);
2287	}
2288}
2289
2290/**
2291 * worker_thread - the worker thread function
2292 * @__worker: self
2293 *
2294 * The worker thread function.  All workers belong to a worker_pool -
2295 * either a per-cpu one or dynamic unbound one.  These workers process all
2296 * work items regardless of their specific target workqueue.  The only
2297 * exception is work items which belong to workqueues with a rescuer which
2298 * will be explained in rescuer_thread().
2299 *
2300 * Return: 0
2301 */
2302static int worker_thread(void *__worker)
2303{
2304	struct worker *worker = __worker;
2305	struct worker_pool *pool = worker->pool;
2306
2307	/* tell the scheduler that this is a workqueue worker */
2308	worker->task->flags |= PF_WQ_WORKER;
2309woke_up:
2310	spin_lock_irq(&pool->lock);
2311
2312	/* am I supposed to die? */
2313	if (unlikely(worker->flags & WORKER_DIE)) {
2314		spin_unlock_irq(&pool->lock);
2315		WARN_ON_ONCE(!list_empty(&worker->entry));
2316		worker->task->flags &= ~PF_WQ_WORKER;
2317		return 0;
2318	}
2319
2320	worker_leave_idle(worker);
2321recheck:
2322	/* no more worker necessary? */
2323	if (!need_more_worker(pool))
2324		goto sleep;
2325
2326	/* do we need to manage? */
2327	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2328		goto recheck;
2329
2330	/*
2331	 * ->scheduled list can only be filled while a worker is
2332	 * preparing to process a work or actually processing it.
2333	 * Make sure nobody diddled with it while I was sleeping.
2334	 */
2335	WARN_ON_ONCE(!list_empty(&worker->scheduled));
2336
2337	/*
2338	 * Finish PREP stage.  We're guaranteed to have at least one idle
2339	 * worker or that someone else has already assumed the manager
2340	 * role.  This is where @worker starts participating in concurrency
2341	 * management if applicable and concurrency management is restored
2342	 * after being rebound.  See rebind_workers() for details.
2343	 */
2344	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2345
2346	do {
2347		struct work_struct *work =
2348			list_first_entry(&pool->worklist,
2349					 struct work_struct, entry);
2350
2351		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2352			/* optimization path, not strictly necessary */
2353			process_one_work(worker, work);
2354			if (unlikely(!list_empty(&worker->scheduled)))
2355				process_scheduled_works(worker);
2356		} else {
2357			move_linked_works(work, &worker->scheduled, NULL);
2358			process_scheduled_works(worker);
2359		}
2360	} while (keep_working(pool));
2361
2362	worker_set_flags(worker, WORKER_PREP, false);
2363sleep:
2364	if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
2365		goto recheck;
2366
2367	/*
2368	 * pool->lock is held and there's no work to process and no need to
2369	 * manage, sleep.  Workers are woken up only while holding
2370	 * pool->lock or from local cpu, so setting the current state
2371	 * before releasing pool->lock is enough to prevent losing any
2372	 * event.
2373	 */
2374	worker_enter_idle(worker);
2375	__set_current_state(TASK_INTERRUPTIBLE);
2376	spin_unlock_irq(&pool->lock);
2377	schedule();
2378	goto woke_up;
2379}
2380
2381/**
2382 * rescuer_thread - the rescuer thread function
2383 * @__rescuer: self
2384 *
2385 * Workqueue rescuer thread function.  There's one rescuer for each
2386 * workqueue which has WQ_MEM_RECLAIM set.
2387 *
2388 * Regular work processing on a pool may block trying to create a new
2389 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2390 * developing into deadlock if some works currently on the same queue
2391 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
2392 * the problem rescuer solves.
2393 *
2394 * When such condition is possible, the pool summons rescuers of all
2395 * workqueues which have works queued on the pool and lets them process
2396 * those works so that forward progress can be guaranteed.
2397 *
2398 * This should happen rarely.
2399 *
2400 * Return: 0
2401 */
2402static int rescuer_thread(void *__rescuer)
2403{
2404	struct worker *rescuer = __rescuer;
2405	struct workqueue_struct *wq = rescuer->rescue_wq;
2406	struct list_head *scheduled = &rescuer->scheduled;
2407	bool should_stop;
2408
2409	set_user_nice(current, RESCUER_NICE_LEVEL);
2410
2411	/*
2412	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2413	 * doesn't participate in concurrency management.
2414	 */
2415	rescuer->task->flags |= PF_WQ_WORKER;
2416repeat:
2417	set_current_state(TASK_INTERRUPTIBLE);
2418
2419	/*
2420	 * By the time the rescuer is requested to stop, the workqueue
2421	 * shouldn't have any work pending, but @wq->maydays may still have
2422	 * pwq(s) queued.  This can happen by non-rescuer workers consuming
2423	 * all the work items before the rescuer got to them.  Go through
2424	 * @wq->maydays processing before acting on should_stop so that the
2425	 * list is always empty on exit.
2426	 */
2427	should_stop = kthread_should_stop();
2428
2429	/* see whether any pwq is asking for help */
2430	spin_lock_irq(&wq_mayday_lock);
2431
2432	while (!list_empty(&wq->maydays)) {
2433		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2434					struct pool_workqueue, mayday_node);
2435		struct worker_pool *pool = pwq->pool;
2436		struct work_struct *work, *n;
2437
2438		__set_current_state(TASK_RUNNING);
2439		list_del_init(&pwq->mayday_node);
2440
2441		spin_unlock_irq(&wq_mayday_lock);
2442
2443		/* migrate to the target cpu if possible */
2444		worker_maybe_bind_and_lock(pool);
2445		rescuer->pool = pool;
2446
2447		/*
2448		 * Slurp in all works issued via this workqueue and
2449		 * process'em.
2450		 */
2451		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
2452		list_for_each_entry_safe(work, n, &pool->worklist, entry)
2453			if (get_work_pwq(work) == pwq)
2454				move_linked_works(work, scheduled, &n);
2455
2456		process_scheduled_works(rescuer);
2457
2458		/*
2459		 * Put the reference grabbed by send_mayday().  @pool won't
2460		 * go away while we're holding its lock.
2461		 */
2462		put_pwq(pwq);
2463
2464		/*
2465		 * Leave this pool.  If keep_working() is %true, notify a
2466		 * regular worker; otherwise, we end up with 0 concurrency
2467		 * and stalling the execution.
2468		 */
2469		if (keep_working(pool))
2470			wake_up_worker(pool);
2471
2472		rescuer->pool = NULL;
2473		spin_unlock(&pool->lock);
2474		spin_lock(&wq_mayday_lock);
2475	}
2476
2477	spin_unlock_irq(&wq_mayday_lock);
2478
2479	if (should_stop) {
2480		__set_current_state(TASK_RUNNING);
2481		rescuer->task->flags &= ~PF_WQ_WORKER;
2482		return 0;
2483	}
2484
2485	/* rescuers should never participate in concurrency management */
2486	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2487	schedule();
2488	goto repeat;
2489}
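/*
 * Editor's illustrative sketch (not part of the original file): a workqueue
 * used on the memory-reclaim path gets a rescuer by passing WQ_MEM_RECLAIM
 * to alloc_workqueue(), which is what arms the mayday machinery above.
 * "my_reclaim_wq" and my_wq_init() are hypothetical names.
 */
static struct workqueue_struct *my_reclaim_wq;

static int __init my_wq_init(void)
{
	my_reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
	if (!my_reclaim_wq)
		return -ENOMEM;
	return 0;
}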
2490
2491struct wq_barrier {
2492	struct work_struct	work;
2493	struct completion	done;
2494};
2495
2496static void wq_barrier_func(struct work_struct *work)
2497{
2498	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2499	complete(&barr->done);
2500}
2501
2502/**
2503 * insert_wq_barrier - insert a barrier work
2504 * @pwq: pwq to insert barrier into
2505 * @barr: wq_barrier to insert
2506 * @target: target work to attach @barr to
2507 * @worker: worker currently executing @target, NULL if @target is not executing
2508 *
2509 * @barr is linked to @target such that @barr is completed only after
2510 * @target finishes execution.  Please note that the ordering
2511 * guarantee is observed only with respect to @target and on the local
2512 * cpu.
2513 *
2514 * Currently, a queued barrier can't be canceled.  This is because
2515 * try_to_grab_pending() can't determine whether the work to be
2516 * grabbed is at the head of the queue and thus can't clear LINKED
2517 * flag of the previous work while there must be a valid next work
2518 * after a work with LINKED flag set.
2519 *
2520 * Note that when @worker is non-NULL, @target may be modified
2521 * underneath us, so we can't reliably determine pwq from @target.
2522 *
2523 * CONTEXT:
2524 * spin_lock_irq(pool->lock).
2525 */
2526static void insert_wq_barrier(struct pool_workqueue *pwq,
2527			      struct wq_barrier *barr,
2528			      struct work_struct *target, struct worker *worker)
2529{
2530	struct list_head *head;
2531	unsigned int linked = 0;
2532
2533	/*
2534	 * debugobject calls are safe here even with pool->lock locked
2535	 * as we know for sure that this will not trigger any of the
2536	 * checks and call back into the fixup functions where we
2537	 * might deadlock.
2538	 */
2539	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2540	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2541	init_completion(&barr->done);
2542
2543	/*
2544	 * If @target is currently being executed, schedule the
2545	 * barrier to the worker; otherwise, put it after @target.
2546	 */
2547	if (worker)
2548		head = worker->scheduled.next;
2549	else {
2550		unsigned long *bits = work_data_bits(target);
2551
2552		head = target->entry.next;
2553		/* there can already be other linked works, inherit and set */
2554		linked = *bits & WORK_STRUCT_LINKED;
2555		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2556	}
2557
2558	debug_work_activate(&barr->work);
2559	insert_work(pwq, &barr->work, head,
2560		    work_color_to_flags(WORK_NO_COLOR) | linked);
2561}
2562
2563/**
2564 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2565 * @wq: workqueue being flushed
2566 * @flush_color: new flush color, < 0 for no-op
2567 * @work_color: new work color, < 0 for no-op
2568 *
2569 * Prepare pwqs for workqueue flushing.
2570 *
2571 * If @flush_color is non-negative, flush_color on all pwqs should be
2572 * -1.  If no pwq has in-flight commands at the specified color, all
2573 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2574 * has in flight commands, its pwq->flush_color is set to
2575 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2576 * wakeup logic is armed and %true is returned.
2577 *
2578 * The caller should have initialized @wq->first_flusher prior to
2579 * calling this function with non-negative @flush_color.  If
2580 * @flush_color is negative, no flush color update is done and %false
2581 * is returned.
2582 *
2583 * If @work_color is non-negative, all pwqs should have the same
2584 * work_color which is previous to @work_color and all will be
2585 * advanced to @work_color.
2586 *
2587 * CONTEXT:
2588 * mutex_lock(wq->mutex).
2589 *
2590 * Return:
2591 * %true if @flush_color >= 0 and there's something to flush.  %false
2592 * otherwise.
2593 */
2594static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2595				      int flush_color, int work_color)
2596{
2597	bool wait = false;
2598	struct pool_workqueue *pwq;
2599
2600	if (flush_color >= 0) {
2601		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2602		atomic_set(&wq->nr_pwqs_to_flush, 1);
2603	}
2604
2605	for_each_pwq(pwq, wq) {
2606		struct worker_pool *pool = pwq->pool;
2607
2608		spin_lock_irq(&pool->lock);
2609
2610		if (flush_color >= 0) {
2611			WARN_ON_ONCE(pwq->flush_color != -1);
2612
2613			if (pwq->nr_in_flight[flush_color]) {
2614				pwq->flush_color = flush_color;
2615				atomic_inc(&wq->nr_pwqs_to_flush);
2616				wait = true;
2617			}
2618		}
2619
2620		if (work_color >= 0) {
2621			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2622			pwq->work_color = work_color;
2623		}
2624
2625		spin_unlock_irq(&pool->lock);
2626	}
2627
2628	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2629		complete(&wq->first_flusher->done);
2630
2631	return wait;
2632}
2633
2634/**
2635 * flush_workqueue - ensure that any scheduled work has run to completion.
2636 * @wq: workqueue to flush
2637 *
2638 * This function sleeps until all work items which were queued on entry
2639 * have finished execution, but it is not livelocked by new incoming ones.
2640 */
2641void flush_workqueue(struct workqueue_struct *wq)
2642{
2643	struct wq_flusher this_flusher = {
2644		.list = LIST_HEAD_INIT(this_flusher.list),
2645		.flush_color = -1,
2646		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2647	};
2648	int next_color;
2649
2650	lock_map_acquire(&wq->lockdep_map);
2651	lock_map_release(&wq->lockdep_map);
2652
2653	mutex_lock(&wq->mutex);
2654
2655	/*
2656	 * Start-to-wait phase
2657	 */
2658	next_color = work_next_color(wq->work_color);
2659
2660	if (next_color != wq->flush_color) {
2661		/*
2662		 * Color space is not full.  The current work_color
2663		 * becomes our flush_color and work_color is advanced
2664		 * by one.
2665		 */
2666		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2667		this_flusher.flush_color = wq->work_color;
2668		wq->work_color = next_color;
2669
2670		if (!wq->first_flusher) {
2671			/* no flush in progress, become the first flusher */
2672			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2673
2674			wq->first_flusher = &this_flusher;
2675
2676			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2677						       wq->work_color)) {
2678				/* nothing to flush, done */
2679				wq->flush_color = next_color;
2680				wq->first_flusher = NULL;
2681				goto out_unlock;
2682			}
2683		} else {
2684			/* wait in queue */
2685			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2686			list_add_tail(&this_flusher.list, &wq->flusher_queue);
2687			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2688		}
2689	} else {
2690		/*
2691		 * Oops, color space is full, wait on overflow queue.
2692		 * The next flush completion will assign us
2693		 * flush_color and transfer to flusher_queue.
2694		 */
2695		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2696	}
2697
2698	mutex_unlock(&wq->mutex);
2699
2700	wait_for_completion(&this_flusher.done);
2701
2702	/*
2703	 * Wake-up-and-cascade phase
2704	 *
2705	 * First flushers are responsible for cascading flushes and
2706	 * handling overflow.  Non-first flushers can simply return.
2707	 */
2708	if (wq->first_flusher != &this_flusher)
2709		return;
2710
2711	mutex_lock(&wq->mutex);
2712
2713	/* we might have raced, check again with mutex held */
2714	if (wq->first_flusher != &this_flusher)
2715		goto out_unlock;
2716
2717	wq->first_flusher = NULL;
2718
2719	WARN_ON_ONCE(!list_empty(&this_flusher.list));
2720	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2721
2722	while (true) {
2723		struct wq_flusher *next, *tmp;
2724
2725		/* complete all the flushers sharing the current flush color */
2726		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2727			if (next->flush_color != wq->flush_color)
2728				break;
2729			list_del_init(&next->list);
2730			complete(&next->done);
2731		}
2732
2733		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2734			     wq->flush_color != work_next_color(wq->work_color));
2735
2736		/* this flush_color is finished, advance by one */
2737		wq->flush_color = work_next_color(wq->flush_color);
2738
2739		/* one color has been freed, handle overflow queue */
2740		if (!list_empty(&wq->flusher_overflow)) {
2741			/*
2742			 * Assign the same color to all overflowed
2743			 * flushers, advance work_color and append to
2744			 * flusher_queue.  This is the start-to-wait
2745			 * phase for these overflowed flushers.
2746			 */
2747			list_for_each_entry(tmp, &wq->flusher_overflow, list)
2748				tmp->flush_color = wq->work_color;
2749
2750			wq->work_color = work_next_color(wq->work_color);
2751
2752			list_splice_tail_init(&wq->flusher_overflow,
2753					      &wq->flusher_queue);
2754			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2755		}
2756
2757		if (list_empty(&wq->flusher_queue)) {
2758			WARN_ON_ONCE(wq->flush_color != wq->work_color);
2759			break;
2760		}
2761
2762		/*
2763		 * Need to flush more colors.  Make the next flusher
2764		 * the new first flusher and arm pwqs.
2765		 */
2766		WARN_ON_ONCE(wq->flush_color == wq->work_color);
2767		WARN_ON_ONCE(wq->flush_color != next->flush_color);
2768
2769		list_del_init(&next->list);
2770		wq->first_flusher = next;
2771
2772		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2773			break;
2774
2775		/*
2776		 * Meh... this color is already done, clear first
2777		 * flusher and repeat cascading.
2778		 */
2779		wq->first_flusher = NULL;
2780	}
2781
2782out_unlock:
2783	mutex_unlock(&wq->mutex);
2784}
2785EXPORT_SYMBOL_GPL(flush_workqueue);
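/*
 * Editor's illustrative sketch (not part of the original file): waiting for
 * everything already queued on a private workqueue, e.g. before entering a
 * low-power state.  my_wq and my_wait_idle() are hypothetical names;
 * flush_workqueue() is the real interface documented above.
 */
static void my_wait_idle(struct workqueue_struct *my_wq)
{
	/* blocks until all work items queued so far have finished */
	flush_workqueue(my_wq);
}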
2786
2787/**
2788 * drain_workqueue - drain a workqueue
2789 * @wq: workqueue to drain
2790 *
2791 * Wait until the workqueue becomes empty.  While draining is in progress,
2792 * only chain queueing is allowed.  IOW, only currently pending or running
2793 * work items on @wq can queue further work items on it.  @wq is flushed
2794 * repeatedly until it becomes empty.  The number of flushes is determined
2795 * by the depth of chaining and should be relatively small.  Whine if it
2796 * takes too long.
2797 */
2798void drain_workqueue(struct workqueue_struct *wq)
2799{
2800	unsigned int flush_cnt = 0;
2801	struct pool_workqueue *pwq;
2802
2803	/*
2804	 * __queue_work() needs to test whether there are drainers; it is much
2805	 * hotter than drain_workqueue() and already looks at @wq->flags.
2806	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2807	 */
2808	mutex_lock(&wq->mutex);
2809	if (!wq->nr_drainers++)
2810		wq->flags |= __WQ_DRAINING;
2811	mutex_unlock(&wq->mutex);
2812reflush:
2813	flush_workqueue(wq);
2814
2815	mutex_lock(&wq->mutex);
2816
2817	for_each_pwq(pwq, wq) {
2818		bool drained;
2819
2820		spin_lock_irq(&pwq->pool->lock);
2821		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2822		spin_unlock_irq(&pwq->pool->lock);
2823
2824		if (drained)
2825			continue;
2826
2827		if (++flush_cnt == 10 ||
2828		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2829			pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2830				wq->name, flush_cnt);
2831
2832		mutex_unlock(&wq->mutex);
2833		goto reflush;
2834	}
2835
2836	if (!--wq->nr_drainers)
2837		wq->flags &= ~__WQ_DRAINING;
2838	mutex_unlock(&wq->mutex);
2839}
2840EXPORT_SYMBOL_GPL(drain_workqueue);
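/*
 * Editor's illustrative sketch (not part of the original file): shutting
 * down a workqueue whose work items may re-queue themselves.  After the
 * drain only chain queueing could have happened, so the queue is empty and
 * can be destroyed.  my_wq and my_shutdown() are hypothetical names.
 */
static void my_shutdown(struct workqueue_struct *my_wq)
{
	drain_workqueue(my_wq);		/* flush repeatedly until empty */
	destroy_workqueue(my_wq);
}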
2841
2842static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2843{
2844	struct worker *worker = NULL;
2845	struct worker_pool *pool;
2846	struct pool_workqueue *pwq;
2847
2848	might_sleep();
2849
2850	local_irq_disable();
2851	pool = get_work_pool(work);
2852	if (!pool) {
2853		local_irq_enable();
2854		return false;
2855	}
2856
2857	spin_lock(&pool->lock);
2858	/* see the comment in try_to_grab_pending() with the same code */
2859	pwq = get_work_pwq(work);
2860	if (pwq) {
2861		if (unlikely(pwq->pool != pool))
2862			goto already_gone;
2863	} else {
2864		worker = find_worker_executing_work(pool, work);
2865		if (!worker)
2866			goto already_gone;
2867		pwq = worker->current_pwq;
2868	}
2869
2870	insert_wq_barrier(pwq, barr, work, worker);
2871	spin_unlock_irq(&pool->lock);
2872
2873	/*
2874	 * If @max_active is 1 or rescuer is in use, flushing another work
2875	 * item on the same workqueue may lead to deadlock.  Make sure the
2876	 * flusher is not running on the same workqueue by verifying write
2877	 * access.
2878	 */
2879	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
2880		lock_map_acquire(&pwq->wq->lockdep_map);
2881	else
2882		lock_map_acquire_read(&pwq->wq->lockdep_map);
2883	lock_map_release(&pwq->wq->lockdep_map);
2884
2885	return true;
2886already_gone:
2887	spin_unlock_irq(&pool->lock);
2888	return false;
2889}
2890
2891/**
2892 * flush_work - wait for a work to finish executing the last queueing instance
2893 * @work: the work to flush
2894 *
2895 * Wait until @work has finished execution.  @work is guaranteed to be idle
2896 * on return if it hasn't been requeued since flush started.
2897 *
2898 * Return:
2899 * %true if flush_work() waited for the work to finish execution,
2900 * %false if it was already idle.
2901 */
2902bool flush_work(struct work_struct *work)
2903{
2904	struct wq_barrier barr;
2905
2906	lock_map_acquire(&work->lockdep_map);
2907	lock_map_release(&work->lockdep_map);
2908
2909	if (start_flush_work(work, &barr)) {
2910		wait_for_completion(&barr.done);
2911		destroy_work_on_stack(&barr.work);
2912		return true;
2913	} else {
2914		return false;
2915	}
2916}
2917EXPORT_SYMBOL_GPL(flush_work);
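/*
 * Editor's illustrative sketch (not part of the original file): queueing a
 * work item and waiting for that queueing instance to finish so its result
 * can be read.  struct my_obj, my_obj_compute() and my_get_result() are
 * hypothetical; INIT_WORK(&obj->work, my_obj_compute) is assumed to have
 * been done at allocation time.  flush_work() is the real interface.
 */
struct my_obj {
	struct work_struct	work;
	int			result;
};

static int my_get_result(struct my_obj *obj)
{
	schedule_work(&obj->work);	/* my_obj_compute() fills obj->result */
	flush_work(&obj->work);		/* wait for that queueing to execute */
	return obj->result;
}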
2918
2919static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2920{
2921	unsigned long flags;
2922	int ret;
2923
2924	do {
2925		ret = try_to_grab_pending(work, is_dwork, &flags);
2926		/*
2927		 * If someone else is canceling, wait for the same event it
2928		 * would be waiting for before retrying.
2929		 */
2930		if (unlikely(ret == -ENOENT))
2931			flush_work(work);
2932	} while (unlikely(ret < 0));
2933
2934	/* tell other tasks trying to grab @work to back off */
2935	mark_work_canceling(work);
2936	local_irq_restore(flags);
2937
2938	flush_work(work);
2939	clear_work_data(work);
2940	return ret;
2941}
2942
2943/**
2944 * cancel_work_sync - cancel a work and wait for it to finish
2945 * @work: the work to cancel
2946 *
2947 * Cancel @work and wait for its execution to finish.  This function
2948 * can be used even if the work re-queues itself or migrates to
2949 * another workqueue.  On return from this function, @work is
2950 * guaranteed to be not pending or executing on any CPU.
2951 *
2952 * cancel_work_sync(&delayed_work->work) must not be used for
2953 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2954 *
2955 * The caller must ensure that the workqueue on which @work was last
2956 * queued can't be destroyed before this function returns.
2957 *
2958 * Return:
2959 * %true if @work was pending, %false otherwise.
2960 */
2961bool cancel_work_sync(struct work_struct *work)
2962{
2963	return __cancel_work_timer(work, false);
2964}
2965EXPORT_SYMBOL_GPL(cancel_work_sync);
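/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * device-removal path.  Once cancel_work_sync() returns, the handler is
 * neither pending nor running anywhere, so the resources it touches can be
 * freed.  struct my_dev, its members and my_remove() are hypothetical.
 */
struct my_dev {
	struct work_struct	irq_work;
	void			*buf;
};

static void my_remove(struct my_dev *dev)
{
	cancel_work_sync(&dev->irq_work);	/* no longer pending or running */
	kfree(dev->buf);
	kfree(dev);
}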
2966
2967/**
2968 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2969 * @dwork: the delayed work to flush
2970 *
2971 * The delayed timer is cancelled and the pending work is queued for
2972 * immediate execution.  Like flush_work(), this function only
2973 * considers the last queueing instance of @dwork.
2974 *
2975 * Return:
2976 * %true if flush_work() waited for the work to finish execution,
2977 * %false if it was already idle.
2978 */
2979bool flush_delayed_work(struct delayed_work *dwork)
2980{
2981	local_irq_disable();
2982	if (del_timer_sync(&dwork->timer))
2983		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
2984	local_irq_enable();
2985	return flush_work(&dwork->work);
2986}
2987EXPORT_SYMBOL(flush_delayed_work);
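/*
 * Editor's illustrative sketch (not part of the original file): forcing a
 * deferred writeback to run now instead of waiting for its timer.  struct
 * my_card, its writeback_dwork member and my_sync_now() are hypothetical.
 */
struct my_card {
	struct delayed_work	writeback_dwork;
};

static void my_sync_now(struct my_card *card)
{
	/* cancel the timer, queue immediately and wait for completion */
	flush_delayed_work(&card->writeback_dwork);
}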
2988
2989/**
2990 * cancel_delayed_work - cancel a delayed work
2991 * @dwork: delayed_work to cancel
2992 *
2993 * Kill off a pending delayed_work.
2994 *
2995 * Return: %true if @dwork was pending and canceled; %false if it wasn't
2996 * pending.
2997 *
2998 * Note:
2999 * The work callback function may still be running on return, unless
3000 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
3001 * use cancel_delayed_work_sync() to wait on it.
3002 *
3003 * This function is safe to call from any context including IRQ handler.
3004 */
3005bool cancel_delayed_work(struct delayed_work *dwork)
3006{
3007	unsigned long flags;
3008	int ret;
3009
3010	do {
3011		ret = try_to_grab_pending(&dwork->work, true, &flags);
3012	} while (unlikely(ret == -EAGAIN));
3013
3014	if (unlikely(ret < 0))
3015		return false;
3016
3017	set_work_pool_and_clear_pending(&dwork->work,
3018					get_work_pool_id(&dwork->work));
3019	local_irq_restore(flags);
3020	return ret;
3021}
3022EXPORT_SYMBOL(cancel_delayed_work);
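/*
 * Editor's illustrative sketch (not part of the original file): stopping a
 * periodic poll from a context that cannot sleep (e.g. an IRQ handler).
 * The handler may still be running when this returns, so nothing is freed
 * here; the sleeping _sync() variant below is used on the teardown path.
 * my_stop_poll() and its argument are hypothetical.
 */
static void my_stop_poll(struct delayed_work *poll_dwork)
{
	cancel_delayed_work(poll_dwork);	/* safe from any context */
}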
3023
3024/**
3025 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3026 * @dwork: the delayed work to cancel
3027 *
3028 * This is cancel_work_sync() for delayed works.
3029 *
3030 * Return:
3031 * %true if @dwork was pending, %false otherwise.
3032 */
3033bool cancel_delayed_work_sync(struct delayed_work *dwork)
3034{
3035	return __cancel_work_timer(&dwork->work, true);
3036}
3037EXPORT_SYMBOL(cancel_delayed_work_sync);
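/*
 * Editor's illustrative sketch (not part of the original file): the
 * teardown-path counterpart of the snippet above.  After this returns the
 * poll handler can no longer run, so its buffer can be freed.  struct
 * my_poller and my_poller_destroy() are hypothetical names.
 */
struct my_poller {
	struct delayed_work	poll_dwork;
	void			*poll_buf;
};

static void my_poller_destroy(struct my_poller *p)
{
	cancel_delayed_work_sync(&p->poll_dwork);
	kfree(p->poll_buf);
	kfree(p);
}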
3038
3039/**
3040 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3041 * @func: the function to call
3042 *
3043 * schedule_on_each_cpu() executes @func on each online CPU using the
3044 * system workqueue and blocks until all CPUs have completed.
3045 * schedule_on_each_cpu() is very slow.
3046 *
3047 * Return:
3048 * 0 on success, -errno on failure.
3049 */
3050int schedule_on_each_cpu(work_func_t func)
3051{
3052	int cpu;
3053	struct work_struct __percpu *works;
3054
3055	works = alloc_percpu(struct work_struct);
3056	if (!works)
3057		return -ENOMEM;
3058
3059	get_online_cpus();
3060
3061	for_each_online_cpu(cpu) {
3062		struct work_struct *work = per_cpu_ptr(works, cpu);
3063
3064		INIT_WORK(work, func);
3065		schedule_work_on(cpu, work);
3066	}
3067
3068	for_each_online_cpu(cpu)
3069		flush_work(per_cpu_ptr(works, cpu));
3070
3071	put_online_cpus();
3072	free_percpu(works);
3073	return 0;
3074}
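/*
 * Editor's illustrative sketch (not part of the original file): running a
 * small function on every online CPU and waiting for all of them, e.g. to
 * flush per-cpu caches.  my_flush_local_cache() and my_flush_all_caches()
 * are hypothetical names.
 */
static void my_flush_local_cache(struct work_struct *unused)
{
	/* executes in process context on one particular online CPU */
}

static int my_flush_all_caches(void)
{
	return schedule_on_each_cpu(my_flush_local_cache);	/* 0 or -ENOMEM */
}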
3075
3076/**
3077 * flush_scheduled_work - ensure that any scheduled work has run to completion.
3078 *
3079 * Forces execution of the kernel-global workqueue and blocks until its
3080 * completion.
3081 *
3082 * Think twice before calling this function!  It's very easy to get into
3083 * trouble if you don't take great care.  Either of the following situations
3084 * will lead to deadlock:
3085 *
3086 *	One of the work items currently on the workqueue needs to acquire
3087 *	a lock held by your code or its caller.
3088 *
3089 *	Your code is running in the context of a work routine.
3090 *
3091 * They will be detected by lockdep when they occur, but the first might not
3092 * occur very often.  It depends on what work items are on the workqueue and
3093 * what locks they need, which you have no control over.
3094 *
3095 * In most situations flushing the entire workqueue is overkill; you merely
3096 * need to know that a particular work item isn't queued and isn't running.
3097 * In such cases you should use cancel_delayed_work_sync() or
3098 * cancel_work_sync() instead.
3099 */
3100void flush_scheduled_work(void)
3101{
3102	flush_workqueue(system_wq);
3103}
3104EXPORT_SYMBOL(flush_scheduled_work);
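/*
 * Editor's illustrative sketch (not part of the original file): the
 * targeted alternative recommended above - wait only for the one item you
 * care about instead of flushing the whole system workqueue.  my_work and
 * my_work_fn() are hypothetical names.
 */
static void my_work_fn(struct work_struct *work)
{
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_stop_my_work(void)
{
	cancel_work_sync(&my_work);	/* avoids the deadlock scenarios above */
}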
3105
3106/**
3107 * execute_in_process_context - reliably execute the routine with user context
3108 * @fn:		the function to execute
3109 * @ew:		guaranteed storage for the execute work structure (must
3110 *		be available when the work executes)
3111 *
3112 * Executes the function immediately if process context is available,
3113 * otherwise schedules the function for delayed execution.
3114 *
3115 * Return:	0 - function was executed
3116 *		1 - function was scheduled for execution
3117 */
3118int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3119{
3120	if (!in_interrupt()) {
3121		fn(&ew->work);
3122		return 0;
3123	}
3124
3125	INIT_WORK(&ew->work, fn);
3126	schedule_work(&ew->work);
3127
3128	return 1;
3129}
3130EXPORT_SYMBOL_GPL(execute_in_process_context);
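/*
 * Editor's illustrative sketch (not part of the original file): releasing a
 * resource from a path that may run in either process or interrupt context.
 * struct my_ctx, my_cleanup() and my_release() are hypothetical; struct
 * execute_work and execute_in_process_context() are the real interfaces.
 */
struct my_ctx {
	struct execute_work	cleanup_ew;
	void			*dma_buf;
};

static void my_cleanup(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx,
					  cleanup_ew.work);

	kfree(ctx->dma_buf);
	kfree(ctx);
}

static void my_release(struct my_ctx *ctx)
{
	/* runs my_cleanup() directly if possible, else via schedule_work() */
	execute_in_process_context(my_cleanup, &ctx->cleanup_ew);
}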
3131
3132#ifdef CONFIG_SYSFS
3133/*
3134 * Workqueues with the WQ_SYSFS flag set are visible to userland via
3135 * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
3136 * following attributes.
3137 *
3138 *  per_cpu	RO bool	: whether the workqueue is per-cpu or unbound
3139 *  max_active	RW int	: maximum number of in-flight work items
3140 *
3141 * Unbound workqueues have the following extra attributes.
3142 *
3143 *  id		RO int	: the associated pool ID
3144 *  nice	RW int	: nice value of the workers
3145 *  cpumask	RW mask	: bitmask of allowed CPUs for the workers
3146 */
3147struct wq_device {
3148	struct workqueue_struct		*wq;
3149	struct device			dev;
3150};
3151
3152static struct workqueue_struct *dev_to_wq(struct device *dev)
3153{
3154	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
3155
3156	return wq_dev->wq;
3157}
3158
3159static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
3160			    char *buf)
3161{
3162	struct workqueue_struct *wq = dev_to_wq(dev);
3163
3164	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
3165}
3166static DEVICE_ATTR_RO(per_cpu);
3167
3168static ssize_t max_active_show(struct device *dev,
3169			       struct device_attribute *attr, char *buf)
3170{
3171	struct workqueue_struct *wq = dev_to_wq(dev);
3172
3173	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
3174}
3175
3176static ssize_t max_active_store(struct device *dev,
3177				struct device_attribute *attr, const char *buf,
3178				size_t count)
3179{
3180	struct workqueue_struct *wq = dev_to_wq(dev);
3181	int val;
3182
3183	if (sscanf(buf, "%d", &val) != 1 || val <= 0)
3184		return -EINVAL;
3185
3186	workqueue_set_max_active(wq, val);
3187	return count;
3188}
3189static DEVICE_ATTR_RW(max_active);
3190
3191static struct attribute *wq_sysfs_attrs[] = {
3192	&dev_attr_per_cpu.attr,
3193	&dev_attr_max_active.attr,
3194	NULL,
3195};
3196ATTRIBUTE_GROUPS(wq_sysfs);
3197
3198static ssize_t wq_pool_ids_show(struct device *dev,
3199				struct device_attribute *attr, char *buf)
3200{
3201	struct workqueue_struct *wq = dev_to_wq(dev);
3202	const char *delim = "";
3203	int node, written = 0;
3204
3205	rcu_read_lock_sched();
3206	for_each_node(node) {
3207		written += scnprintf(buf + written, PAGE_SIZE - written,
3208				     "%s%d:%d", delim, node,
3209				     unbound_pwq_by_node(wq, node)->pool->id);
3210		delim = " ";
3211	}
3212	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
3213	rcu_read_unlock_sched();
3214
3215	return written;
3216}
3217
3218static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
3219			    char *buf)
3220{
3221	struct workqueue_struct *wq = dev_to_wq(dev);
3222	int written;
3223
3224	mutex_lock(&wq->mutex);
3225	written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
3226	mutex_unlock(&wq->mutex);
3227
3228	return written;
3229}
3230
3231/* prepare workqueue_attrs for sysfs store operations */
3232static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
3233{
3234	struct workqueue_attrs *attrs;
3235
3236	attrs = alloc_workqueue_attrs(GFP_KERNEL);
3237	if (!attrs)
3238		return NULL;
3239
3240	mutex_lock(&wq->mutex);
3241	copy_workqueue_attrs(attrs, wq->unbound_attrs);
3242	mutex_unlock(&wq->mutex);
3243	return attrs;
3244}
3245
3246static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
3247			     const char *buf, size_t count)
3248{
3249	struct workqueue_struct *wq = dev_to_wq(dev);
3250	struct workqueue_attrs *attrs;
3251	int ret;
3252
3253	attrs = wq_sysfs_prep_attrs(wq);
3254	if (!attrs)
3255		return -ENOMEM;
3256
3257	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
3258	    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
3259		ret = apply_workqueue_attrs(wq, attrs);
3260	else
3261		ret = -EINVAL;
3262
3263	free_workqueue_attrs(attrs);
3264	return ret ?: count;
3265}
3266
3267static ssize_t wq_cpumask_show(struct device *dev,
3268			       struct device_attribute *attr, char *buf)
3269{
3270	struct workqueue_struct *wq = dev_to_wq(dev);
3271	int written;
3272
3273	mutex_lock(&wq->mutex);
3274	written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask);
3275	mutex_unlock(&wq->mutex);
3276
3277	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
3278	return written;
3279}
3280
3281static ssize_t wq_cpumask_store(struct device *dev,
3282				struct device_attribute *attr,
3283				const char *buf, size_t count)
3284{
3285	struct workqueue_struct *wq = dev_to_wq(dev);
3286	struct workqueue_attrs *attrs;
3287	int ret;
3288
3289	attrs = wq_sysfs_prep_attrs(wq);
3290	if (!attrs)
3291		return -ENOMEM;
3292
3293	ret = cpumask_parse(buf, attrs->cpumask);
3294	if (!ret)
3295		ret = apply_workqueue_attrs(wq, attrs);
3296
3297	free_workqueue_attrs(attrs);
3298	return ret ?: count;
3299}
3300
3301static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
3302			    char *buf)
3303{
3304	struct workqueue_struct *wq = dev_to_wq(dev);
3305	int written;
3306
3307	mutex_lock(&wq->mutex);
3308	written = scnprintf(buf, PAGE_SIZE, "%d\n",
3309			    !wq->unbound_attrs->no_numa);
3310	mutex_unlock(&wq->mutex);
3311
3312	return written;
3313}
3314
3315static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
3316			     const char *buf, size_t count)
3317{
3318	struct workqueue_struct *wq = dev_to_wq(dev);
3319	struct workqueue_attrs *attrs;
3320	int v, ret;
3321
3322	attrs = wq_sysfs_prep_attrs(wq);
3323	if (!attrs)
3324		return -ENOMEM;
3325
3326	ret = -EINVAL;
3327	if (sscanf(buf, "%d", &v) == 1) {
3328		attrs->no_numa = !v;
3329		ret = apply_workqueue_attrs(wq, attrs);
3330	}
3331
3332	free_workqueue_attrs(attrs);
3333	return ret ?: count;
3334}
3335
3336static struct device_attribute wq_sysfs_unbound_attrs[] = {
3337	__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
3338	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
3339	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
3340	__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
3341	__ATTR_NULL,
3342};
3343
3344static struct bus_type wq_subsys = {
3345	.name				= "workqueue",
3346	.dev_groups			= wq_sysfs_groups,
3347};
3348
3349static int __init wq_sysfs_init(void)
3350{
3351	return subsys_virtual_register(&wq_subsys, NULL);
3352}
3353core_initcall(wq_sysfs_init);
3354
3355static void wq_device_release(struct device *dev)
3356{
3357	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
3358
3359	kfree(wq_dev);
3360}
3361
3362/**
3363 * workqueue_sysfs_register - make a workqueue visible in sysfs
3364 * @wq: the workqueue to register
3365 *
3366 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
3367 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
3368 * which is the preferred method.
3369 *
3370 * A workqueue user should use this function directly iff it wants to apply
3371 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
3372 * apply_workqueue_attrs() may race against userland updating the
3373 * attributes.
3374 *
3375 * Return: 0 on success, -errno on failure.
3376 */
3377int workqueue_sysfs_register(struct workqueue_struct *wq)
3378{
3379	struct wq_device *wq_dev;
3380	int ret;
3381
3382	/*
3383	 * Adjusting max_active or creating new pwqs by applying
3384	 * attributes breaks ordering guarantee.  Disallow exposing ordered
3385	 * workqueues.
3386	 */
3387	if (WARN_ON(wq->flags & __WQ_ORDERED))
3388		return -EINVAL;
3389
3390	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
3391	if (!wq_dev)
3392		return -ENOMEM;
3393
3394	wq_dev->wq = wq;
3395	wq_dev->dev.bus = &wq_subsys;
3396	wq_dev->dev.init_name = wq->name;
3397	wq_dev->dev.release = wq_device_release;
3398
3399	/*
3400	 * unbound_attrs are created separately.  Suppress uevent until
3401	 * everything is ready.
3402	 */
3403	dev_set_uevent_suppress(&wq_dev->dev, true);
3404
3405	ret = device_register(&wq_dev->dev);
3406	if (ret) {
3407		kfree(wq_dev);
3408		wq->wq_dev = NULL;
3409		return ret;
3410	}
3411
3412	if (wq->flags & WQ_UNBOUND) {
3413		struct device_attribute *attr;
3414
3415		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
3416			ret = device_create_file(&wq_dev->dev, attr);
3417			if (ret) {
3418				device_unregister(&wq_dev->dev);
3419				wq->wq_dev = NULL;
3420				return ret;
3421			}
3422		}
3423	}
3424
3425	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
3426	return 0;
3427}
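/*
 * Editor's illustrative sketch (not part of the original file): a workqueue
 * whose attributes should be tunable from userland is created with WQ_SYSFS,
 * which makes alloc_workqueue() call workqueue_sysfs_register() and exposes
 * it under /sys/bus/workqueue/devices/.  "my_tunable_wq" and
 * my_tunable_init() are hypothetical names.
 */
static struct workqueue_struct *my_tunable_wq;

static int __init my_tunable_init(void)
{
	my_tunable_wq = alloc_workqueue("my_tunable_wq",
					WQ_UNBOUND | WQ_SYSFS, 0);
	return my_tunable_wq ? 0 : -ENOMEM;
}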
3428
3429/**
3430 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
3431 * @wq: the workqueue to unregister
3432 *
3433 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
3434 */
3435static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
3436{
3437	struct wq_device *wq_dev = wq->wq_dev;
3438
3439	if (!wq->wq_dev)
3440		return;
3441
3442	wq->wq_dev = NULL;
3443	device_unregister(&wq_dev->dev);
3444}
3445#else	/* CONFIG_SYSFS */
3446static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
3447#endif	/* CONFIG_SYSFS */
3448
3449/**
3450 * free_workqueue_attrs - free a workqueue_attrs
3451 * @attrs: workqueue_attrs to free
3452 *
3453 * Undo alloc_workqueue_attrs().
3454 */
3455void free_workqueue_attrs(struct workqueue_attrs *attrs)
3456{
3457	if (attrs) {
3458		free_cpumask_var(attrs->cpumask);
3459		kfree(attrs);
3460	}
3461}
3462
3463/**
3464 * alloc_workqueue_attrs - allocate a workqueue_attrs
3465 * @gfp_mask: allocation mask to use
3466 *
3467 * Allocate a new workqueue_attrs, initialize with default settings and
3468 * return it.
3469 *
3470 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3471 */
3472struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3473{
3474	struct workqueue_attrs *attrs;
3475
3476	attrs = kzalloc(sizeof(*attrs), gfp_mask);
3477	if (!attrs)
3478		goto fail;
3479	if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3480		goto fail;
3481
3482	cpumask_copy(attrs->cpumask, cpu_possible_mask);
3483	return attrs;
3484fail:
3485	free_workqueue_attrs(attrs);
3486	return NULL;
3487}
3488
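/*
 * Illustrative sketch, not part of the original file: allocate a
 * workqueue_attrs, restrict a WQ_UNBOUND workqueue to CPUs 0-1 at a raised
 * priority via apply_workqueue_attrs(), then free the attrs.  The helper
 * name and the passed-in workqueue are hypothetical.
 */
static int example_restrict_wq(struct workqueue_struct *unbound_wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;			/* workers run at nice -5 */
	cpumask_clear(attrs->cpumask);
	cpumask_set_cpu(0, attrs->cpumask);	/* CPUs 0 and 1 only */
	cpumask_set_cpu(1, attrs->cpumask);

	ret = apply_workqueue_attrs(unbound_wq, attrs);
	free_workqueue_attrs(attrs);
	return ret;
}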
3489static void copy_workqueue_attrs(struct workqueue_attrs *to,
3490				 const struct workqueue_attrs *from)
3491{
3492	to->nice = from->nice;
3493	cpumask_copy(to->cpumask, from->cpumask);
3494	/*
3495	 * Unlike hash and equality test, this function doesn't ignore
3496	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
3497	 * get_unbound_pool() explicitly clears ->no_numa after copying.
3498	 */
3499	to->no_numa = from->no_numa;
3500}
3501
3502/* hash value of the content of @attr */
3503static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3504{
3505	u32 hash = 0;
3506
3507	hash = jhash_1word(attrs->nice, hash);
3508	hash = jhash(cpumask_bits(attrs->cpumask),
3509		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3510	return hash;
3511}
3512
3513/* content equality test */
3514static bool wqattrs_equal(const struct workqueue_attrs *a,
3515			  const struct workqueue_attrs *b)
3516{
3517	if (a->nice != b->nice)
3518		return false;
3519	if (!cpumask_equal(a->cpumask, b->cpumask))
3520		return false;
3521	return true;
3522}
3523
3524/**
3525 * init_worker_pool - initialize a newly zalloc'd worker_pool
3526 * @pool: worker_pool to initialize
3527 *
3528 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3529 *
3530 * Return: 0 on success, -errno on failure.  Even on failure, all fields
3531 * inside @pool proper are initialized and put_unbound_pool() can be called
3532 * on @pool safely to release it.
3533 */
3534static int init_worker_pool(struct worker_pool *pool)
3535{
3536	spin_lock_init(&pool->lock);
3537	pool->id = -1;
3538	pool->cpu = -1;
3539	pool->node = NUMA_NO_NODE;
3540	pool->flags |= POOL_DISASSOCIATED;
3541	INIT_LIST_HEAD(&pool->worklist);
3542	INIT_LIST_HEAD(&pool->idle_list);
3543	hash_init(pool->busy_hash);
3544
3545	init_timer_deferrable(&pool->idle_timer);
3546	pool->idle_timer.function = idle_worker_timeout;
3547	pool->idle_timer.data = (unsigned long)pool;
3548
3549	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3550		    (unsigned long)pool);
3551
3552	mutex_init(&pool->manager_arb);
3553	mutex_init(&pool->manager_mutex);
3554	idr_init(&pool->worker_idr);
3555
3556	INIT_HLIST_NODE(&pool->hash_node);
3557	pool->refcnt = 1;
3558
3559	/* shouldn't fail above this point */
3560	pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3561	if (!pool->attrs)
3562		return -ENOMEM;
3563	return 0;
3564}
3565
3566static void rcu_free_pool(struct rcu_head *rcu)
3567{
3568	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3569
3570	idr_destroy(&pool->worker_idr);
3571	free_workqueue_attrs(pool->attrs);
3572	kfree(pool);
3573}
3574
3575/**
3576 * put_unbound_pool - put a worker_pool
3577 * @pool: worker_pool to put
3578 *
3579 * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
3580 * safe manner.  get_unbound_pool() calls this function on its failure path
3581 * and this function should be able to release pools which went through,
3582 * successfully or not, init_worker_pool().
3583 *
3584 * Should be called with wq_pool_mutex held.
3585 */
3586static void put_unbound_pool(struct worker_pool *pool)
3587{
3588	struct worker *worker;
3589
3590	lockdep_assert_held(&wq_pool_mutex);
3591
3592	if (--pool->refcnt)
3593		return;
3594
3595	/* sanity checks */
3596	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
3597	    WARN_ON(!list_empty(&pool->worklist)))
3598		return;
3599
3600	/* release id and unhash */
3601	if (pool->id >= 0)
3602		idr_remove(&worker_pool_idr, pool->id);
3603	hash_del(&pool->hash_node);
3604
3605	/*
3606	 * Become the manager and destroy all workers.  Grabbing
3607	 * manager_arb prevents @pool's workers from blocking on
3608	 * manager_mutex.
3609	 */
3610	mutex_lock(&pool->manager_arb);
3611	mutex_lock(&pool->manager_mutex);
3612	spin_lock_irq(&pool->lock);
3613
3614	while ((worker = first_worker(pool)))
3615		destroy_worker(worker);
3616	WARN_ON(pool->nr_workers || pool->nr_idle);
3617
3618	spin_unlock_irq(&pool->lock);
3619	mutex_unlock(&pool->manager_mutex);
3620	mutex_unlock(&pool->manager_arb);
3621
3622	/* shut down the timers */
3623	del_timer_sync(&pool->idle_timer);
3624	del_timer_sync(&pool->mayday_timer);
3625
3626	/* sched-RCU protected to allow dereferences from get_work_pool() */
3627	call_rcu_sched(&pool->rcu, rcu_free_pool);
3628}
3629
3630/**
3631 * get_unbound_pool - get a worker_pool with the specified attributes
3632 * @attrs: the attributes of the worker_pool to get
3633 *
3634 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3635 * reference count and return it.  If there already is a matching
3636 * worker_pool, it will be used; otherwise, this function attempts to
3637 * create a new one.
3638 *
3639 * Should be called with wq_pool_mutex held.
3640 *
3641 * Return: On success, a worker_pool with the same attributes as @attrs.
3642 * On failure, %NULL.
3643 */
3644static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3645{
3646	u32 hash = wqattrs_hash(attrs);
3647	struct worker_pool *pool;
3648	int node;
3649
3650	lockdep_assert_held(&wq_pool_mutex);
3651
3652	/* do we already have a matching pool? */
3653	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3654		if (wqattrs_equal(pool->attrs, attrs)) {
3655			pool->refcnt++;
3656			goto out_unlock;
3657		}
3658	}
3659
3660	/* nope, create a new one */
3661	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
3662	if (!pool || init_worker_pool(pool) < 0)
3663		goto fail;
3664
3665	if (workqueue_freezing)
3666		pool->flags |= POOL_FREEZING;
3667
3668	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
3669	copy_workqueue_attrs(pool->attrs, attrs);
3670
3671	/*
3672	 * no_numa isn't a worker_pool attribute, always clear it.  See
3673	 * 'struct workqueue_attrs' comments for detail.
3674	 */
3675	pool->attrs->no_numa = false;
3676
3677	/* if cpumask is contained inside a NUMA node, we belong to that node */
3678	if (wq_numa_enabled) {
3679		for_each_node(node) {
3680			if (cpumask_subset(pool->attrs->cpumask,
3681					   wq_numa_possible_cpumask[node])) {
3682				pool->node = node;
3683				break;
3684			}
3685		}
3686	}
3687
3688	if (worker_pool_assign_id(pool) < 0)
3689		goto fail;
3690
3691	/* create and start the initial worker */
3692	if (create_and_start_worker(pool) < 0)
3693		goto fail;
3694
3695	/* install */
3696	hash_add(unbound_pool_hash, &pool->hash_node, hash);
3697out_unlock:
3698	return pool;
3699fail:
3700	if (pool)
3701		put_unbound_pool(pool);
3702	return NULL;
3703}
3704
3705static void rcu_free_pwq(struct rcu_head *rcu)
3706{
3707	kmem_cache_free(pwq_cache,
3708			container_of(rcu, struct pool_workqueue, rcu));
3709}
3710
3711/*
3712 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3713 * and needs to be destroyed.
3714 */
3715static void pwq_unbound_release_workfn(struct work_struct *work)
3716{
3717	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3718						  unbound_release_work);
3719	struct workqueue_struct *wq = pwq->wq;
3720	struct worker_pool *pool = pwq->pool;
3721	bool is_last;
3722
3723	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3724		return;
3725
3726	/*
3727	 * Unlink @pwq.  Synchronization against wq->mutex isn't strictly
3728	 * necessary on release but do it anyway.  It's easier to verify
3729	 * and consistent with the linking path.
3730	 */
3731	mutex_lock(&wq->mutex);
3732	list_del_rcu(&pwq->pwqs_node);
3733	is_last = list_empty(&wq->pwqs);
3734	mutex_unlock(&wq->mutex);
3735
3736	mutex_lock(&wq_pool_mutex);
3737	put_unbound_pool(pool);
3738	mutex_unlock(&wq_pool_mutex);
3739
3740	call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3741
3742	/*
3743	 * If we're the last pwq going away, @wq is already dead and no one
3744	 * is gonna access it anymore.  Free it.
3745	 */
3746	if (is_last) {
3747		free_workqueue_attrs(wq->unbound_attrs);
3748		kfree(wq);
3749	}
3750}
3751
3752/**
3753 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3754 * @pwq: target pool_workqueue
3755 *
3756 * If @pwq isn't freezing, set @pwq->max_active to the associated
3757 * workqueue's saved_max_active and activate delayed work items
3758 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3759 */
3760static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3761{
3762	struct workqueue_struct *wq = pwq->wq;
3763	bool freezable = wq->flags & WQ_FREEZABLE;
3764
3765	/* for @wq->saved_max_active */
3766	lockdep_assert_held(&wq->mutex);
3767
3768	/* fast exit for non-freezable wqs */
3769	if (!freezable && pwq->max_active == wq->saved_max_active)
3770		return;
3771
3772	spin_lock_irq(&pwq->pool->lock);
3773
3774	if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) {
3775		pwq->max_active = wq->saved_max_active;
3776
3777		while (!list_empty(&pwq->delayed_works) &&
3778		       pwq->nr_active < pwq->max_active)
3779			pwq_activate_first_delayed(pwq);
 
3780
3781		/*
3782		 * Need to kick a worker after the wq is thawed or an unbound
3783		 * wq's max_active is bumped.  It's a slow path.  Do it always.
3784		 */
3785		wake_up_worker(pwq->pool);
3786	} else {
3787		pwq->max_active = 0;
3788	}
3789
3790	spin_unlock_irq(&pwq->pool->lock);
3791}
3792
3793/* initialize newly alloced @pwq which is associated with @wq and @pool */
3794static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3795		     struct worker_pool *pool)
3796{
3797	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3798
3799	memset(pwq, 0, sizeof(*pwq));
3800
3801	pwq->pool = pool;
3802	pwq->wq = wq;
3803	pwq->flush_color = -1;
3804	pwq->refcnt = 1;
3805	INIT_LIST_HEAD(&pwq->delayed_works);
3806	INIT_LIST_HEAD(&pwq->pwqs_node);
3807	INIT_LIST_HEAD(&pwq->mayday_node);
3808	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3809}
3810
3811/* sync @pwq with the current state of its associated wq and link it */
3812static void link_pwq(struct pool_workqueue *pwq)
3813{
3814	struct workqueue_struct *wq = pwq->wq;
3815
3816	lockdep_assert_held(&wq->mutex);
3817
3818	/* may be called multiple times, ignore if already linked */
3819	if (!list_empty(&pwq->pwqs_node))
3820		return;
3821
3822	/*
3823	 * Set the matching work_color.  This is synchronized with
3824	 * wq->mutex to avoid confusing flush_workqueue().
3825	 */
3826	pwq->work_color = wq->work_color;
3827
3828	/* sync max_active to the current setting */
3829	pwq_adjust_max_active(pwq);
3830
3831	/* link in @pwq */
3832	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3833}
3834
3835/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3836static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3837					const struct workqueue_attrs *attrs)
3838{
3839	struct worker_pool *pool;
3840	struct pool_workqueue *pwq;
3841
3842	lockdep_assert_held(&wq_pool_mutex);
3843
3844	pool = get_unbound_pool(attrs);
3845	if (!pool)
3846		return NULL;
3847
3848	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3849	if (!pwq) {
3850		put_unbound_pool(pool);
3851		return NULL;
3852	}
3853
3854	init_pwq(pwq, wq, pool);
3855	return pwq;
3856}
3857
3858/* undo alloc_unbound_pwq(), used only in the error path */
3859static void free_unbound_pwq(struct pool_workqueue *pwq)
3860{
3861	lockdep_assert_held(&wq_pool_mutex);
3862
3863	if (pwq) {
3864		put_unbound_pool(pwq->pool);
3865		kmem_cache_free(pwq_cache, pwq);
3866	}
3867}
3868
3869/**
3870 * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node
3871 * @attrs: the wq_attrs of interest
3872 * @node: the target NUMA node
3873 * @cpu_going_down: if >= 0, the CPU to consider as offline
3874 * @cpumask: outarg, the resulting cpumask
3875 *
3876 * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3877 * @cpu_going_down is >= 0, that cpu is considered offline during
3878 * calculation.  The result is stored in @cpumask.
3879 *
3880 * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3881 * enabled and @node has online CPUs requested by @attrs, the returned
3882 * cpumask is the intersection of the possible CPUs of @node and
3883 * @attrs->cpumask.
3884 *
3885 * The caller is responsible for ensuring that the cpumask of @node stays
3886 * stable.
3887 *
3888 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3889 * %false if equal.
3890 */
3891static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3892				 int cpu_going_down, cpumask_t *cpumask)
3893{
3894	if (!wq_numa_enabled || attrs->no_numa)
3895		goto use_dfl;
3896
3897	/* does @node have any online CPUs @attrs wants? */
3898	cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3899	if (cpu_going_down >= 0)
3900		cpumask_clear_cpu(cpu_going_down, cpumask);
3901
3902	if (cpumask_empty(cpumask))
3903		goto use_dfl;
3904
3905	/* yeap, return possible CPUs in @node that @attrs wants */
3906	cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3907	return !cpumask_equal(cpumask, attrs->cpumask);
3908
3909use_dfl:
3910	cpumask_copy(cpumask, attrs->cpumask);
3911	return false;
3912}
3913
3914/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3915static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3916						   int node,
3917						   struct pool_workqueue *pwq)
3918{
3919	struct pool_workqueue *old_pwq;
3920
3921	lockdep_assert_held(&wq->mutex);
3922
3923	/* link_pwq() can handle duplicate calls */
3924	link_pwq(pwq);
3925
3926	old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3927	rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3928	return old_pwq;
3929}
3930
3931/**
3932 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3933 * @wq: the target workqueue
3934 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3935 *
3936 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
3937 * machines, this function maps a separate pwq to each NUMA node with
3938 * possible CPUs in @attrs->cpumask so that work items are affine to the
3939 * NUMA node it was issued on.  Older pwqs are released as in-flight work
3940 * items finish.  Note that a work item which repeatedly requeues itself
3941 * back-to-back will stay on its current pwq.
3942 *
3943 * Performs GFP_KERNEL allocations.
3944 *
3945 * Return: 0 on success and -errno on failure.
3946 */
3947int apply_workqueue_attrs(struct workqueue_struct *wq,
3948			  const struct workqueue_attrs *attrs)
3949{
3950	struct workqueue_attrs *new_attrs, *tmp_attrs;
3951	struct pool_workqueue **pwq_tbl, *dfl_pwq;
3952	int node, ret;
3953
3954	/* only unbound workqueues can change attributes */
3955	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3956		return -EINVAL;
3957
3958	/* creating multiple pwqs breaks ordering guarantee */
3959	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
3960		return -EINVAL;
3961
3962	pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
3963	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3964	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3965	if (!pwq_tbl || !new_attrs || !tmp_attrs)
3966		goto enomem;
3967
3968	/* make a copy of @attrs and sanitize it */
3969	copy_workqueue_attrs(new_attrs, attrs);
3970	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3971
3972	/*
3973	 * We may create multiple pwqs with differing cpumasks.  Make a
3974	 * copy of @new_attrs which will be modified and used to obtain
3975	 * pools.
3976	 */
3977	copy_workqueue_attrs(tmp_attrs, new_attrs);
3978
3979	/*
3980	 * CPUs should stay stable across pwq creations and installations.
3981	 * Pin CPUs, determine the target cpumask for each node and create
3982	 * pwqs accordingly.
3983	 */
3984	get_online_cpus();
3985
3986	mutex_lock(&wq_pool_mutex);
3987
3988	/*
3989	 * If something goes wrong during CPU up/down, we'll fall back to
3990	 * the default pwq covering whole @attrs->cpumask.  Always create
3991	 * it even if we don't use it immediately.
3992	 */
3993	dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3994	if (!dfl_pwq)
3995		goto enomem_pwq;
3996
3997	for_each_node(node) {
3998		if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
3999			pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
4000			if (!pwq_tbl[node])
4001				goto enomem_pwq;
4002		} else {
4003			dfl_pwq->refcnt++;
4004			pwq_tbl[node] = dfl_pwq;
4005		}
4006	}
4007
4008	mutex_unlock(&wq_pool_mutex);
4009
4010	/* all pwqs have been created successfully, let's install'em */
4011	mutex_lock(&wq->mutex);
4012
4013	copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
4014
4015	/* save the previous pwq and install the new one */
4016	for_each_node(node)
4017		pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
4018
4019	/* @dfl_pwq might not have been used, ensure it's linked */
4020	link_pwq(dfl_pwq);
4021	swap(wq->dfl_pwq, dfl_pwq);
4022
4023	mutex_unlock(&wq->mutex);
4024
4025	/* put the old pwqs */
4026	for_each_node(node)
4027		put_pwq_unlocked(pwq_tbl[node]);
4028	put_pwq_unlocked(dfl_pwq);
4029
4030	put_online_cpus();
4031	ret = 0;
4032	/* fall through */
4033out_free:
4034	free_workqueue_attrs(tmp_attrs);
4035	free_workqueue_attrs(new_attrs);
4036	kfree(pwq_tbl);
4037	return ret;
4038
4039enomem_pwq:
4040	free_unbound_pwq(dfl_pwq);
4041	for_each_node(node)
4042		if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
4043			free_unbound_pwq(pwq_tbl[node]);
4044	mutex_unlock(&wq_pool_mutex);
4045	put_online_cpus();
4046enomem:
4047	ret = -ENOMEM;
4048	goto out_free;
4049}
4050
4051/**
4052 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4053 * @wq: the target workqueue
4054 * @cpu: the CPU coming up or going down
4055 * @online: whether @cpu is coming up or going down
4056 *
4057 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4058 * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
4059 * @wq accordingly.
4060 *
4061 * If NUMA affinity can't be adjusted due to memory allocation failure, it
4062 * falls back to @wq->dfl_pwq which may not be optimal but is always
4063 * correct.
4064 *
4065 * Note that when the last allowed CPU of a NUMA node goes offline for a
4066 * workqueue with a cpumask spanning multiple nodes, the workers which were
4067 * already executing the work items for the workqueue will lose their CPU
4068 * affinity and may execute on any CPU.  This is similar to how per-cpu
4069 * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
4070 * affinity, it's the user's responsibility to flush the work item from
4071 * CPU_DOWN_PREPARE.
4072 */
4073static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4074				   bool online)
4075{
4076	int node = cpu_to_node(cpu);
4077	int cpu_off = online ? -1 : cpu;
4078	struct pool_workqueue *old_pwq = NULL, *pwq;
4079	struct workqueue_attrs *target_attrs;
4080	cpumask_t *cpumask;
4081
4082	lockdep_assert_held(&wq_pool_mutex);
4083
4084	if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND))
4085		return;
4086
4087	/*
4088	 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4089	 * Let's use a preallocated one.  The following buf is protected by
4090	 * CPU hotplug exclusion.
4091	 */
4092	target_attrs = wq_update_unbound_numa_attrs_buf;
4093	cpumask = target_attrs->cpumask;
4094
4095	mutex_lock(&wq->mutex);
4096	if (wq->unbound_attrs->no_numa)
4097		goto out_unlock;
4098
4099	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4100	pwq = unbound_pwq_by_node(wq, node);
4101
4102	/*
4103	 * Let's determine what needs to be done.  If the target cpumask is
4104	 * different from wq's, we need to compare it to @pwq's and create
4105	 * a new one if they don't match.  If the target cpumask equals
4106	 * wq's, the default pwq should be used.  If @pwq is already the
4107	 * default one, nothing to do; otherwise, install the default one.
4108	 */
4109	if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) {
4110		if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4111			goto out_unlock;
4112	} else {
4113		if (pwq == wq->dfl_pwq)
4114			goto out_unlock;
4115		else
4116			goto use_dfl_pwq;
4117	}
4118
4119	mutex_unlock(&wq->mutex);
4120
4121	/* create a new pwq */
4122	pwq = alloc_unbound_pwq(wq, target_attrs);
4123	if (!pwq) {
4124		pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4125			   wq->name);
4126		mutex_lock(&wq->mutex);
4127		goto use_dfl_pwq;
4128	}
4129
4130	/*
4131	 * Install the new pwq.  As this function is called only from CPU
4132	 * hotplug callbacks and applying a new attrs is wrapped with
4133	 * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed
4134	 * in between.
4135	 */
4136	mutex_lock(&wq->mutex);
4137	old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4138	goto out_unlock;
4139
4140use_dfl_pwq:
4141	spin_lock_irq(&wq->dfl_pwq->pool->lock);
4142	get_pwq(wq->dfl_pwq);
4143	spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4144	old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4145out_unlock:
4146	mutex_unlock(&wq->mutex);
4147	put_pwq_unlocked(old_pwq);
4148}
4149
4150static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4151{
4152	bool highpri = wq->flags & WQ_HIGHPRI;
4153	int cpu, ret;
4154
4155	if (!(wq->flags & WQ_UNBOUND)) {
4156		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4157		if (!wq->cpu_pwqs)
4158			return -ENOMEM;
4159
4160		for_each_possible_cpu(cpu) {
4161			struct pool_workqueue *pwq =
4162				per_cpu_ptr(wq->cpu_pwqs, cpu);
4163			struct worker_pool *cpu_pools =
4164				per_cpu(cpu_worker_pools, cpu);
4165
4166			init_pwq(pwq, wq, &cpu_pools[highpri]);
4167
4168			mutex_lock(&wq->mutex);
4169			link_pwq(pwq);
4170			mutex_unlock(&wq->mutex);
4171		}
4172		return 0;
4173	} else if (wq->flags & __WQ_ORDERED) {
4174		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4175		/* there should only be a single pwq for the ordering guarantee */
4176		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4177			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4178		     "ordering guarantee broken for workqueue %s\n", wq->name);
4179		return ret;
4180	} else {
4181		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4182	}
4183}
4184
4185static int wq_clamp_max_active(int max_active, unsigned int flags,
4186			       const char *name)
4187{
4188	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4189
4190	if (max_active < 1 || max_active > lim)
4191		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4192			max_active, name, 1, lim);
 
4193
4194	return clamp_val(max_active, 1, lim);
4195}
4196
4197struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
4198					       unsigned int flags,
4199					       int max_active,
4200					       struct lock_class_key *key,
4201					       const char *lock_name, ...)
4202{
4203	size_t tbl_size = 0;
4204	va_list args;
4205	struct workqueue_struct *wq;
4206	struct pool_workqueue *pwq;
4207
4208	/* see the comment above the definition of WQ_POWER_EFFICIENT */
4209	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4210		flags |= WQ_UNBOUND;
4211
4212	/* allocate wq and format name */
4213	if (flags & WQ_UNBOUND)
4214		tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
4215
4216	wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4217	if (!wq)
4218		return NULL;
4219
4220	if (flags & WQ_UNBOUND) {
4221		wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
4222		if (!wq->unbound_attrs)
4223			goto err_free_wq;
4224	}
4225
4226	va_start(args, lock_name);
4227	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4228	va_end(args);
4229
4230	max_active = max_active ?: WQ_DFL_ACTIVE;
4231	max_active = wq_clamp_max_active(max_active, flags, wq->name);
4232
4233	/* init wq */
4234	wq->flags = flags;
4235	wq->saved_max_active = max_active;
4236	mutex_init(&wq->mutex);
4237	atomic_set(&wq->nr_pwqs_to_flush, 0);
4238	INIT_LIST_HEAD(&wq->pwqs);
4239	INIT_LIST_HEAD(&wq->flusher_queue);
4240	INIT_LIST_HEAD(&wq->flusher_overflow);
4241	INIT_LIST_HEAD(&wq->maydays);
4242
 
4243	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
4244	INIT_LIST_HEAD(&wq->list);
4245
4246	if (alloc_and_link_pwqs(wq) < 0)
4247		goto err_free_wq;
4248
4249	/*
4250	 * Workqueues which may be used during memory reclaim should
4251	 * have a rescuer to guarantee forward progress.
4252	 */
4253	if (flags & WQ_MEM_RECLAIM) {
4254		struct worker *rescuer;
4255
4256		rescuer = alloc_worker();
4257		if (!rescuer)
4258			goto err_destroy;
4259
4260		rescuer->rescue_wq = wq;
4261		rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
4262					       wq->name);
4263		if (IS_ERR(rescuer->task)) {
4264			kfree(rescuer);
4265			goto err_destroy;
4266		}
4267
4268		wq->rescuer = rescuer;
4269		rescuer->task->flags |= PF_NO_SETAFFINITY;
4270		wake_up_process(rescuer->task);
4271	}
4272
4273	if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4274		goto err_destroy;
4275
4276	/*
4277	 * wq_pool_mutex protects global freeze state and workqueues list.
4278	 * Grab it, adjust max_active and add the new @wq to workqueues
4279	 * list.
4280	 */
4281	mutex_lock(&wq_pool_mutex);
4282
4283	mutex_lock(&wq->mutex);
4284	for_each_pwq(pwq, wq)
4285		pwq_adjust_max_active(pwq);
4286	mutex_unlock(&wq->mutex);
4287
4288	list_add(&wq->list, &workqueues);
4289
4290	mutex_unlock(&wq_pool_mutex);
4291
4292	return wq;
4293
4294err_free_wq:
4295	free_workqueue_attrs(wq->unbound_attrs);
4296	kfree(wq);
4297	return NULL;
4298err_destroy:
4299	destroy_workqueue(wq);
4300	return NULL;
4301}
4302EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
4303
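/*
 * Illustrative sketch, not part of the original file: the typical
 * alloc_workqueue() / queue_work() / destroy_workqueue() life cycle of a
 * driver-private workqueue.  All names below are hypothetical.
 */
static struct workqueue_struct *example_wq;
static void example_work_fn(struct work_struct *work);
static DECLARE_WORK(example_work, example_work_fn);

static void example_work_fn(struct work_struct *work)
{
	/* runs in process context on a shared pool worker */
}

static int example_start(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer for reclaim-path users */
	example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_stop(void)
{
	/* drains pending and in-flight work items before freeing */
	destroy_workqueue(example_wq);
}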
4304/**
4305 * destroy_workqueue - safely terminate a workqueue
4306 * @wq: target workqueue
4307 *
4308 * Safely destroy a workqueue. All work currently pending will be done first.
4309 */
4310void destroy_workqueue(struct workqueue_struct *wq)
4311{
4312	struct pool_workqueue *pwq;
4313	int node;
4314
4315	/* drain it before proceeding with destruction */
4316	drain_workqueue(wq);
4317
4318	/* sanity checks */
4319	mutex_lock(&wq->mutex);
4320	for_each_pwq(pwq, wq) {
4321		int i;
4322
4323		for (i = 0; i < WORK_NR_COLORS; i++) {
4324			if (WARN_ON(pwq->nr_in_flight[i])) {
4325				mutex_unlock(&wq->mutex);
4326				return;
4327			}
4328		}
4329
4330		if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4331		    WARN_ON(pwq->nr_active) ||
4332		    WARN_ON(!list_empty(&pwq->delayed_works))) {
4333			mutex_unlock(&wq->mutex);
4334			return;
4335		}
4336	}
4337	mutex_unlock(&wq->mutex);
4338
4339	/*
4340	 * The wq list is used for freezing.  Remove @wq from the list after
4341	 * flushing is complete in case freezing races us.
4342	 */
4343	mutex_lock(&wq_pool_mutex);
4344	list_del_init(&wq->list);
4345	mutex_unlock(&wq_pool_mutex);
4346
4347	workqueue_sysfs_unregister(wq);
4348
4349	if (wq->rescuer) {
4350		kthread_stop(wq->rescuer->task);
 
4351		kfree(wq->rescuer);
4352		wq->rescuer = NULL;
4353	}
4354
4355	if (!(wq->flags & WQ_UNBOUND)) {
4356		/*
4357		 * The base ref is never dropped on per-cpu pwqs.  Directly
4358		 * free the pwqs and wq.
4359		 */
4360		free_percpu(wq->cpu_pwqs);
4361		kfree(wq);
4362	} else {
4363		/*
4364		 * We're the sole accessor of @wq at this point.  Directly
4365		 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4366		 * @wq will be freed when the last pwq is released.
4367		 */
4368		for_each_node(node) {
4369			pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4370			RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4371			put_pwq_unlocked(pwq);
4372		}
4373
4374		/*
4375		 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4376		 * put.  Don't access it afterwards.
4377		 */
4378		pwq = wq->dfl_pwq;
4379		wq->dfl_pwq = NULL;
4380		put_pwq_unlocked(pwq);
4381	}
4382}
4383EXPORT_SYMBOL_GPL(destroy_workqueue);
4384
4385/**
4386 * workqueue_set_max_active - adjust max_active of a workqueue
4387 * @wq: target workqueue
4388 * @max_active: new max_active value.
4389 *
4390 * Set max_active of @wq to @max_active.
4391 *
4392 * CONTEXT:
4393 * Don't call from IRQ context.
4394 */
4395void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4396{
4397	struct pool_workqueue *pwq;
4398
4399	/* disallow meddling with max_active for ordered workqueues */
4400	if (WARN_ON(wq->flags & __WQ_ORDERED))
4401		return;
4402
4403	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4404
4405	mutex_lock(&wq->mutex);
4406
4407	wq->saved_max_active = max_active;
4408
4409	for_each_pwq(pwq, wq)
4410		pwq_adjust_max_active(pwq);
4411
4412	mutex_unlock(&wq->mutex);
4413}
4414EXPORT_SYMBOL_GPL(workqueue_set_max_active);
4415
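/*
 * Illustrative sketch, not part of the original file: raising the
 * concurrency limit of an existing non-ordered workqueue at run time, e.g.
 * from a module parameter or sysctl handler.  Names are hypothetical.
 */
static void example_set_concurrency(struct workqueue_struct *wq, int limit)
{
	/* clamped internally to [1, WQ_MAX_ACTIVE] (or WQ_UNBOUND_MAX_ACTIVE) */
	workqueue_set_max_active(wq, limit);
}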
4416/**
4417 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4418 *
4419 * Determine whether %current is a workqueue rescuer.  Can be used from
4420 * work functions to determine whether it's being run off the rescuer task.
4421 *
4422 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4423 */
4424bool current_is_workqueue_rescuer(void)
4425{
4426	struct worker *worker = current_wq_worker();
4427
4428	return worker && worker->rescue_wq;
4429}
 
4430
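/*
 * Illustrative sketch, not part of the original file: a work function can
 * use current_is_workqueue_rescuer() to skip best-effort extras when it is
 * being run by the rescuer, i.e. while the system is likely under memory
 * pressure.  The work function below is hypothetical.
 */
static void example_reclaim_work_fn(struct work_struct *work)
{
	/* the part that guarantees forward progress always runs */

	/* skip optional, allocation-heavy housekeeping on the rescuer */
	if (current_is_workqueue_rescuer())
		return;

	/* optional processing goes here */
}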
4431/**
4432 * workqueue_congested - test whether a workqueue is congested
4433 * @cpu: CPU in question
4434 * @wq: target workqueue
4435 *
4436 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4437 * no synchronization around this function and the test result is
4438 * unreliable and only useful as advisory hints or for debugging.
4439 *
4440 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4441 * Note that both per-cpu and unbound workqueues may be associated with
4442 * multiple pool_workqueues which have separate congested states.  A
4443 * workqueue being congested on one CPU doesn't mean the workqueue is also
4444 * congested on other CPUs / NUMA nodes.
4445 *
4446 * Return:
4447 * %true if congested, %false otherwise.
4448 */
4449bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4450{
4451	struct pool_workqueue *pwq;
4452	bool ret;
4453
4454	rcu_read_lock_sched();
4455
4456	if (cpu == WORK_CPU_UNBOUND)
4457		cpu = smp_processor_id();
4458
4459	if (!(wq->flags & WQ_UNBOUND))
4460		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4461	else
4462		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4463
4464	ret = !list_empty(&pwq->delayed_works);
4465	rcu_read_unlock_sched();
4466
4467	return ret;
4468}
4469EXPORT_SYMBOL_GPL(workqueue_congested);
4470
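/*
 * Illustrative sketch, not part of the original file: using
 * workqueue_congested() as an advisory hint to drop optional work instead
 * of queueing it.  Because the result is unsynchronized it must only ever
 * steer best-effort decisions.  Names are hypothetical.
 */
static bool example_queue_optional(struct workqueue_struct *wq,
				   struct work_struct *work)
{
	if (workqueue_congested(WORK_CPU_UNBOUND, wq))
		return false;	/* too busy right now, caller may retry */

	return queue_work(wq, work);
}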
4471/**
4472 * work_busy - test whether a work is currently pending or running
4473 * @work: the work to be tested
4474 *
4475 * Test whether @work is currently pending or running.  There is no
4476 * synchronization around this function and the test result is
4477 * unreliable and only useful as advisory hints or for debugging.
 
 
4478 *
4479 * Return:
4480 * OR'd bitmask of WORK_BUSY_* bits.
4481 */
4482unsigned int work_busy(struct work_struct *work)
4483{
4484	struct worker_pool *pool;
4485	unsigned long flags;
4486	unsigned int ret = 0;
4487
4488	if (work_pending(work))
4489		ret |= WORK_BUSY_PENDING;
 
 
4490
4491	local_irq_save(flags);
4492	pool = get_work_pool(work);
4493	if (pool) {
4494		spin_lock(&pool->lock);
4495		if (find_worker_executing_work(pool, work))
4496			ret |= WORK_BUSY_RUNNING;
4497		spin_unlock(&pool->lock);
4498	}
4499	local_irq_restore(flags);
4500
4501	return ret;
4502}
4503EXPORT_SYMBOL_GPL(work_busy);
4504
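/*
 * Illustrative sketch, not part of the original file: work_busy() returns
 * an advisory bitmask which is only good enough for debug output like the
 * hypothetical helper below.
 */
static void example_report_work(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_debug("work %p: %spending, %srunning\n", work,
		 busy & WORK_BUSY_PENDING ? "" : "not ",
		 busy & WORK_BUSY_RUNNING ? "" : "not ");
}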
4505/**
4506 * set_worker_desc - set description for the current work item
4507 * @fmt: printf-style format string
4508 * @...: arguments for the format string
4509 *
4510 * This function can be called by a running work function to describe what
4511 * the work item is about.  If the worker task gets dumped, this
4512 * information will be printed out together to help debugging.  The
4513 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4514 */
4515void set_worker_desc(const char *fmt, ...)
4516{
4517	struct worker *worker = current_wq_worker();
4518	va_list args;
4519
4520	if (worker) {
4521		va_start(args, fmt);
4522		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4523		va_end(args);
4524		worker->desc_valid = true;
4525	}
4526}
4527
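/*
 * Illustrative sketch, not part of the original file: a work function
 * describing which object it is operating on so that the description shows
 * up in print_worker_info() output if the worker task is dumped.  The
 * example_dev structure and its fields are hypothetical.
 */
struct example_dev {
	struct work_struct	work;
	char			name[16];
};

static void example_dev_work_fn(struct work_struct *work)
{
	struct example_dev *edev = container_of(work, struct example_dev, work);

	set_worker_desc("flushing %s", edev->name);
	/* ... actual device work ... */
}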
4528/**
4529 * print_worker_info - print out worker information and description
4530 * @log_lvl: the log level to use when printing
4531 * @task: target task
4532 *
4533 * If @task is a worker and currently executing a work item, print out the
4534 * name of the workqueue being serviced and worker description set with
4535 * set_worker_desc() by the currently executing work item.
4536 *
4537 * This function can be safely called on any task as long as the
4538 * task_struct itself is accessible.  While safe, this function isn't
4539 * synchronized and may print out mixed-up or garbled output of limited length.
4540 */
4541void print_worker_info(const char *log_lvl, struct task_struct *task)
4542{
4543	work_func_t *fn = NULL;
4544	char name[WQ_NAME_LEN] = { };
4545	char desc[WORKER_DESC_LEN] = { };
4546	struct pool_workqueue *pwq = NULL;
4547	struct workqueue_struct *wq = NULL;
4548	bool desc_valid = false;
4549	struct worker *worker;
4550
4551	if (!(task->flags & PF_WQ_WORKER))
4552		return;
4553
 
4554	/*
4555	 * This function is called without any synchronization and @task
4556	 * could be in any state.  Be careful with dereferences.
4557	 */
4558	worker = probe_kthread_data(task);
 
 
4559
4560	/*
4561	 * Carefully copy the associated workqueue's workfn and name.  Keep
4562	 * the original last '\0' in case the original contains garbage.
4563	 */
4564	probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4565	probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4566	probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4567	probe_kernel_read(name, wq->name, sizeof(name) - 1);
4568
4569	/* copy worker description */
4570	probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4571	if (desc_valid)
4572		probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4573
4574	if (fn || name[0] || desc[0]) {
4575		printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4576		if (desc[0])
4577			pr_cont(" (%s)", desc);
4578		pr_cont("\n");
4579	}
4580}
4581
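/*
 * Illustrative sketch, not part of the original file: print_worker_info()
 * may be called on any task from a debug path; for tasks which aren't
 * workqueue workers it returns without printing.  The helper is
 * hypothetical.
 */
static void example_dump_task(struct task_struct *task)
{
	pr_info("dumping %s/%d\n", task->comm, task_pid_nr(task));
	print_worker_info(KERN_INFO, task);
}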
4582/*
4583 * CPU hotplug.
4584 *
4585 * There are two challenges in supporting CPU hotplug.  Firstly, there
4586 * are a lot of assumptions on strong associations among work, pwq and
4587 * pool which make migrating pending and scheduled works very
4588 * difficult to implement without impacting hot paths.  Secondly,
4589 * worker pools serve a mix of short, long and very long running work
4590 * items, making blocked draining impractical.
4591 *
4592 * This is solved by allowing the pools to be disassociated from the CPU
4593 * running as an unbound one and allowing it to be reattached later if the
4594 * cpu comes back online.
4595 */
4596
4597static void wq_unbind_fn(struct work_struct *work)
4598{
4599	int cpu = smp_processor_id();
4600	struct worker_pool *pool;
4601	struct worker *worker;
4602	int wi;
4603
4604	for_each_cpu_worker_pool(pool, cpu) {
4605		WARN_ON_ONCE(cpu != smp_processor_id());
4606
4607		mutex_lock(&pool->manager_mutex);
4608		spin_lock_irq(&pool->lock);
4609
4610		/*
4611		 * We've blocked all manager operations.  Make all workers
4612		 * unbound and set DISASSOCIATED.  Before this, all workers
4613		 * except for the ones which are still executing works from
4614		 * before the last CPU down must be on the cpu.  After
4615		 * this, they may become diasporas.
4616		 */
4617		for_each_pool_worker(worker, wi, pool)
4618			worker->flags |= WORKER_UNBOUND;
4619
4620		pool->flags |= POOL_DISASSOCIATED;
 
 
4621
4622		spin_unlock_irq(&pool->lock);
4623		mutex_unlock(&pool->manager_mutex);
4624
4625		/*
4626		 * Call schedule() so that we cross rq->lock and thus can
4627		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4628		 * This is necessary as scheduler callbacks may be invoked
4629		 * from other cpus.
4630		 */
4631		schedule();
4632
4633		/*
4634		 * Sched callbacks are disabled now.  Zap nr_running.
4635		 * After this, nr_running stays zero and need_more_worker()
4636		 * and keep_working() are always true as long as the
4637		 * worklist is not empty.  This pool now behaves as an
4638		 * unbound (in terms of concurrency management) pool which
4639		 * is served by workers tied to the pool.
4640		 */
4641		atomic_set(&pool->nr_running, 0);
4642
4643		/*
4644		 * With concurrency management just turned off, a busy
4645		 * worker blocking could lead to lengthy stalls.  Kick off
4646		 * unbound chain execution of currently pending work items.
4647		 */
4648		spin_lock_irq(&pool->lock);
4649		wake_up_worker(pool);
4650		spin_unlock_irq(&pool->lock);
4651	}
4652}
4653
4654/**
4655 * rebind_workers - rebind all workers of a pool to the associated CPU
4656 * @pool: pool of interest
4657 *
4658 * @pool->cpu is coming online.  Rebind all workers to the CPU.
4659 */
4660static void rebind_workers(struct worker_pool *pool)
4661{
4662	struct worker *worker;
4663	int wi;
4664
4665	lockdep_assert_held(&pool->manager_mutex);
4666
4667	/*
4668	 * Restore CPU affinity of all workers.  As all idle workers should
4669	 * be on the run-queue of the associated CPU before any local
4670	 * wake-ups for concurrency management happen, restore CPU affinity
4671	 * of all workers first and then clear UNBOUND.  As we're called
4672	 * from CPU_ONLINE, the following shouldn't fail.
4673	 */
4674	for_each_pool_worker(worker, wi, pool)
4675		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4676						  pool->attrs->cpumask) < 0);
4677
4678	spin_lock_irq(&pool->lock);
4679
4680	for_each_pool_worker(worker, wi, pool) {
4681		unsigned int worker_flags = worker->flags;
4682
4683		/*
4684		 * A bound idle worker should actually be on the runqueue
4685		 * of the associated CPU for local wake-ups targeting it to
4686		 * work.  Kick all idle workers so that they migrate to the
4687		 * associated CPU.  Doing this in the same loop as
4688		 * replacing UNBOUND with REBOUND is safe as no worker will
4689		 * be bound before @pool->lock is released.
4690		 */
4691		if (worker_flags & WORKER_IDLE)
4692			wake_up_process(worker->task);
4693
4694		/*
4695		 * We want to clear UNBOUND but can't directly call
4696		 * worker_clr_flags() or adjust nr_running.  Atomically
4697		 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4698		 * @worker will clear REBOUND using worker_clr_flags() when
4699		 * it initiates the next execution cycle thus restoring
4700		 * concurrency management.  Note that when or whether
4701		 * @worker clears REBOUND doesn't affect correctness.
4702		 *
4703		 * ACCESS_ONCE() is necessary because @worker->flags may be
4704		 * tested without holding any lock in
4705		 * wq_worker_waking_up().  Without it, NOT_RUNNING test may
4706		 * fail incorrectly leading to premature concurrency
4707		 * management operations.
4708		 */
4709		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4710		worker_flags |= WORKER_REBOUND;
4711		worker_flags &= ~WORKER_UNBOUND;
4712		ACCESS_ONCE(worker->flags) = worker_flags;
4713	}
4714
4715	spin_unlock_irq(&pool->lock);
4716}
4717
4718/**
4719 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4720 * @pool: unbound pool of interest
4721 * @cpu: the CPU which is coming up
4722 *
4723 * An unbound pool may end up with a cpumask which doesn't have any online
4724 * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
4725 * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
4726 * online CPU before, cpus_allowed of all its workers should be restored.
4727 */
4728static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4729{
4730	static cpumask_t cpumask;
4731	struct worker *worker;
4732	int wi;
4733
4734	lockdep_assert_held(&pool->manager_mutex);
4735
4736	/* is @cpu allowed for @pool? */
4737	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4738		return;
4739
4740	/* is @cpu the only online CPU? */
4741	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4742	if (cpumask_weight(&cpumask) != 1)
4743		return;
4744
4745	/* as we're called from CPU_ONLINE, the following shouldn't fail */
4746	for_each_pool_worker(worker, wi, pool)
4747		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4748						  pool->attrs->cpumask) < 0);
4749}
4750
4751/*
4752 * Workqueues should be brought up before normal priority CPU notifiers.
4753 * This will be registered high priority CPU notifier.
4754 */
4755static int workqueue_cpu_up_callback(struct notifier_block *nfb,
4756					       unsigned long action,
4757					       void *hcpu)
4758{
4759	int cpu = (unsigned long)hcpu;
4760	struct worker_pool *pool;
4761	struct workqueue_struct *wq;
4762	int pi;
4763
4764	switch (action & ~CPU_TASKS_FROZEN) {
4765	case CPU_UP_PREPARE:
4766		for_each_cpu_worker_pool(pool, cpu) {
4767			if (pool->nr_workers)
4768				continue;
4769			if (create_and_start_worker(pool) < 0)
4770				return NOTIFY_BAD;
 
4771		}
4772		break;
4773
4774	case CPU_DOWN_FAILED:
4775	case CPU_ONLINE:
4776		mutex_lock(&wq_pool_mutex);
4777
4778		for_each_pool(pool, pi) {
4779			mutex_lock(&pool->manager_mutex);
4780
4781			if (pool->cpu == cpu) {
4782				spin_lock_irq(&pool->lock);
4783				pool->flags &= ~POOL_DISASSOCIATED;
4784				spin_unlock_irq(&pool->lock);
4785
4786				rebind_workers(pool);
4787			} else if (pool->cpu < 0) {
4788				restore_unbound_workers_cpumask(pool, cpu);
4789			}
4790
4791			mutex_unlock(&pool->manager_mutex);
4792		}
4793
4794		/* update NUMA affinity of unbound workqueues */
4795		list_for_each_entry(wq, &workqueues, list)
4796			wq_update_unbound_numa(wq, cpu, true);
4797
4798		mutex_unlock(&wq_pool_mutex);
4799		break;
4800	}
4801	return NOTIFY_OK;
4802}
4803
4804/*
4805 * Workqueues should be brought down after normal priority CPU notifiers.
4806 * This will be registered as low priority CPU notifier.
4807 */
4808static int workqueue_cpu_down_callback(struct notifier_block *nfb,
4809						 unsigned long action,
4810						 void *hcpu)
4811{
4812	int cpu = (unsigned long)hcpu;
4813	struct work_struct unbind_work;
4814	struct workqueue_struct *wq;
4815
4816	switch (action & ~CPU_TASKS_FROZEN) {
4817	case CPU_DOWN_PREPARE:
4818		/* unbinding per-cpu workers should happen on the local CPU */
4819		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4820		queue_work_on(cpu, system_highpri_wq, &unbind_work);
4821
4822		/* update NUMA affinity of unbound workqueues */
4823		mutex_lock(&wq_pool_mutex);
4824		list_for_each_entry(wq, &workqueues, list)
4825			wq_update_unbound_numa(wq, cpu, false);
4826		mutex_unlock(&wq_pool_mutex);
4827
4828		/* wait for per-cpu unbinding to finish */
4829		flush_work(&unbind_work);
4830		destroy_work_on_stack(&unbind_work);
4831		break;
4832	}
4833	return NOTIFY_OK;
4834}
4835
4836#ifdef CONFIG_SMP
4837
4838struct work_for_cpu {
4839	struct work_struct work;
4840	long (*fn)(void *);
4841	void *arg;
4842	long ret;
4843};
4844
4845static void work_for_cpu_fn(struct work_struct *work)
4846{
4847	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4848
4849	wfc->ret = wfc->fn(wfc->arg);
4850}
4851
4852/**
4853 * work_on_cpu - run a function in user context on a particular cpu
4854 * @cpu: the cpu to run on
4855 * @fn: the function to run
4856 * @arg: the function arg
4857 *
4858 * It is up to the caller to ensure that the cpu doesn't go offline.
4859 * The caller must not hold any locks which would prevent @fn from completing.
4860 *
4861 * Return: The value @fn returns.
4862 */
4863long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4864{
4865	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4866
4867	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4868	schedule_work_on(cpu, &wfc.work);
4869	flush_work(&wfc.work);
4870	destroy_work_on_stack(&wfc.work);
 
 
4871	return wfc.ret;
4872}
4873EXPORT_SYMBOL_GPL(work_on_cpu);
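/*
 * Illustrative sketch, not part of the original file: running a function on
 * a specific CPU and collecting its return value.  The caller keeps the CPU
 * online around the call.  Both helpers are hypothetical.
 */
static long example_read_on_cpu(void *arg)
{
	/* executes in process context, pinned to the requested CPU */
	return raw_smp_processor_id();
}

static long example_query_cpu(int cpu)
{
	long ret;

	get_online_cpus();
	ret = work_on_cpu(cpu, example_read_on_cpu, NULL);
	put_online_cpus();
	return ret;
}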
4874#endif /* CONFIG_SMP */
4875
4876#ifdef CONFIG_FREEZER
4877
4878/**
4879 * freeze_workqueues_begin - begin freezing workqueues
4880 *
4881 * Start freezing workqueues.  After this function returns, all freezable
4882 * workqueues will queue new works to their delayed_works list instead of
4883 * pool->worklist.
4884 *
4885 * CONTEXT:
4886 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4887 */
4888void freeze_workqueues_begin(void)
4889{
4890	struct worker_pool *pool;
4891	struct workqueue_struct *wq;
4892	struct pool_workqueue *pwq;
4893	int pi;
4894
4895	mutex_lock(&wq_pool_mutex);
4896
4897	WARN_ON_ONCE(workqueue_freezing);
4898	workqueue_freezing = true;
4899
4900	/* set FREEZING */
4901	for_each_pool(pool, pi) {
4902		spin_lock_irq(&pool->lock);
4903		WARN_ON_ONCE(pool->flags & POOL_FREEZING);
4904		pool->flags |= POOL_FREEZING;
4905		spin_unlock_irq(&pool->lock);
4906	}
4907
4908	list_for_each_entry(wq, &workqueues, list) {
4909		mutex_lock(&wq->mutex);
4910		for_each_pwq(pwq, wq)
4911			pwq_adjust_max_active(pwq);
4912		mutex_unlock(&wq->mutex);
4913	}
4914
4915	mutex_unlock(&wq_pool_mutex);
4916}
4917
4918/**
4919 * freeze_workqueues_busy - are freezable workqueues still busy?
4920 *
4921 * Check whether freezing is complete.  This function must be called
4922 * between freeze_workqueues_begin() and thaw_workqueues().
4923 *
4924 * CONTEXT:
4925 * Grabs and releases wq_pool_mutex.
4926 *
4927 * Return:
4928 * %true if some freezable workqueues are still busy.  %false if freezing
4929 * is complete.
4930 */
4931bool freeze_workqueues_busy(void)
4932{
 
4933	bool busy = false;
4934	struct workqueue_struct *wq;
4935	struct pool_workqueue *pwq;
4936
4937	mutex_lock(&wq_pool_mutex);
4938
4939	WARN_ON_ONCE(!workqueue_freezing);
4940
4941	list_for_each_entry(wq, &workqueues, list) {
4942		if (!(wq->flags & WQ_FREEZABLE))
4943			continue;
4944		/*
4945		 * nr_active is monotonically decreasing.  It's safe
4946		 * to peek without lock.
4947		 */
4948		rcu_read_lock_sched();
4949		for_each_pwq(pwq, wq) {
4950			WARN_ON_ONCE(pwq->nr_active < 0);
4951			if (pwq->nr_active) {
4952				busy = true;
4953				rcu_read_unlock_sched();
4954				goto out_unlock;
4955			}
4956		}
4957		rcu_read_unlock_sched();
4958	}
4959out_unlock:
4960	mutex_unlock(&wq_pool_mutex);
4961	return busy;
4962}
4963
4964/**
4965 * thaw_workqueues - thaw workqueues
4966 *
4967 * Thaw workqueues.  Normal queueing is restored and all collected
4968 * frozen works are transferred to their respective pool worklists.
4969 *
4970 * CONTEXT:
4971 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4972 */
4973void thaw_workqueues(void)
4974{
4975	struct workqueue_struct *wq;
4976	struct pool_workqueue *pwq;
4977	struct worker_pool *pool;
4978	int pi;
4979
4980	mutex_lock(&wq_pool_mutex);
4981
4982	if (!workqueue_freezing)
4983		goto out_unlock;
4984
4985	/* clear FREEZING */
4986	for_each_pool(pool, pi) {
4987		spin_lock_irq(&pool->lock);
4988		WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
4989		pool->flags &= ~POOL_FREEZING;
4990		spin_unlock_irq(&pool->lock);
4991	}
4992
4993	/* restore max_active and repopulate worklist */
4994	list_for_each_entry(wq, &workqueues, list) {
4995		mutex_lock(&wq->mutex);
4996		for_each_pwq(pwq, wq)
4997			pwq_adjust_max_active(pwq);
4998		mutex_unlock(&wq->mutex);
4999	}
5000
5001	workqueue_freezing = false;
5002out_unlock:
5003	mutex_unlock(&wq_pool_mutex);
5004}
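/*
 * Illustrative sketch, not part of the original file, of how these three
 * functions fit together during a system freeze: start freezing, poll until
 * no freezable workqueue has active work items left, and thaw again on
 * timeout.  The one second timeout and the helper itself are assumptions,
 * not the actual PM core implementation.
 */
static int example_freeze_wqs(void)
{
	unsigned long timeout = jiffies + HZ;

	freeze_workqueues_begin();

	while (freeze_workqueues_busy()) {
		if (time_after(jiffies, timeout)) {
			thaw_workqueues();
			return -EBUSY;
		}
		schedule_timeout_uninterruptible(HZ / 100);
	}
	return 0;
}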
5005#endif /* CONFIG_FREEZER */
5006
5007static void __init wq_numa_init(void)
5008{
5009	cpumask_var_t *tbl;
5010	int node, cpu;
5011
5012	/* determine NUMA pwq table len - highest node id + 1 */
5013	for_each_node(node)
5014		wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1);
5015
5016	if (num_possible_nodes() <= 1)
5017		return;
5018
5019	if (wq_disable_numa) {
5020		pr_info("workqueue: NUMA affinity support disabled\n");
5021		return;
5022	}
5023
5024	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
5025	BUG_ON(!wq_update_unbound_numa_attrs_buf);
 
 
5026
5027	/*
5028	 * We want the mask of possible CPUs for each node, which isn't readily
5029	 * available.  Build one from cpu_to_node() which should have been
5030	 * fully initialized by now.
5031	 */
5032	tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL);
5033	BUG_ON(!tbl);
5034
5035	for_each_node(node)
5036		BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5037				node_online(node) ? node : NUMA_NO_NODE));
5038
5039	for_each_possible_cpu(cpu) {
5040		node = cpu_to_node(cpu);
5041		if (WARN_ON(node == NUMA_NO_NODE)) {
5042			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5043			/* happens iff arch is bonkers, let's just proceed */
5044			return;
5045		}
5046		cpumask_set_cpu(cpu, tbl[node]);
5047	}
5048
5049	wq_numa_possible_cpumask = tbl;
5050	wq_numa_enabled = true;
 
5051}
 
5052
5053static int __init init_workqueues(void)
5054{
5055	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5056	int i, cpu;
5057
5058	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5059
5060	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
 
5061
5062	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
5063	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
5064
5065	wq_numa_init();
5066
5067	/* initialize CPU pools */
5068	for_each_possible_cpu(cpu) {
5069		struct worker_pool *pool;
5070
5071		i = 0;
5072		for_each_cpu_worker_pool(pool, cpu) {
5073			BUG_ON(init_worker_pool(pool));
5074			pool->cpu = cpu;
5075			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5076			pool->attrs->nice = std_nice[i++];
5077			pool->node = cpu_to_node(cpu);
5078
5079			/* alloc pool ID */
5080			mutex_lock(&wq_pool_mutex);
5081			BUG_ON(worker_pool_assign_id(pool));
5082			mutex_unlock(&wq_pool_mutex);
5083		}
5084	}
5085
5086	/* create the initial worker */
5087	for_each_online_cpu(cpu) {
5088		struct worker_pool *pool;
5089
5090		for_each_cpu_worker_pool(pool, cpu) {
5091			pool->flags &= ~POOL_DISASSOCIATED;
5092			BUG_ON(create_and_start_worker(pool) < 0);
5093		}
5094	}
5095
5096	/* create default unbound and ordered wq attrs */
5097	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5098		struct workqueue_attrs *attrs;
5099
5100		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5101		attrs->nice = std_nice[i];
5102		unbound_std_wq_attrs[i] = attrs;
5103
5104		/*
5105		 * An ordered wq should have only one pwq as ordering is
5106		 * guaranteed by max_active which is enforced by pwqs.
5107		 * Turn off NUMA so that dfl_pwq is used for all nodes.
5108		 */
5109		BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5110		attrs->nice = std_nice[i];
5111		attrs->no_numa = true;
5112		ordered_wq_attrs[i] = attrs;
5113	}
5114
5115	system_wq = alloc_workqueue("events", 0, 0);
5116	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5117	system_long_wq = alloc_workqueue("events_long", 0, 0);
 
5118	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5119					    WQ_UNBOUND_MAX_ACTIVE);
5120	system_freezable_wq = alloc_workqueue("events_freezable",
5121					      WQ_FREEZABLE, 0);
5122	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5123					      WQ_POWER_EFFICIENT, 0);
5124	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5125					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5126					      0);
5127	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5128	       !system_unbound_wq || !system_freezable_wq ||
5129	       !system_power_efficient_wq ||
5130	       !system_freezable_power_efficient_wq);
5131	return 0;
5132}
5133early_initcall(init_workqueues);
v3.1
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002		Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010		SUSE Linux Products GmbH
  15 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  16 *
  17 * This is the generic async execution mechanism.  Work items as are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There is one worker pool for each CPU and
  20 * one extra for works which are better served by workers which are
  21 * not bound to any specific CPU.
 
  22 *
  23 * Please read Documentation/workqueue.txt for details.
  24 */
  25
  26#include <linux/module.h>
  27#include <linux/kernel.h>
  28#include <linux/sched.h>
  29#include <linux/init.h>
  30#include <linux/signal.h>
  31#include <linux/completion.h>
  32#include <linux/workqueue.h>
  33#include <linux/slab.h>
  34#include <linux/cpu.h>
  35#include <linux/notifier.h>
  36#include <linux/kthread.h>
  37#include <linux/hardirq.h>
  38#include <linux/mempolicy.h>
  39#include <linux/freezer.h>
  40#include <linux/kallsyms.h>
  41#include <linux/debug_locks.h>
  42#include <linux/lockdep.h>
  43#include <linux/idr.h>
 
 
 
 
 
 
  44
  45#include "workqueue_sched.h"
  46
  47enum {
  48	/* global_cwq flags */
  49	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
  50	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
  51	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
  52	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
  53	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */
 
 
 
 
 
 
 
 
 
 
 
 
 
  54
  55	/* worker flags */
  56	WORKER_STARTED		= 1 << 0,	/* started */
  57	WORKER_DIE		= 1 << 1,	/* die die die */
  58	WORKER_IDLE		= 1 << 2,	/* is idle */
  59	WORKER_PREP		= 1 << 3,	/* preparing to run works */
  60	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
  61	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
  62	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
  63	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
 
  64
  65	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
  66				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
  67
  68	/* gcwq->trustee_state */
  69	TRUSTEE_START		= 0,		/* start */
  70	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
  71	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
  72	TRUSTEE_RELEASE		= 3,		/* release workers */
  73	TRUSTEE_DONE		= 4,		/* trustee is done */
  74
 
  75	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
  76	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
  77	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
  78
  79	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
  80	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
  81
  82	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  83						/* call for help after 10ms
  84						   (min two ticks) */
  85	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
   86	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
  87	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
  88
  89	/*
  90	 * Rescue workers are used only on emergencies and shared by
  91	 * all cpus.  Give -20.
  92	 */
  93	RESCUER_NICE_LEVEL	= -20,
  94};
  95
  96/*
  97 * Structure fields follow one of the following exclusion rules.
  98 *
  99 * I: Modifiable by initialization/destruction paths and read-only for
 100 *    everyone else.
 101 *
 102 * P: Preemption protected.  Disabling preemption is enough and should
 103 *    only be modified and accessed from the local cpu.
 104 *
 105 * L: gcwq->lock protected.  Access with gcwq->lock held.
 106 *
 107 * X: During normal operation, modification requires gcwq->lock and
 108 *    should be done only from local cpu.  Either disabling preemption
 109 *    on local cpu or grabbing gcwq->lock is enough for read access.
 110 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 111 *
 112 * F: wq->flush_mutex protected.
 
 113 *
 114 * W: workqueue_lock protected.
 115 */
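/*
 * Illustrative sketch (not part of the original file): a field annotated
 * "L:" above may only be touched with gcwq->lock held.  A caller wanting
 * a consistent snapshot of the worker counts, for example, would do:
 *
 *	spin_lock_irq(&gcwq->lock);
 *	nr_busy = gcwq->nr_workers - gcwq->nr_idle;
 *	spin_unlock_irq(&gcwq->lock);
 *
 * nr_busy here is a local variable invented for the example.
 */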
 116
 117struct global_cwq;
 118
 119/*
 120 * The poor guys doing the actual heavy lifting.  All on-duty workers
 121 * are either serving the manager role, on idle list or on busy hash.
 122 */
 123struct worker {
 124	/* on idle list while idle, on busy hash table while busy */
 125	union {
 126		struct list_head	entry;	/* L: while idle */
 127		struct hlist_node	hentry;	/* L: while busy */
 128	};
 129
 130	struct work_struct	*current_work;	/* L: work being processed */
 131	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 132	struct list_head	scheduled;	/* L: scheduled works */
 133	struct task_struct	*task;		/* I: worker task */
 134	struct global_cwq	*gcwq;		/* I: the associated gcwq */
 135	/* 64 bytes boundary on 64bit, 32 on 32bit */
 136	unsigned long		last_active;	/* L: last active timestamp */
 137	unsigned int		flags;		/* X: flags */
 138	int			id;		/* I: worker id */
 139	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
 140};
 141
 142/*
 143 * Global per-cpu workqueue.  There's one and only one for each cpu
 144 * and all works are queued and processed here regardless of their
 145 * target workqueues.
 146 */
 147struct global_cwq {
 148	spinlock_t		lock;		/* the gcwq lock */
 149	struct list_head	worklist;	/* L: list of pending works */
 150	unsigned int		cpu;		/* I: the associated cpu */
 151	unsigned int		flags;		/* L: GCWQ_* flags */
 152
 153	int			nr_workers;	/* L: total number of workers */
 154	int			nr_idle;	/* L: currently idle ones */
 155
 156	/* workers are chained either in the idle_list or busy_hash */
 157	struct list_head	idle_list;	/* X: list of idle workers */
 158	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
 159						/* L: hash of busy workers */
 160
 161	struct timer_list	idle_timer;	/* L: worker idle timeout */
 162	struct timer_list	mayday_timer;	/* L: SOS timer for dworkers */
 163
 164	struct ida		worker_ida;	/* L: for worker IDs */
 165
 166	struct task_struct	*trustee;	/* L: for gcwq shutdown */
 167	unsigned int		trustee_state;	/* L: trustee state */
 168	wait_queue_head_t	trustee_wait;	/* trustee wait */
 169	struct worker		*first_idle;	/* L: first idle worker */
 
 170} ____cacheline_aligned_in_smp;
 171
 172/*
 173 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 174 * work_struct->data are used for flags and thus cwqs need to be
  175 * aligned at two raised to the power of the number of flag bits.
 
 176 */
 177struct cpu_workqueue_struct {
 178	struct global_cwq	*gcwq;		/* I: the associated gcwq */
 179	struct workqueue_struct *wq;		/* I: the owning workqueue */
 180	int			work_color;	/* L: current color */
 181	int			flush_color;	/* L: flushing color */
 
 182	int			nr_in_flight[WORK_NR_COLORS];
 183						/* L: nr of in_flight works */
 184	int			nr_active;	/* L: nr of active works */
 185	int			max_active;	/* L: max active works */
 186	struct list_head	delayed_works;	/* L: delayed works */
 187};
 188
 189/*
 190 * Structure used to wait for workqueue flush.
 191 */
 192struct wq_flusher {
 193	struct list_head	list;		/* F: list of flushers */
 194	int			flush_color;	/* F: flush color waiting for */
 195	struct completion	done;		/* flush completion */
 196};
 197
 198/*
 199 * All cpumasks are assumed to be always set on UP and thus can't be
 200 * used to determine whether there's something to be done.
 201 */
 202#ifdef CONFIG_SMP
 203typedef cpumask_var_t mayday_mask_t;
 204#define mayday_test_and_set_cpu(cpu, mask)	\
 205	cpumask_test_and_set_cpu((cpu), (mask))
 206#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
 207#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
 208#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
 209#define free_mayday_mask(mask)			free_cpumask_var((mask))
 210#else
 211typedef unsigned long mayday_mask_t;
 212#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
 213#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
 214#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
 215#define alloc_mayday_mask(maskp, gfp)		true
 216#define free_mayday_mask(mask)			do { } while (0)
 217#endif
 218
 219/*
 220 * The externally visible workqueue abstraction is an array of
 221 * per-CPU workqueues:
 222 */
 223struct workqueue_struct {
 224	unsigned int		flags;		/* W: WQ_* flags */
 225	union {
 226		struct cpu_workqueue_struct __percpu	*pcpu;
 227		struct cpu_workqueue_struct		*single;
 228		unsigned long				v;
 229	} cpu_wq;				/* I: cwq's */
 230	struct list_head	list;		/* W: list of all workqueues */
 231
 232	struct mutex		flush_mutex;	/* protects wq flushing */
 233	int			work_color;	/* F: current work color */
 234	int			flush_color;	/* F: current flush color */
 235	atomic_t		nr_cwqs_to_flush; /* flush in progress */
 236	struct wq_flusher	*first_flusher;	/* F: first flusher */
 237	struct list_head	flusher_queue;	/* F: flush waiters */
 238	struct list_head	flusher_overflow; /* F: flush overflow list */
 239
 240	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
 241	struct worker		*rescuer;	/* I: rescue worker */
 242
 243	int			nr_drainers;	/* W: drain in progress */
 244	int			saved_max_active; /* W: saved cwq max_active */
 245	const char		*name;		/* I: workqueue name */
 246#ifdef CONFIG_LOCKDEP
 247	struct lockdep_map	lockdep_map;
 248#endif
 249};
 250
 251struct workqueue_struct *system_wq __read_mostly;
 252struct workqueue_struct *system_long_wq __read_mostly;
 253struct workqueue_struct *system_nrt_wq __read_mostly;
 254struct workqueue_struct *system_unbound_wq __read_mostly;
 
 255struct workqueue_struct *system_freezable_wq __read_mostly;
 256EXPORT_SYMBOL_GPL(system_wq);
 257EXPORT_SYMBOL_GPL(system_long_wq);
 258EXPORT_SYMBOL_GPL(system_nrt_wq);
 259EXPORT_SYMBOL_GPL(system_unbound_wq);
 260EXPORT_SYMBOL_GPL(system_freezable_wq);
 261
 262#define CREATE_TRACE_POINTS
 263#include <trace/events/workqueue.h>
 264
 265#define for_each_busy_worker(worker, i, pos, gcwq)			\
 266	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
 267		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
 268
 269static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 270				  unsigned int sw)
 271{
 272	if (cpu < nr_cpu_ids) {
 273		if (sw & 1) {
 274			cpu = cpumask_next(cpu, mask);
 275			if (cpu < nr_cpu_ids)
 276				return cpu;
 277		}
 278		if (sw & 2)
 279			return WORK_CPU_UNBOUND;
 280	}
 281	return WORK_CPU_NONE;
 282}
 283
 284static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 285				struct workqueue_struct *wq)
 286{
 287	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
 288}
 289
 290/*
 291 * CPU iterators
 292 *
 293 * An extra gcwq is defined for an invalid cpu number
 294 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 295 * specific CPU.  The following iterators are similar to
  296 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 297 *
 298 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 299 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 300 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 301 *				  WORK_CPU_UNBOUND for unbound workqueues
 302 */
 303#define for_each_gcwq_cpu(cpu)						\
 304	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
 305	     (cpu) < WORK_CPU_NONE;					\
 306	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
 307
 308#define for_each_online_gcwq_cpu(cpu)					\
 309	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
 310	     (cpu) < WORK_CPU_NONE;					\
 311	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
 312
 313#define for_each_cwq_cpu(cpu, wq)					\
 314	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
 315	     (cpu) < WORK_CPU_NONE;					\
 316	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
 317
 318#ifdef CONFIG_DEBUG_OBJECTS_WORK
 319
 320static struct debug_obj_descr work_debug_descr;
 321
 322static void *work_debug_hint(void *addr)
 323{
 324	return ((struct work_struct *) addr)->func;
 325}
 326
 327/*
 328 * fixup_init is called when:
 329 * - an active object is initialized
 330 */
 331static int work_fixup_init(void *addr, enum debug_obj_state state)
 332{
 333	struct work_struct *work = addr;
 334
 335	switch (state) {
 336	case ODEBUG_STATE_ACTIVE:
 337		cancel_work_sync(work);
 338		debug_object_init(work, &work_debug_descr);
 339		return 1;
 340	default:
 341		return 0;
 342	}
 343}
 344
 345/*
 346 * fixup_activate is called when:
 347 * - an active object is activated
 348 * - an unknown object is activated (might be a statically initialized object)
 349 */
 350static int work_fixup_activate(void *addr, enum debug_obj_state state)
 351{
 352	struct work_struct *work = addr;
 353
 354	switch (state) {
 355
 356	case ODEBUG_STATE_NOTAVAILABLE:
 357		/*
 358		 * This is not really a fixup. The work struct was
 359		 * statically initialized. We just make sure that it
 360		 * is tracked in the object tracker.
 361		 */
 362		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
 363			debug_object_init(work, &work_debug_descr);
 364			debug_object_activate(work, &work_debug_descr);
 365			return 0;
 366		}
 367		WARN_ON_ONCE(1);
 368		return 0;
 369
 370	case ODEBUG_STATE_ACTIVE:
 371		WARN_ON(1);
 372
 373	default:
 374		return 0;
 375	}
 376}
 377
 378/*
 379 * fixup_free is called when:
 380 * - an active object is freed
 381 */
 382static int work_fixup_free(void *addr, enum debug_obj_state state)
 383{
 384	struct work_struct *work = addr;
 385
 386	switch (state) {
 387	case ODEBUG_STATE_ACTIVE:
 388		cancel_work_sync(work);
 389		debug_object_free(work, &work_debug_descr);
 390		return 1;
 391	default:
 392		return 0;
 393	}
 394}
 395
 396static struct debug_obj_descr work_debug_descr = {
 397	.name		= "work_struct",
 398	.debug_hint	= work_debug_hint,
 399	.fixup_init	= work_fixup_init,
 400	.fixup_activate	= work_fixup_activate,
 401	.fixup_free	= work_fixup_free,
 402};
 403
 404static inline void debug_work_activate(struct work_struct *work)
 405{
 406	debug_object_activate(work, &work_debug_descr);
 407}
 408
 409static inline void debug_work_deactivate(struct work_struct *work)
 410{
 411	debug_object_deactivate(work, &work_debug_descr);
 412}
 413
 414void __init_work(struct work_struct *work, int onstack)
 415{
 416	if (onstack)
 417		debug_object_init_on_stack(work, &work_debug_descr);
 418	else
 419		debug_object_init(work, &work_debug_descr);
 420}
 421EXPORT_SYMBOL_GPL(__init_work);
 422
 423void destroy_work_on_stack(struct work_struct *work)
 424{
 425	debug_object_free(work, &work_debug_descr);
 426}
 427EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 428
 429#else
 430static inline void debug_work_activate(struct work_struct *work) { }
 431static inline void debug_work_deactivate(struct work_struct *work) { }
 432#endif
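/*
 * Usage sketch (illustrative; my_stack_fn() is a made-up function): a
 * work item living on the stack must go through INIT_WORK_ONSTACK() and
 * destroy_work_on_stack() so the debug-object hooks above can track its
 * lifetime, and it must be flushed before the stack frame goes away:
 *
 *	struct work_struct barr;
 *
 *	INIT_WORK_ONSTACK(&barr, my_stack_fn);
 *	schedule_work(&barr);
 *	flush_work(&barr);
 *	destroy_work_on_stack(&barr);
 */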
 433
 434/* Serializes the accesses to the list of workqueues. */
 435static DEFINE_SPINLOCK(workqueue_lock);
 436static LIST_HEAD(workqueues);
 437static bool workqueue_freezing;		/* W: have wqs started freezing? */
 438
 439/*
 440 * The almighty global cpu workqueues.  nr_running is the only field
 441 * which is expected to be used frequently by other cpus via
 442 * try_to_wake_up().  Put it in a separate cacheline.
 443 */
 444static DEFINE_PER_CPU(struct global_cwq, global_cwq);
 445static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
 
 446
 447/*
 448 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 449 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 450 * workers have WORKER_UNBOUND set.
 451 */
 452static struct global_cwq unbound_global_cwq;
 453static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */
 454
 455static int worker_thread(void *__worker);
 456
 457static struct global_cwq *get_gcwq(unsigned int cpu)
 458{
 459	if (cpu != WORK_CPU_UNBOUND)
 460		return &per_cpu(global_cwq, cpu);
 461	else
 462		return &unbound_global_cwq;
 463}
 464
 465static atomic_t *get_gcwq_nr_running(unsigned int cpu)
 466{
 467	if (cpu != WORK_CPU_UNBOUND)
 468		return &per_cpu(gcwq_nr_running, cpu);
 469	else
 470		return &unbound_gcwq_nr_running;
 471}
 472
 473static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
 474					    struct workqueue_struct *wq)
 475{
 476	if (!(wq->flags & WQ_UNBOUND)) {
 477		if (likely(cpu < nr_cpu_ids)) {
 478#ifdef CONFIG_SMP
 479			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
 480#else
 481			return wq->cpu_wq.single;
 482#endif
 483		}
 484	} else if (likely(cpu == WORK_CPU_UNBOUND))
 485		return wq->cpu_wq.single;
 486	return NULL;
 487}
 488
 489static unsigned int work_color_to_flags(int color)
 490{
 491	return color << WORK_STRUCT_COLOR_SHIFT;
 492}
 493
 494static int get_work_color(struct work_struct *work)
 495{
 496	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 497		((1 << WORK_STRUCT_COLOR_BITS) - 1);
 498}
 499
 500static int work_next_color(int color)
 501{
 502	return (color + 1) % WORK_NR_COLORS;
 503}
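/*
 * Worked example (illustrative): with the four colour bits WORK_NR_COLORS
 * is 15, so work_next_color(14) wraps to 0.  A flush started while
 * work_color is 14 therefore tags new work with colour 0 and waits for
 * nr_in_flight[14] to drain.
 */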
 504
 505/*
 506 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 507 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 508 * cleared and the work data contains the cpu number it was last on.
 509 *
 510 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 511 * cwq, cpu or clear work->data.  These functions should only be
 512 * called while the work is owned - ie. while the PENDING bit is set.
 513 *
 514 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 515 * corresponding to a work.  gcwq is available once the work has been
 516 * queued anywhere after initialization.  cwq is available only from
 517 * queueing until execution starts.
 518 */
 519static inline void set_work_data(struct work_struct *work, unsigned long data,
 520				 unsigned long flags)
 521{
 522	BUG_ON(!work_pending(work));
 523	atomic_long_set(&work->data, data | flags | work_static(work));
 524}
 525
 526static void set_work_cwq(struct work_struct *work,
 527			 struct cpu_workqueue_struct *cwq,
 528			 unsigned long extra_flags)
 529{
 530	set_work_data(work, (unsigned long)cwq,
 531		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 532}
 533
 534static void set_work_cpu(struct work_struct *work, unsigned int cpu)
 
 535{
 536	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
 537}
 538
 539static void clear_work_data(struct work_struct *work)
 540{
 541	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
 
 542}
 543
 544static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
 545{
 546	unsigned long data = atomic_long_read(&work->data);
 547
 548	if (data & WORK_STRUCT_CWQ)
 549		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 550	else
 551		return NULL;
 552}
 553
 554static struct global_cwq *get_work_gcwq(struct work_struct *work)
 555{
 556	unsigned long data = atomic_long_read(&work->data);
 557	unsigned int cpu;
 
 
 558
 559	if (data & WORK_STRUCT_CWQ)
 560		return ((struct cpu_workqueue_struct *)
 561			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
 562
 563	cpu = data >> WORK_STRUCT_FLAG_BITS;
 564	if (cpu == WORK_CPU_NONE)
 565		return NULL;
 566
 567	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
 568	return get_gcwq(cpu);
 569}
 570
 571/*
 572 * Policy functions.  These define the policies on how the global
 573 * worker pool is managed.  Unless noted otherwise, these functions
 574 * assume that they're being called with gcwq->lock held.
 575 */
 576
 577static bool __need_more_worker(struct global_cwq *gcwq)
 578{
 579	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
 580		gcwq->flags & GCWQ_HIGHPRI_PENDING;
 581}
 582
 583/*
 584 * Need to wake up a worker?  Called from anything but currently
 585 * running workers.
 586 */
 587static bool need_more_worker(struct global_cwq *gcwq)
 588{
 589	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
 590}
 591
 592/* Can I start working?  Called from busy but !running workers. */
 593static bool may_start_working(struct global_cwq *gcwq)
 594{
 595	return gcwq->nr_idle;
 596}
 597
 598/* Do I need to keep working?  Called from currently running workers. */
 599static bool keep_working(struct global_cwq *gcwq)
 600{
 601	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 602
 603	return !list_empty(&gcwq->worklist) &&
 604		(atomic_read(nr_running) <= 1 ||
 605		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
 606}
 607
 608/* Do we need a new worker?  Called from manager. */
 609static bool need_to_create_worker(struct global_cwq *gcwq)
 610{
 611	return need_more_worker(gcwq) && !may_start_working(gcwq);
 612}
 613
 614/* Do I need to be the manager? */
 615static bool need_to_manage_workers(struct global_cwq *gcwq)
 616{
 617	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
 
 618}
 619
 620/* Do we have too many workers and should some go away? */
 621static bool too_many_workers(struct global_cwq *gcwq)
 622{
 623	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
 624	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
 625	int nr_busy = gcwq->nr_workers - nr_idle;
 626
 627	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 628}
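/*
 * Worked example of the ratio above (illustrative numbers): with 16
 * busy workers, up to 5 workers may sit idle; once a 6th goes idle,
 * (6 - 2) * MAX_IDLE_WORKERS_RATIO >= 16 holds and the idle timer
 * starts retiring the excess.
 */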
 629
 630/*
 631 * Wake up functions.
 632 */
 633
 634/* Return the first worker.  Safe with preemption disabled */
 635static struct worker *first_worker(struct global_cwq *gcwq)
 636{
 637	if (unlikely(list_empty(&gcwq->idle_list)))
 638		return NULL;
 639
 640	return list_first_entry(&gcwq->idle_list, struct worker, entry);
 641}
 642
 643/**
 644 * wake_up_worker - wake up an idle worker
 645 * @gcwq: gcwq to wake worker for
 646 *
 647 * Wake up the first idle worker of @gcwq.
 648 *
 649 * CONTEXT:
 650 * spin_lock_irq(gcwq->lock).
 651 */
 652static void wake_up_worker(struct global_cwq *gcwq)
 653{
 654	struct worker *worker = first_worker(gcwq);
 655
 656	if (likely(worker))
 657		wake_up_process(worker->task);
 658}
 659
 660/**
 661 * wq_worker_waking_up - a worker is waking up
 662 * @task: task waking up
 663 * @cpu: CPU @task is waking up to
 664 *
 665 * This function is called during try_to_wake_up() when a worker is
 666 * being awoken.
 667 *
 668 * CONTEXT:
 669 * spin_lock_irq(rq->lock)
 670 */
 671void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 672{
 673	struct worker *worker = kthread_data(task);
 674
 675	if (!(worker->flags & WORKER_NOT_RUNNING))
 676		atomic_inc(get_gcwq_nr_running(cpu));
 
 
 677}
 678
 679/**
 680 * wq_worker_sleeping - a worker is going to sleep
 681 * @task: task going to sleep
 682 * @cpu: CPU in question, must be the current CPU number
 683 *
 684 * This function is called during schedule() when a busy worker is
  686 * going to sleep.  A worker on the same cpu can be woken up by
  687 * returning a pointer to its task.
 687 *
 688 * CONTEXT:
 689 * spin_lock_irq(rq->lock)
 690 *
 691 * RETURNS:
 692 * Worker task on @cpu to wake up, %NULL if none.
 693 */
 694struct task_struct *wq_worker_sleeping(struct task_struct *task,
 695				       unsigned int cpu)
 696{
 697	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 698	struct global_cwq *gcwq = get_gcwq(cpu);
 699	atomic_t *nr_running = get_gcwq_nr_running(cpu);
 700
 701	if (worker->flags & WORKER_NOT_RUNNING)
 702		return NULL;
 703
 
 
 704	/* this can only happen on the local cpu */
 705	BUG_ON(cpu != raw_smp_processor_id());
 
 706
 707	/*
 708	 * The counterpart of the following dec_and_test, implied mb,
 709	 * worklist not empty test sequence is in insert_work().
 710	 * Please read comment there.
 711	 *
 712	 * NOT_RUNNING is clear.  This means that trustee is not in
 713	 * charge and we're running on the local cpu w/ rq lock held
  714	 * and preemption disabled, which in turn means that no one else
 715	 * could be manipulating idle_list, so dereferencing idle_list
 716	 * without gcwq lock is safe.
 717	 */
 718	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
 719		to_wakeup = first_worker(gcwq);
 
 720	return to_wakeup ? to_wakeup->task : NULL;
 721}
 722
 723/**
 724 * worker_set_flags - set worker flags and adjust nr_running accordingly
 725 * @worker: self
 726 * @flags: flags to set
 727 * @wakeup: wakeup an idle worker if necessary
 728 *
 729 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 730 * nr_running becomes zero and @wakeup is %true, an idle worker is
 731 * woken up.
 732 *
 733 * CONTEXT:
 734 * spin_lock_irq(gcwq->lock)
 735 */
 736static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 737				    bool wakeup)
 738{
 739	struct global_cwq *gcwq = worker->gcwq;
 740
 741	WARN_ON_ONCE(worker->task != current);
 742
 743	/*
 744	 * If transitioning into NOT_RUNNING, adjust nr_running and
 745	 * wake up an idle worker as necessary if requested by
 746	 * @wakeup.
 747	 */
 748	if ((flags & WORKER_NOT_RUNNING) &&
 749	    !(worker->flags & WORKER_NOT_RUNNING)) {
 750		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 751
 752		if (wakeup) {
 753			if (atomic_dec_and_test(nr_running) &&
 754			    !list_empty(&gcwq->worklist))
 755				wake_up_worker(gcwq);
 756		} else
 757			atomic_dec(nr_running);
 758	}
 759
 760	worker->flags |= flags;
 761}
 762
 763/**
 764 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 765 * @worker: self
 766 * @flags: flags to clear
 767 *
 768 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 769 *
 770 * CONTEXT:
 771 * spin_lock_irq(gcwq->lock)
 772 */
 773static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 774{
 775	struct global_cwq *gcwq = worker->gcwq;
 776	unsigned int oflags = worker->flags;
 777
 778	WARN_ON_ONCE(worker->task != current);
 779
 780	worker->flags &= ~flags;
 781
 782	/*
 783	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 784	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 785	 * of multiple flags, not a single flag.
 786	 */
 787	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 788		if (!(worker->flags & WORKER_NOT_RUNNING))
 789			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
 790}
 791
 792/**
 793 * busy_worker_head - return the busy hash head for a work
 794 * @gcwq: gcwq of interest
 795 * @work: work to be hashed
 796 *
 797 * Return hash head of @gcwq for @work.
 798 *
 799 * CONTEXT:
 800 * spin_lock_irq(gcwq->lock).
 801 *
 802 * RETURNS:
 803 * Pointer to the hash head.
 
 804 */
 805static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
 806					   struct work_struct *work)
 807{
 808	const int base_shift = ilog2(sizeof(struct work_struct));
 809	unsigned long v = (unsigned long)work;
 810
 811	/* simple shift and fold hash, do we need something better? */
 812	v >>= base_shift;
 813	v += v >> BUSY_WORKER_HASH_ORDER;
 814	v &= BUSY_WORKER_HASH_MASK;
 
 815
 816	return &gcwq->busy_hash[v];
 817}
 818
 819/**
 820 * __find_worker_executing_work - find worker which is executing a work
 821 * @gcwq: gcwq of interest
 822 * @bwh: hash head as returned by busy_worker_head()
 823 * @work: work to find worker for
 824 *
 825 * Find a worker which is executing @work on @gcwq.  @bwh should be
 826 * the hash head obtained by calling busy_worker_head() with the same
 827 * work.
 828 *
 829 * CONTEXT:
 830 * spin_lock_irq(gcwq->lock).
 831 *
 832 * RETURNS:
 833 * Pointer to worker which is executing @work if found, NULL
 834 * otherwise.
 835 */
 836static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 837						   struct hlist_head *bwh,
 838						   struct work_struct *work)
 839{
 840	struct worker *worker;
 841	struct hlist_node *tmp;
 842
 843	hlist_for_each_entry(worker, tmp, bwh, hentry)
 844		if (worker->current_work == work)
 845			return worker;
 846	return NULL;
 847}
 848
 849/**
 850 * find_worker_executing_work - find worker which is executing a work
 851 * @gcwq: gcwq of interest
 852 * @work: work to find worker for
 853 *
 854 * Find a worker which is executing @work on @gcwq.  This function is
 855 * identical to __find_worker_executing_work() except that this
 856 * function calculates @bwh itself.
 857 *
 858 * CONTEXT:
 859 * spin_lock_irq(gcwq->lock).
 860 *
 861 * RETURNS:
 862 * Pointer to worker which is executing @work if found, NULL
 863 * otherwise.
 864 */
 865static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 866						 struct work_struct *work)
 867{
 868	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
 869					    work);
 870}
 871
 872/**
 873 * gcwq_determine_ins_pos - find insertion position
 874 * @gcwq: gcwq of interest
 875 * @cwq: cwq a work is being queued for
 876 *
 877 * A work for @cwq is about to be queued on @gcwq, determine insertion
 878 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 879 * queued at the head of the queue but in FIFO order with respect to
 880 * other HIGHPRI works; otherwise, at the end of the queue.  This
 881 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 882 * there are HIGHPRI works pending.
 883 *
 884 * CONTEXT:
 885 * spin_lock_irq(gcwq->lock).
 886 *
 887 * RETURNS:
  888 * Pointer to insertion position.
 889 */
 890static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
 891					       struct cpu_workqueue_struct *cwq)
 892{
 893	struct work_struct *twork;
 
 894
 895	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
 896		return &gcwq->worklist;
 897
 898	list_for_each_entry(twork, &gcwq->worklist, entry) {
 899		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
 
 900
 901		if (!(tcwq->wq->flags & WQ_HIGHPRI))
 902			break;
 903	}
 904
 905	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
 906	return &twork->entry;
 907}
 908
 909/**
 910 * insert_work - insert a work into gcwq
 911 * @cwq: cwq @work belongs to
 912 * @work: work to insert
 913 * @head: insertion point
 914 * @extra_flags: extra WORK_STRUCT_* flags to set
 915 *
 916 * Insert @work which belongs to @cwq into @gcwq after @head.
 917 * @extra_flags is or'd to work_struct flags.
 918 *
 919 * CONTEXT:
 920 * spin_lock_irq(gcwq->lock).
 921 */
 922static void insert_work(struct cpu_workqueue_struct *cwq,
 923			struct work_struct *work, struct list_head *head,
 924			unsigned int extra_flags)
 925{
 926	struct global_cwq *gcwq = cwq->gcwq;
 927
 928	/* we own @work, set data and link */
 929	set_work_cwq(work, cwq, extra_flags);
 930
 931	/*
 932	 * Ensure that we get the right work->data if we see the
 933	 * result of list_add() below, see try_to_grab_pending().
 934	 */
 935	smp_wmb();
 936
 937	list_add_tail(&work->entry, head);
 
 938
 939	/*
 940	 * Ensure either worker_sched_deactivated() sees the above
 941	 * list_add_tail() or we see zero nr_running to avoid workers
 942	 * lying around lazily while there are works to be processed.
 943	 */
 944	smp_mb();
 945
 946	if (__need_more_worker(gcwq))
 947		wake_up_worker(gcwq);
 948}
 949
 950/*
 951 * Test whether @work is being queued from another work executing on the
 952 * same workqueue.  This is rather expensive and should only be used from
 953 * cold paths.
 954 */
 955static bool is_chained_work(struct workqueue_struct *wq)
 956{
 957	unsigned long flags;
 958	unsigned int cpu;
 959
 960	for_each_gcwq_cpu(cpu) {
 961		struct global_cwq *gcwq = get_gcwq(cpu);
 962		struct worker *worker;
 963		struct hlist_node *pos;
 964		int i;
 965
 966		spin_lock_irqsave(&gcwq->lock, flags);
 967		for_each_busy_worker(worker, i, pos, gcwq) {
 968			if (worker->task != current)
 969				continue;
 970			spin_unlock_irqrestore(&gcwq->lock, flags);
 971			/*
 972			 * I'm @worker, no locking necessary.  See if @work
 973			 * is headed to the same workqueue.
 974			 */
 975			return worker->current_cwq->wq == wq;
 976		}
 977		spin_unlock_irqrestore(&gcwq->lock, flags);
 978	}
 979	return false;
 980}
 981
 982static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 983			 struct work_struct *work)
 984{
 985	struct global_cwq *gcwq;
 986	struct cpu_workqueue_struct *cwq;
 987	struct list_head *worklist;
 988	unsigned int work_flags;
 989	unsigned long flags;
 990
 991	debug_work_activate(work);
 992
 993	/* if dying, only works from the same workqueue are allowed */
 994	if (unlikely(wq->flags & WQ_DRAINING) &&
 995	    WARN_ON_ONCE(!is_chained_work(wq)))
 996		return;
 997
 998	/* determine gcwq to use */
 999	if (!(wq->flags & WQ_UNBOUND)) {
1000		struct global_cwq *last_gcwq;
1001
1002		if (unlikely(cpu == WORK_CPU_UNBOUND))
1003			cpu = raw_smp_processor_id();
1004
1005		/*
1006		 * It's multi cpu.  If @wq is non-reentrant and @work
1007		 * was previously on a different cpu, it might still
1008		 * be running there, in which case the work needs to
1009		 * be queued on that cpu to guarantee non-reentrance.
1010		 */
1011		gcwq = get_gcwq(cpu);
1012		if (wq->flags & WQ_NON_REENTRANT &&
1013		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1014			struct worker *worker;
1015
1016			spin_lock_irqsave(&last_gcwq->lock, flags);
1017
1018			worker = find_worker_executing_work(last_gcwq, work);
1019
1020			if (worker && worker->current_cwq->wq == wq)
1021				gcwq = last_gcwq;
1022			else {
1023				/* meh... not running there, queue here */
1024				spin_unlock_irqrestore(&last_gcwq->lock, flags);
1025				spin_lock_irqsave(&gcwq->lock, flags);
1026			}
1027		} else
1028			spin_lock_irqsave(&gcwq->lock, flags);
1029	} else {
1030		gcwq = get_gcwq(WORK_CPU_UNBOUND);
1031		spin_lock_irqsave(&gcwq->lock, flags);
1032	}
1033
1034	/* gcwq determined, get cwq and queue */
1035	cwq = get_cwq(gcwq->cpu, wq);
1036	trace_workqueue_queue_work(cpu, cwq, work);
1037
1038	BUG_ON(!list_empty(&work->entry));
1039
1040	cwq->nr_in_flight[cwq->work_color]++;
1041	work_flags = work_color_to_flags(cwq->work_color);
1042
1043	if (likely(cwq->nr_active < cwq->max_active)) {
1044		trace_workqueue_activate_work(work);
1045		cwq->nr_active++;
1046		worklist = gcwq_determine_ins_pos(gcwq, cwq);
1047	} else {
1048		work_flags |= WORK_STRUCT_DELAYED;
1049		worklist = &cwq->delayed_works;
1050	}
1051
1052	insert_work(cwq, work, worklist, work_flags);
1053
1054	spin_unlock_irqrestore(&gcwq->lock, flags);
1055}
1056
1057/**
1058 * queue_work - queue work on a workqueue
1059 * @wq: workqueue to use
1060 * @work: work to queue
1061 *
1062 * Returns 0 if @work was already on a queue, non-zero otherwise.
1063 *
1064 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1065 * it can be processed by another CPU.
1066 */
1067int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1068{
1069	int ret;
1070
1071	ret = queue_work_on(get_cpu(), wq, work);
1072	put_cpu();
1073
1074	return ret;
1075}
1076EXPORT_SYMBOL_GPL(queue_work);
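/*
 * Usage sketch (illustrative; my_fn and my_work are made-up names): a
 * caller typically declares the work item once and queues it whenever an
 * event arrives.  Queueing while the item is still pending is a no-op,
 * as the return value indicates.
 *
 *	static void my_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_fn);
 *
 *	queue_work(system_wq, &my_work);
 */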
1077
1078/**
1079 * queue_work_on - queue work on specific cpu
1080 * @cpu: CPU number to execute work on
1081 * @wq: workqueue to use
1082 * @work: work to queue
1083 *
1084 * Returns 0 if @work was already on a queue, non-zero otherwise.
1085 *
1086 * We queue the work to a specific CPU, the caller must ensure it
1087 * can't go away.
 
 
1088 */
1089int
1090queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1091{
1092	int ret = 0;
1093
1094	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1095		__queue_work(cpu, wq, work);
1096		ret = 1;
1097	}
 
 
1098	return ret;
1099}
1100EXPORT_SYMBOL_GPL(queue_work_on);
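/*
 * Sketch of pinning work to a particular CPU (illustrative; target_cpu
 * and my_work are made up).  As noted above, the caller must keep the
 * CPU from going away, e.g. by holding the hotplug read lock:
 *
 *	get_online_cpus();
 *	if (cpu_online(target_cpu))
 *		queue_work_on(target_cpu, system_wq, &my_work);
 *	put_online_cpus();
 */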
1101
1102static void delayed_work_timer_fn(unsigned long __data)
1103{
1104	struct delayed_work *dwork = (struct delayed_work *)__data;
1105	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1106
1107	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
 
1108}
 
1109
1110/**
1111 * queue_delayed_work - queue work on a workqueue after delay
1112 * @wq: workqueue to use
1113 * @dwork: delayable work to queue
1114 * @delay: number of jiffies to wait before queueing
1115 *
1116 * Returns 0 if @work was already on a queue, non-zero otherwise.
1117 */
1118int queue_delayed_work(struct workqueue_struct *wq,
1119			struct delayed_work *dwork, unsigned long delay)
1120{
1121	if (delay == 0)
1122		return queue_work(wq, &dwork->work);
1123
1124	return queue_delayed_work_on(-1, wq, dwork, delay);
1125}
1126EXPORT_SYMBOL_GPL(queue_delayed_work);
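/*
 * Usage sketch (illustrative; my_dwork and my_timeout_fn are made-up
 * names): delayed work is declared with DECLARE_DELAYED_WORK() and
 * queued with a delay in jiffies, usually converted from milliseconds:
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
 */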
1127
1128/**
1129 * queue_delayed_work_on - queue work on specific CPU after delay
1130 * @cpu: CPU number to execute work on
1131 * @wq: workqueue to use
1132 * @dwork: work to queue
1133 * @delay: number of jiffies to wait before queueing
1134 *
1135 * Returns 0 if @work was already on a queue, non-zero otherwise.
 
 
1136 */
1137int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1138			struct delayed_work *dwork, unsigned long delay)
1139{
1140	int ret = 0;
1141	struct timer_list *timer = &dwork->timer;
1142	struct work_struct *work = &dwork->work;
1143
1144	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1145		unsigned int lcpu;
 
 
1146
1147		BUG_ON(timer_pending(timer));
1148		BUG_ON(!list_empty(&work->entry));
 
 
1149
1150		timer_stats_timer_set_start_info(&dwork->timer);
1151
1152		/*
1153		 * This stores cwq for the moment, for the timer_fn.
1154		 * Note that the work's gcwq is preserved to allow
1155		 * reentrance detection for delayed works.
1156		 */
1157		if (!(wq->flags & WQ_UNBOUND)) {
1158			struct global_cwq *gcwq = get_work_gcwq(work);
1159
1160			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1161				lcpu = gcwq->cpu;
1162			else
1163				lcpu = raw_smp_processor_id();
1164		} else
1165			lcpu = WORK_CPU_UNBOUND;
1166
1167		set_work_cwq(work, get_cwq(lcpu, wq), 0);
1168
1169		timer->expires = jiffies + delay;
1170		timer->data = (unsigned long)dwork;
1171		timer->function = delayed_work_timer_fn;
1172
1173		if (unlikely(cpu >= 0))
1174			add_timer_on(timer, cpu);
1175		else
1176			add_timer(timer);
1177		ret = 1;
1178	}
1179	return ret;
1180}
1181EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1182
1183/**
1184 * worker_enter_idle - enter idle state
1185 * @worker: worker which is entering idle state
1186 *
1187 * @worker is entering idle state.  Update stats and idle timer if
1188 * necessary.
1189 *
1190 * LOCKING:
1191 * spin_lock_irq(gcwq->lock).
1192 */
1193static void worker_enter_idle(struct worker *worker)
1194{
1195	struct global_cwq *gcwq = worker->gcwq;
1196
1197	BUG_ON(worker->flags & WORKER_IDLE);
1198	BUG_ON(!list_empty(&worker->entry) &&
1199	       (worker->hentry.next || worker->hentry.pprev));
 
1200
1201	/* can't use worker_set_flags(), also called from start_worker() */
1202	worker->flags |= WORKER_IDLE;
1203	gcwq->nr_idle++;
1204	worker->last_active = jiffies;
1205
1206	/* idle_list is LIFO */
1207	list_add(&worker->entry, &gcwq->idle_list);
1208
1209	if (likely(!(worker->flags & WORKER_ROGUE))) {
1210		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1211			mod_timer(&gcwq->idle_timer,
1212				  jiffies + IDLE_WORKER_TIMEOUT);
1213	} else
1214		wake_up_all(&gcwq->trustee_wait);
1215
1216	/* sanity check nr_running */
1217	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1218		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
 
 
1219}
1220
1221/**
1222 * worker_leave_idle - leave idle state
1223 * @worker: worker which is leaving idle state
1224 *
1225 * @worker is leaving idle state.  Update stats.
1226 *
1227 * LOCKING:
1228 * spin_lock_irq(gcwq->lock).
1229 */
1230static void worker_leave_idle(struct worker *worker)
1231{
1232	struct global_cwq *gcwq = worker->gcwq;
1233
1234	BUG_ON(!(worker->flags & WORKER_IDLE));
 
1235	worker_clr_flags(worker, WORKER_IDLE);
1236	gcwq->nr_idle--;
1237	list_del_init(&worker->entry);
1238}
1239
1240/**
1241 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1242 * @worker: self
 
 
1243 *
1244 * Works which are scheduled while the cpu is online must at least be
1245 * scheduled to a worker which is bound to the cpu so that if they are
1246 * flushed from cpu callbacks while cpu is going down, they are
1247 * guaranteed to execute on the cpu.
1248 *
1249 * This function is to be used by rogue workers and rescuers to bind
1250 * themselves to the target cpu and may race with cpu going down or
1251 * coming online.  kthread_bind() can't be used because it may put the
1252 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1253 * verbatim as it's best effort and blocking and gcwq may be
1254 * [dis]associated in the meantime.
1255 *
1256 * This function tries set_cpus_allowed() and locks gcwq and verifies
1257 * the binding against GCWQ_DISASSOCIATED which is set during
1258 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1259 * idle state or fetches works without dropping lock, it can guarantee
1260 * the scheduling requirement described in the first paragraph.
1261 *
1262 * CONTEXT:
1263 * Might sleep.  Called without any lock but returns with gcwq->lock
1264 * held.
1265 *
1266 * RETURNS:
1267 * %true if the associated gcwq is online (@worker is successfully
1268 * bound), %false if offline.
1269 */
1270static bool worker_maybe_bind_and_lock(struct worker *worker)
1271__acquires(&gcwq->lock)
1272{
1273	struct global_cwq *gcwq = worker->gcwq;
1274	struct task_struct *task = worker->task;
1275
1276	while (true) {
1277		/*
1278		 * The following call may fail, succeed or succeed
1279		 * without actually migrating the task to the cpu if
1280		 * it races with cpu hotunplug operation.  Verify
1281		 * against GCWQ_DISASSOCIATED.
1282		 */
1283		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1284			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1285
1286		spin_lock_irq(&gcwq->lock);
1287		if (gcwq->flags & GCWQ_DISASSOCIATED)
1288			return false;
1289		if (task_cpu(task) == gcwq->cpu &&
1290		    cpumask_equal(&current->cpus_allowed,
1291				  get_cpu_mask(gcwq->cpu)))
1292			return true;
1293		spin_unlock_irq(&gcwq->lock);
1294
1295		/*
1296		 * We've raced with CPU hot[un]plug.  Give it a breather
1297		 * and retry migration.  cond_resched() is required here;
1298		 * otherwise, we might deadlock against cpu_stop trying to
1299		 * bring down the CPU on non-preemptive kernel.
1300		 */
1301		cpu_relax();
1302		cond_resched();
1303	}
1304}
1305
1306/*
1307 * Function for worker->rebind_work used to rebind rogue busy workers
1308 * to the associated cpu which is coming back online.  This is
1309 * scheduled by cpu up but can race with other cpu hotplug operations
1310 * and may be executed twice without intervening cpu down.
1311 */
1312static void worker_rebind_fn(struct work_struct *work)
1313{
1314	struct worker *worker = container_of(work, struct worker, rebind_work);
1315	struct global_cwq *gcwq = worker->gcwq;
1316
1317	if (worker_maybe_bind_and_lock(worker))
1318		worker_clr_flags(worker, WORKER_REBIND);
1319
1320	spin_unlock_irq(&gcwq->lock);
1321}
1322
1323static struct worker *alloc_worker(void)
1324{
1325	struct worker *worker;
1326
1327	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1328	if (worker) {
1329		INIT_LIST_HEAD(&worker->entry);
1330		INIT_LIST_HEAD(&worker->scheduled);
1331		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1332		/* on creation a worker is in !idle && prep state */
1333		worker->flags = WORKER_PREP;
1334	}
1335	return worker;
1336}
1337
1338/**
1339 * create_worker - create a new workqueue worker
1340 * @gcwq: gcwq the new worker will belong to
1341 * @bind: whether to set affinity to @cpu or not
1342 *
1343 * Create a new worker which is bound to @gcwq.  The returned worker
1344 * can be started by calling start_worker() or destroyed using
1345 * destroy_worker().
1346 *
1347 * CONTEXT:
1348 * Might sleep.  Does GFP_KERNEL allocations.
1349 *
1350 * RETURNS:
1351 * Pointer to the newly created worker.
1352 */
1353static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1354{
1355	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1356	struct worker *worker = NULL;
1357	int id = -1;
1358
1359	spin_lock_irq(&gcwq->lock);
1360	while (ida_get_new(&gcwq->worker_ida, &id)) {
1361		spin_unlock_irq(&gcwq->lock);
1362		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1363			goto fail;
1364		spin_lock_irq(&gcwq->lock);
1365	}
1366	spin_unlock_irq(&gcwq->lock);
1367
1368	worker = alloc_worker();
1369	if (!worker)
1370		goto fail;
1371
1372	worker->gcwq = gcwq;
1373	worker->id = id;
1374
1375	if (!on_unbound_cpu)
1376		worker->task = kthread_create_on_node(worker_thread,
1377						      worker,
1378						      cpu_to_node(gcwq->cpu),
1379						      "kworker/%u:%d", gcwq->cpu, id);
1380	else
1381		worker->task = kthread_create(worker_thread, worker,
1382					      "kworker/u:%d", id);
 
 
1383	if (IS_ERR(worker->task))
1384		goto fail;
1385
1386	/*
1387	 * A rogue worker will become a regular one if CPU comes
1388	 * online later on.  Make sure every worker has
1389	 * PF_THREAD_BOUND set.
1390	 */
1391	if (bind && !on_unbound_cpu)
1392		kthread_bind(worker->task, gcwq->cpu);
1393	else {
1394		worker->task->flags |= PF_THREAD_BOUND;
1395		if (on_unbound_cpu)
1396			worker->flags |= WORKER_UNBOUND;
1397	}
1398
1399	return worker;
 
1400fail:
1401	if (id >= 0) {
1402		spin_lock_irq(&gcwq->lock);
1403		ida_remove(&gcwq->worker_ida, id);
1404		spin_unlock_irq(&gcwq->lock);
1405	}
1406	kfree(worker);
1407	return NULL;
1408}
1409
1410/**
1411 * start_worker - start a newly created worker
1412 * @worker: worker to start
1413 *
1414 * Make the gcwq aware of @worker and start it.
1415 *
1416 * CONTEXT:
1417 * spin_lock_irq(gcwq->lock).
1418 */
1419static void start_worker(struct worker *worker)
1420{
1421	worker->flags |= WORKER_STARTED;
1422	worker->gcwq->nr_workers++;
1423	worker_enter_idle(worker);
1424	wake_up_process(worker->task);
1425}
1426
1427/**
1428 * destroy_worker - destroy a workqueue worker
1429 * @worker: worker to be destroyed
1430 *
1431 * Destroy @worker and adjust @gcwq stats accordingly.
1432 *
1433 * CONTEXT:
1434 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1435 */
1436static void destroy_worker(struct worker *worker)
1437{
1438	struct global_cwq *gcwq = worker->gcwq;
1439	int id = worker->id;
 
 
1440
1441	/* sanity check frenzy */
1442	BUG_ON(worker->current_work);
1443	BUG_ON(!list_empty(&worker->scheduled));
 
1444
1445	if (worker->flags & WORKER_STARTED)
1446		gcwq->nr_workers--;
1447	if (worker->flags & WORKER_IDLE)
1448		gcwq->nr_idle--;
1449
1450	list_del_init(&worker->entry);
1451	worker->flags |= WORKER_DIE;
1452
1453	spin_unlock_irq(&gcwq->lock);
 
 
1454
1455	kthread_stop(worker->task);
 
1456	kfree(worker);
1457
1458	spin_lock_irq(&gcwq->lock);
1459	ida_remove(&gcwq->worker_ida, id);
1460}
1461
1462static void idle_worker_timeout(unsigned long __gcwq)
1463{
1464	struct global_cwq *gcwq = (void *)__gcwq;
1465
1466	spin_lock_irq(&gcwq->lock);
1467
1468	if (too_many_workers(gcwq)) {
1469		struct worker *worker;
1470		unsigned long expires;
1471
1472		/* idle_list is kept in LIFO order, check the last one */
1473		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1474		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1475
1476		if (time_before(jiffies, expires))
1477			mod_timer(&gcwq->idle_timer, expires);
1478		else {
1479			/* it's been idle for too long, wake up manager */
1480			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1481			wake_up_worker(gcwq);
1482		}
1483	}
1484
1485	spin_unlock_irq(&gcwq->lock);
1486}
1487
1488static bool send_mayday(struct work_struct *work)
1489{
1490	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1491	struct workqueue_struct *wq = cwq->wq;
1492	unsigned int cpu;
 
1493
1494	if (!(wq->flags & WQ_RESCUER))
1495		return false;
1496
1497	/* mayday mayday mayday */
1498	cpu = cwq->gcwq->cpu;
1499	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1500	if (cpu == WORK_CPU_UNBOUND)
1501		cpu = 0;
1502	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1503		wake_up_process(wq->rescuer->task);
1504	return true;
1505}
1506
1507static void gcwq_mayday_timeout(unsigned long __gcwq)
1508{
1509	struct global_cwq *gcwq = (void *)__gcwq;
1510	struct work_struct *work;
1511
1512	spin_lock_irq(&gcwq->lock);
 
1513
1514	if (need_to_create_worker(gcwq)) {
1515		/*
1516		 * We've been trying to create a new worker but
1517		 * haven't been successful.  We might be hitting an
1518		 * allocation deadlock.  Send distress signals to
1519		 * rescuers.
1520		 */
1521		list_for_each_entry(work, &gcwq->worklist, entry)
1522			send_mayday(work);
1523	}
1524
1525	spin_unlock_irq(&gcwq->lock);
 
1526
1527	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1528}
1529
1530/**
1531 * maybe_create_worker - create a new worker if necessary
1532 * @gcwq: gcwq to create a new worker for
1533 *
1534 * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1535 * have at least one idle worker on return from this function.  If
1536 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1537 * sent to all rescuers with works scheduled on @gcwq to resolve
1538 * possible allocation deadlock.
1539 *
1540 * On return, need_to_create_worker() is guaranteed to be false and
1541 * may_start_working() true.
1542 *
1543 * LOCKING:
1544 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1545 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1546 * manager.
1547 *
1548 * RETURNS:
1549 * false if no action was taken and gcwq->lock stayed locked, true
1550 * otherwise.
1551 */
1552static bool maybe_create_worker(struct global_cwq *gcwq)
1553__releases(&gcwq->lock)
1554__acquires(&gcwq->lock)
1555{
1556	if (!need_to_create_worker(gcwq))
1557		return false;
1558restart:
1559	spin_unlock_irq(&gcwq->lock);
1560
1561	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1562	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1563
1564	while (true) {
1565		struct worker *worker;
1566
1567		worker = create_worker(gcwq, true);
1568		if (worker) {
1569			del_timer_sync(&gcwq->mayday_timer);
1570			spin_lock_irq(&gcwq->lock);
1571			start_worker(worker);
1572			BUG_ON(need_to_create_worker(gcwq));
 
1573			return true;
1574		}
1575
1576		if (!need_to_create_worker(gcwq))
1577			break;
1578
1579		__set_current_state(TASK_INTERRUPTIBLE);
1580		schedule_timeout(CREATE_COOLDOWN);
1581
1582		if (!need_to_create_worker(gcwq))
1583			break;
1584	}
1585
1586	del_timer_sync(&gcwq->mayday_timer);
1587	spin_lock_irq(&gcwq->lock);
1588	if (need_to_create_worker(gcwq))
1589		goto restart;
1590	return true;
1591}
1592
1593/**
1594 * maybe_destroy_worker - destroy workers which have been idle for a while
1595 * @gcwq: gcwq to destroy workers for
1596 *
1597 * Destroy @gcwq workers which have been idle for longer than
1598 * IDLE_WORKER_TIMEOUT.
1599 *
1600 * LOCKING:
1601 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1602 * multiple times.  Called only from manager.
1603 *
1604 * RETURNS:
1605 * false if no action was taken and gcwq->lock stayed locked, true
1606 * otherwise.
1607 */
1608static bool maybe_destroy_workers(struct global_cwq *gcwq)
1609{
1610	bool ret = false;
1611
1612	while (too_many_workers(gcwq)) {
1613		struct worker *worker;
1614		unsigned long expires;
1615
1616		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1617		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1618
1619		if (time_before(jiffies, expires)) {
1620			mod_timer(&gcwq->idle_timer, expires);
1621			break;
1622		}
1623
1624		destroy_worker(worker);
1625		ret = true;
1626	}
1627
1628	return ret;
1629}
1630
1631/**
1632 * manage_workers - manage worker pool
1633 * @worker: self
1634 *
1635 * Assume the manager role and manage gcwq worker pool @worker belongs
1636 * to.  At any given time, there can be only zero or one manager per
1637 * gcwq.  The exclusion is handled automatically by this function.
1638 *
1639 * The caller can safely start processing works on false return.  On
1640 * true return, it's guaranteed that need_to_create_worker() is false
1641 * and may_start_working() is true.
1642 *
1643 * CONTEXT:
1644 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1645 * multiple times.  Does GFP_KERNEL allocations.
1646 *
1647 * RETURNS:
1648 * false if no action was taken and gcwq->lock stayed locked, true if
1649 * some action was taken.
1650 */
1651static bool manage_workers(struct worker *worker)
1652{
1653	struct global_cwq *gcwq = worker->gcwq;
1654	bool ret = false;
1655
1656	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1657		return ret;
1658
1659	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1660	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1661
1662	/*
1663	 * Destroy and then create so that may_start_working() is true
1664	 * on return.
1665	 */
1666	ret |= maybe_destroy_workers(gcwq);
1667	ret |= maybe_create_worker(gcwq);
1668
1669	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1670
1671	/*
1672	 * The trustee might be waiting to take over the manager
1673	 * position, tell it we're done.
1674	 */
1675	if (unlikely(gcwq->trustee))
1676		wake_up_all(&gcwq->trustee_wait);
1677
 
 
1678	return ret;
1679}
1680
1681/**
1682 * move_linked_works - move linked works to a list
1683 * @work: start of series of works to be scheduled
1684 * @head: target list to append @work to
 1685 * @nextp: out parameter for nested worklist walking
1686 *
1687 * Schedule linked works starting from @work to @head.  Work series to
1688 * be scheduled starts at @work and includes any consecutive work with
1689 * WORK_STRUCT_LINKED set in its predecessor.
1690 *
1691 * If @nextp is not NULL, it's updated to point to the next work of
1692 * the last scheduled work.  This allows move_linked_works() to be
1693 * nested inside outer list_for_each_entry_safe().
1694 *
1695 * CONTEXT:
1696 * spin_lock_irq(gcwq->lock).
1697 */
1698static void move_linked_works(struct work_struct *work, struct list_head *head,
1699			      struct work_struct **nextp)
1700{
1701	struct work_struct *n;
1702
1703	/*
1704	 * Linked worklist will always end before the end of the list,
1705	 * use NULL for list head.
1706	 */
1707	list_for_each_entry_safe_from(work, n, NULL, entry) {
1708		list_move_tail(&work->entry, head);
1709		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1710			break;
1711	}
1712
1713	/*
1714	 * If we're already inside safe list traversal and have moved
1715	 * multiple works to the scheduled queue, the next position
1716	 * needs to be updated.
1717	 */
1718	if (nextp)
1719		*nextp = n;
1720}
1721
1722static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1723{
1724	struct work_struct *work = list_first_entry(&cwq->delayed_works,
1725						    struct work_struct, entry);
1726	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1727
1728	trace_workqueue_activate_work(work);
1729	move_linked_works(work, pos, NULL);
1730	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1731	cwq->nr_active++;
1732}
1733
1734/**
1735 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1736 * @cwq: cwq of interest
1737 * @color: color of work which left the queue
1738 * @delayed: for a delayed work
1739 *
1740 * A work either has completed or is removed from pending queue,
1741 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1742 *
1743 * CONTEXT:
1744 * spin_lock_irq(gcwq->lock).
1745 */
1746static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1747				 bool delayed)
1748{
1749	/* ignore uncolored works */
1750	if (color == WORK_NO_COLOR)
1751		return;
1752
1753	cwq->nr_in_flight[color]--;
1754
1755	if (!delayed) {
1756		cwq->nr_active--;
1757		if (!list_empty(&cwq->delayed_works)) {
1758			/* one down, submit a delayed one */
1759			if (cwq->nr_active < cwq->max_active)
1760				cwq_activate_first_delayed(cwq);
1761		}
1762	}
1763
1764	/* is flush in progress and are we at the flushing tip? */
1765	if (likely(cwq->flush_color != color))
1766		return;
1767
1768	/* are there still in-flight works? */
1769	if (cwq->nr_in_flight[color])
1770		return;
1771
1772	/* this cwq is done, clear flush_color */
1773	cwq->flush_color = -1;
1774
1775	/*
1776	 * If this was the last cwq, wake up the first flusher.  It
1777	 * will handle the rest.
1778	 */
1779	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1780		complete(&cwq->wq->first_flusher->done);
1781}
1782
1783/**
1784 * process_one_work - process single work
1785 * @worker: self
1786 * @work: work to process
1787 *
 1788 * Process @work.  This function contains all the logic necessary to
1789 * process a single work including synchronization against and
1790 * interaction with other workers on the same cpu, queueing and
1791 * flushing.  As long as context requirement is met, any worker can
1792 * call this function to process a work.
1793 *
1794 * CONTEXT:
1795 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1796 */
1797static void process_one_work(struct worker *worker, struct work_struct *work)
1798__releases(&gcwq->lock)
1799__acquires(&gcwq->lock)
1800{
1801	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1802	struct global_cwq *gcwq = cwq->gcwq;
1803	struct hlist_head *bwh = busy_worker_head(gcwq, work);
1804	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1805	work_func_t f = work->func;
1806	int work_color;
1807	struct worker *collision;
1808#ifdef CONFIG_LOCKDEP
1809	/*
1810	 * It is permissible to free the struct work_struct from
1811	 * inside the function that is called from it, this we need to
1812	 * take into account for lockdep too.  To avoid bogus "held
1813	 * lock freed" warnings as well as problems when looking into
1814	 * work->lockdep_map, make a copy and use that here.
1815	 */
1816	struct lockdep_map lockdep_map = work->lockdep_map;
 
 
1817#endif
1818	/*
1819	 * A single work shouldn't be executed concurrently by
1820	 * multiple workers on a single cpu.  Check whether anyone is
1821	 * already processing the work.  If so, defer the work to the
1822	 * currently executing one.
1823	 */
1824	collision = __find_worker_executing_work(gcwq, bwh, work);
1825	if (unlikely(collision)) {
1826		move_linked_works(work, &collision->scheduled, NULL);
1827		return;
1828	}
1829
1830	/* claim and process */
1831	debug_work_deactivate(work);
1832	hlist_add_head(&worker->hentry, bwh);
1833	worker->current_work = work;
1834	worker->current_cwq = cwq;
1835	work_color = get_work_color(work);
1836
1837	/* record the current cpu number in the work data and dequeue */
1838	set_work_cpu(work, gcwq->cpu);
1839	list_del_init(&work->entry);
1840
1841	/*
1842	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1843	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1844	 */
1845	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1846		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1847						struct work_struct, entry);
1848
1849		if (!list_empty(&gcwq->worklist) &&
1850		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1851			wake_up_worker(gcwq);
1852		else
1853			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1854	}
1855
1856	/*
1857	 * CPU intensive works don't participate in concurrency
1858	 * management.  They're the scheduler's responsibility.
1859	 */
1860	if (unlikely(cpu_intensive))
1861		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1862
1863	spin_unlock_irq(&gcwq->lock);
1864
1865	work_clear_pending(work);
1866	lock_map_acquire_read(&cwq->wq->lockdep_map);
1867	lock_map_acquire(&lockdep_map);
1868	trace_workqueue_execute_start(work);
1869	f(work);
1870	/*
1871	 * While we must be careful to not use "work" after this, the trace
1872	 * point will only record its address.
1873	 */
1874	trace_workqueue_execute_end(work);
1875	lock_map_release(&lockdep_map);
1876	lock_map_release(&cwq->wq->lockdep_map);
1877
1878	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1879		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1880		       "%s/0x%08x/%d\n",
1881		       current->comm, preempt_count(), task_pid_nr(current));
1882		printk(KERN_ERR "    last function: ");
1883		print_symbol("%s\n", (unsigned long)f);
1884		debug_show_held_locks(current);
1885		dump_stack();
1886	}
1887
1888	spin_lock_irq(&gcwq->lock);
1889
1890	/* clear cpu intensive status */
1891	if (unlikely(cpu_intensive))
1892		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1893
1894	/* we're done with it, release */
1895	hlist_del_init(&worker->hentry);
1896	worker->current_work = NULL;
1897	worker->current_cwq = NULL;
1898	cwq_dec_nr_in_flight(cwq, work_color, false);
1899}
1900
1901/**
1902 * process_scheduled_works - process scheduled works
1903 * @worker: self
1904 *
1905 * Process all scheduled works.  Please note that the scheduled list
1906 * may change while processing a work, so this function repeatedly
1907 * fetches a work from the top and executes it.
1908 *
1909 * CONTEXT:
1910 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1911 * multiple times.
1912 */
1913static void process_scheduled_works(struct worker *worker)
1914{
1915	while (!list_empty(&worker->scheduled)) {
1916		struct work_struct *work = list_first_entry(&worker->scheduled,
1917						struct work_struct, entry);
1918		process_one_work(worker, work);
1919	}
1920}
1921
1922/**
1923 * worker_thread - the worker thread function
1924 * @__worker: self
1925 *
1926 * The gcwq worker thread function.  There's a single dynamic pool of
1927 * these per cpu.  These workers process all works regardless of
1928 * their specific target workqueue.  The only exception is works which
1929 * belong to workqueues with a rescuer which will be explained in
1930 * rescuer_thread().
1931 */
1932static int worker_thread(void *__worker)
1933{
1934	struct worker *worker = __worker;
1935	struct global_cwq *gcwq = worker->gcwq;
1936
1937	/* tell the scheduler that this is a workqueue worker */
1938	worker->task->flags |= PF_WQ_WORKER;
1939woke_up:
1940	spin_lock_irq(&gcwq->lock);
1941
1942	/* DIE can be set only while we're idle, checking here is enough */
1943	if (worker->flags & WORKER_DIE) {
1944		spin_unlock_irq(&gcwq->lock);
1945		worker->task->flags &= ~PF_WQ_WORKER;
1946		return 0;
1947	}
1948
1949	worker_leave_idle(worker);
1950recheck:
1951	/* no more worker necessary? */
1952	if (!need_more_worker(gcwq))
1953		goto sleep;
1954
1955	/* do we need to manage? */
1956	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1957		goto recheck;
1958
1959	/*
1960	 * ->scheduled list can only be filled while a worker is
1961	 * preparing to process a work or actually processing it.
1962	 * Make sure nobody diddled with it while I was sleeping.
1963	 */
1964	BUG_ON(!list_empty(&worker->scheduled));
1965
1966	/*
1967	 * When control reaches this point, we're guaranteed to have
1968	 * at least one idle worker or that someone else has already
1969	 * assumed the manager role.
1970	 */
1971	worker_clr_flags(worker, WORKER_PREP);
1972
1973	do {
1974		struct work_struct *work =
1975			list_first_entry(&gcwq->worklist,
1976					 struct work_struct, entry);
1977
1978		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1979			/* optimization path, not strictly necessary */
1980			process_one_work(worker, work);
1981			if (unlikely(!list_empty(&worker->scheduled)))
1982				process_scheduled_works(worker);
1983		} else {
1984			move_linked_works(work, &worker->scheduled, NULL);
1985			process_scheduled_works(worker);
1986		}
1987	} while (keep_working(gcwq));
1988
1989	worker_set_flags(worker, WORKER_PREP, false);
1990sleep:
1991	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1992		goto recheck;
1993
1994	/*
1995	 * gcwq->lock is held and there's no work to process and no
1996	 * need to manage, sleep.  Workers are woken up only while
1997	 * holding gcwq->lock or from local cpu, so setting the
1998	 * current state before releasing gcwq->lock is enough to
1999	 * prevent losing any event.
2000	 */
2001	worker_enter_idle(worker);
2002	__set_current_state(TASK_INTERRUPTIBLE);
2003	spin_unlock_irq(&gcwq->lock);
2004	schedule();
2005	goto woke_up;
2006}
2007
2008/**
2009 * rescuer_thread - the rescuer thread function
2010 * @__wq: the associated workqueue
2011 *
2012 * Workqueue rescuer thread function.  There's one rescuer for each
2013 * workqueue which has WQ_RESCUER set.
2014 *
2015 * Regular work processing on a gcwq may block trying to create a new
2016 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2017 * developing into a deadlock if some works currently on the same queue
2018 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
2019 * the problem the rescuer solves.
2020 *
2021 * When such condition is possible, the gcwq summons rescuers of all
2022 * workqueues which have works queued on the gcwq and let them process
2023 * those works so that forward progress can be guaranteed.
2024 *
2025 * This should happen rarely.
2026 */
2027static int rescuer_thread(void *__wq)
2028{
2029	struct workqueue_struct *wq = __wq;
2030	struct worker *rescuer = wq->rescuer;
2031	struct list_head *scheduled = &rescuer->scheduled;
2032	bool is_unbound = wq->flags & WQ_UNBOUND;
2033	unsigned int cpu;
2034
2035	set_user_nice(current, RESCUER_NICE_LEVEL);
2036repeat:
2037	set_current_state(TASK_INTERRUPTIBLE);
2038
2039	if (kthread_should_stop())
2040		return 0;
2041
2042	/*
2043	 * See whether any cpu is asking for help.  Unbound
2044	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
2045	 */
2046	for_each_mayday_cpu(cpu, wq->mayday_mask) {
2047		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2048		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2049		struct global_cwq *gcwq = cwq->gcwq;
2050		struct work_struct *work, *n;
2051
2052		__set_current_state(TASK_RUNNING);
2053		mayday_clear_cpu(cpu, wq->mayday_mask);
2054
2055		/* migrate to the target cpu if possible */
2056		rescuer->gcwq = gcwq;
2057		worker_maybe_bind_and_lock(rescuer);
2058
2059		/*
2060		 * Slurp in all works issued via this workqueue and
2061		 * process'em.
2062		 */
2063		BUG_ON(!list_empty(&rescuer->scheduled));
2064		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2065			if (get_work_cwq(work) == cwq)
2066				move_linked_works(work, scheduled, &n);
2067
2068		process_scheduled_works(rescuer);
2069
2070		/*
2071		 * Leave this gcwq.  If keep_working() is %true, notify a
2072		 * regular worker; otherwise, we end up with 0 concurrency
2073		 * and stalling the execution.
2074		 */
2075		if (keep_working(gcwq))
2076			wake_up_worker(gcwq);
2077
2078		spin_unlock_irq(&gcwq->lock);
2079	}
2080
2081	schedule();
2082	goto repeat;
2083}
2084
2085struct wq_barrier {
2086	struct work_struct	work;
2087	struct completion	done;
2088};
2089
2090static void wq_barrier_func(struct work_struct *work)
2091{
2092	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2093	complete(&barr->done);
2094}
2095
2096/**
2097 * insert_wq_barrier - insert a barrier work
2098 * @cwq: cwq to insert barrier into
2099 * @barr: wq_barrier to insert
2100 * @target: target work to attach @barr to
2101 * @worker: worker currently executing @target, NULL if @target is not executing
2102 *
2103 * @barr is linked to @target such that @barr is completed only after
2104 * @target finishes execution.  Please note that the ordering
2105 * guarantee is observed only with respect to @target and on the local
2106 * cpu.
2107 *
2108 * Currently, a queued barrier can't be canceled.  This is because
2109 * try_to_grab_pending() can't determine whether the work to be
2110 * grabbed is at the head of the queue and thus can't clear LINKED
2111 * flag of the previous work while there must be a valid next work
2112 * after a work with LINKED flag set.
2113 *
2114 * Note that when @worker is non-NULL, @target may be modified
2115 * underneath us, so we can't reliably determine cwq from @target.
2116 *
2117 * CONTEXT:
2118 * spin_lock_irq(gcwq->lock).
2119 */
2120static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2121			      struct wq_barrier *barr,
2122			      struct work_struct *target, struct worker *worker)
2123{
2124	struct list_head *head;
2125	unsigned int linked = 0;
2126
2127	/*
2128	 * debugobject calls are safe here even with gcwq->lock locked
2129	 * as we know for sure that this will not trigger any of the
2130	 * checks and call back into the fixup functions where we
2131	 * might deadlock.
2132	 */
2133	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2134	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2135	init_completion(&barr->done);
2136
2137	/*
2138	 * If @target is currently being executed, schedule the
2139	 * barrier to the worker; otherwise, put it after @target.
2140	 */
2141	if (worker)
2142		head = worker->scheduled.next;
2143	else {
2144		unsigned long *bits = work_data_bits(target);
2145
2146		head = target->entry.next;
2147		/* there can already be other linked works, inherit and set */
2148		linked = *bits & WORK_STRUCT_LINKED;
2149		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2150	}
2151
2152	debug_work_activate(&barr->work);
2153	insert_work(cwq, &barr->work, head,
2154		    work_color_to_flags(WORK_NO_COLOR) | linked);
2155}
2156
2157/**
2158 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2159 * @wq: workqueue being flushed
2160 * @flush_color: new flush color, < 0 for no-op
2161 * @work_color: new work color, < 0 for no-op
2162 *
2163 * Prepare cwqs for workqueue flushing.
2164 *
2165 * If @flush_color is non-negative, flush_color on all cwqs should be
2166 * -1.  If no cwq has in-flight commands at the specified color, all
2167 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
2168 * has in flight commands, its cwq->flush_color is set to
2169 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2170 * wakeup logic is armed and %true is returned.
2171 *
2172 * The caller should have initialized @wq->first_flusher prior to
2173 * calling this function with non-negative @flush_color.  If
2174 * @flush_color is negative, no flush color update is done and %false
2175 * is returned.
2176 *
2177 * If @work_color is non-negative, all cwqs should have the same
2178 * work_color which is previous to @work_color and all will be
2179 * advanced to @work_color.
2180 *
2181 * CONTEXT:
2182 * mutex_lock(wq->flush_mutex).
2183 *
2184 * RETURNS:
2185 * %true if @flush_color >= 0 and there's something to flush.  %false
2186 * otherwise.
2187 */
2188static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2189				      int flush_color, int work_color)
2190{
2191	bool wait = false;
2192	unsigned int cpu;
2193
2194	if (flush_color >= 0) {
2195		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2196		atomic_set(&wq->nr_cwqs_to_flush, 1);
2197	}
2198
2199	for_each_cwq_cpu(cpu, wq) {
2200		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2201		struct global_cwq *gcwq = cwq->gcwq;
2202
2203		spin_lock_irq(&gcwq->lock);
2204
2205		if (flush_color >= 0) {
2206			BUG_ON(cwq->flush_color != -1);
2207
2208			if (cwq->nr_in_flight[flush_color]) {
2209				cwq->flush_color = flush_color;
2210				atomic_inc(&wq->nr_cwqs_to_flush);
2211				wait = true;
2212			}
2213		}
2214
2215		if (work_color >= 0) {
2216			BUG_ON(work_color != work_next_color(cwq->work_color));
2217			cwq->work_color = work_color;
2218		}
2219
2220		spin_unlock_irq(&gcwq->lock);
2221	}
2222
2223	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2224		complete(&wq->first_flusher->done);
2225
2226	return wait;
2227}
2228
2229/**
2230 * flush_workqueue - ensure that any scheduled work has run to completion.
2231 * @wq: workqueue to flush
2232 *
2233 * Forces execution of the workqueue and blocks until its completion.
2234 * This is typically used in driver shutdown handlers.
2235 *
2236 * We sleep until all works which were queued on entry have been handled,
2237 * but we are not livelocked by new incoming ones.
2238 */
2239void flush_workqueue(struct workqueue_struct *wq)
2240{
2241	struct wq_flusher this_flusher = {
2242		.list = LIST_HEAD_INIT(this_flusher.list),
2243		.flush_color = -1,
2244		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2245	};
2246	int next_color;
2247
2248	lock_map_acquire(&wq->lockdep_map);
2249	lock_map_release(&wq->lockdep_map);
2250
2251	mutex_lock(&wq->flush_mutex);
2252
2253	/*
2254	 * Start-to-wait phase
2255	 */
2256	next_color = work_next_color(wq->work_color);
2257
2258	if (next_color != wq->flush_color) {
2259		/*
2260		 * Color space is not full.  The current work_color
2261		 * becomes our flush_color and work_color is advanced
2262		 * by one.
2263		 */
2264		BUG_ON(!list_empty(&wq->flusher_overflow));
2265		this_flusher.flush_color = wq->work_color;
2266		wq->work_color = next_color;
2267
2268		if (!wq->first_flusher) {
2269			/* no flush in progress, become the first flusher */
2270			BUG_ON(wq->flush_color != this_flusher.flush_color);
2271
2272			wq->first_flusher = &this_flusher;
2273
2274			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2275						       wq->work_color)) {
2276				/* nothing to flush, done */
2277				wq->flush_color = next_color;
2278				wq->first_flusher = NULL;
2279				goto out_unlock;
2280			}
2281		} else {
2282			/* wait in queue */
2283			BUG_ON(wq->flush_color == this_flusher.flush_color);
2284			list_add_tail(&this_flusher.list, &wq->flusher_queue);
2285			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2286		}
2287	} else {
2288		/*
2289		 * Oops, color space is full, wait on overflow queue.
2290		 * The next flush completion will assign us
2291		 * flush_color and transfer to flusher_queue.
2292		 */
2293		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2294	}
2295
2296	mutex_unlock(&wq->flush_mutex);
2297
2298	wait_for_completion(&this_flusher.done);
2299
2300	/*
2301	 * Wake-up-and-cascade phase
2302	 *
2303	 * First flushers are responsible for cascading flushes and
2304	 * handling overflow.  Non-first flushers can simply return.
2305	 */
2306	if (wq->first_flusher != &this_flusher)
2307		return;
2308
2309	mutex_lock(&wq->flush_mutex);
2310
2311	/* we might have raced, check again with mutex held */
2312	if (wq->first_flusher != &this_flusher)
2313		goto out_unlock;
2314
2315	wq->first_flusher = NULL;
2316
2317	BUG_ON(!list_empty(&this_flusher.list));
2318	BUG_ON(wq->flush_color != this_flusher.flush_color);
2319
2320	while (true) {
2321		struct wq_flusher *next, *tmp;
2322
2323		/* complete all the flushers sharing the current flush color */
2324		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2325			if (next->flush_color != wq->flush_color)
2326				break;
2327			list_del_init(&next->list);
2328			complete(&next->done);
2329		}
2330
2331		BUG_ON(!list_empty(&wq->flusher_overflow) &&
2332		       wq->flush_color != work_next_color(wq->work_color));
2333
2334		/* this flush_color is finished, advance by one */
2335		wq->flush_color = work_next_color(wq->flush_color);
2336
2337		/* one color has been freed, handle overflow queue */
2338		if (!list_empty(&wq->flusher_overflow)) {
2339			/*
2340			 * Assign the same color to all overflowed
2341			 * flushers, advance work_color and append to
2342			 * flusher_queue.  This is the start-to-wait
2343			 * phase for these overflowed flushers.
2344			 */
2345			list_for_each_entry(tmp, &wq->flusher_overflow, list)
2346				tmp->flush_color = wq->work_color;
2347
2348			wq->work_color = work_next_color(wq->work_color);
2349
2350			list_splice_tail_init(&wq->flusher_overflow,
2351					      &wq->flusher_queue);
2352			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2353		}
2354
2355		if (list_empty(&wq->flusher_queue)) {
2356			BUG_ON(wq->flush_color != wq->work_color);
2357			break;
2358		}
2359
2360		/*
2361		 * Need to flush more colors.  Make the next flusher
2362		 * the new first flusher and arm cwqs.
2363		 */
2364		BUG_ON(wq->flush_color == wq->work_color);
2365		BUG_ON(wq->flush_color != next->flush_color);
2366
2367		list_del_init(&next->list);
2368		wq->first_flusher = next;
2369
2370		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2371			break;
2372
2373		/*
2374		 * Meh... this color is already done, clear first
2375		 * flusher and repeat cascading.
2376		 */
2377		wq->first_flusher = NULL;
2378	}
2379
2380out_unlock:
2381	mutex_unlock(&wq->flush_mutex);
2382}
2383EXPORT_SYMBOL_GPL(flush_workqueue);
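/*
 * Illustrative usage sketch (names are hypothetical, not from this file):
 * waiting for everything queued so far on a driver-private workqueue
 * before releasing state that those work items reference.
 *
 *	flush_workqueue(my_wq);		// every item queued before this has run
 *	kfree(my_shared_state);		// safe only if nothing requeues afterwards
 */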
2384
2385/**
2386 * drain_workqueue - drain a workqueue
2387 * @wq: workqueue to drain
2388 *
2389 * Wait until the workqueue becomes empty.  While draining is in progress,
2390 * only chain queueing is allowed.  IOW, only currently pending or running
2391 * work items on @wq can queue further work items on it.  @wq is flushed
2392 * repeatedly until it becomes empty.  The number of flushes is determined
2393 * by the depth of chaining and should be relatively short.  Whine if it
2394 * takes too long.
2395 */
2396void drain_workqueue(struct workqueue_struct *wq)
2397{
2398	unsigned int flush_cnt = 0;
2399	unsigned int cpu;
2400
2401	/*
2402	 * __queue_work() needs to test whether there are drainers; it is much
2403	 * hotter than drain_workqueue() and already looks at @wq->flags.
2404	 * Use WQ_DRAINING so that queueing doesn't have to check nr_drainers.
2405	 */
2406	spin_lock(&workqueue_lock);
2407	if (!wq->nr_drainers++)
2408		wq->flags |= WQ_DRAINING;
2409	spin_unlock(&workqueue_lock);
2410reflush:
2411	flush_workqueue(wq);
2412
2413	for_each_cwq_cpu(cpu, wq) {
2414		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2415		bool drained;
2416
2417		spin_lock_irq(&cwq->gcwq->lock);
2418		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2419		spin_unlock_irq(&cwq->gcwq->lock);
2420
2421		if (drained)
2422			continue;
2423
2424		if (++flush_cnt == 10 ||
2425		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2426			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
2427				   wq->name, flush_cnt);
2428		goto reflush;
2429	}
2430
2431	spin_lock(&workqueue_lock);
2432	if (!--wq->nr_drainers)
2433		wq->flags &= ~WQ_DRAINING;
2434	spin_unlock(&workqueue_lock);
2435}
2436EXPORT_SYMBOL_GPL(drain_workqueue);
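/*
 * Illustrative sketch of the chain queueing that stays legal while
 * draining (names are hypothetical): a work item already running on the
 * draining workqueue may requeue work on that same workqueue; queueing
 * from anywhere else while WQ_DRAINING is set is flagged as a bug.
 *
 *	static void my_step_fn(struct work_struct *work)
 *	{
 *		if (my_more_to_do())			// hypothetical helper
 *			queue_work(my_wq, work);	// allowed: chained queueing
 *	}
 */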
2437
2438static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2439			     bool wait_executing)
2440{
2441	struct worker *worker = NULL;
2442	struct global_cwq *gcwq;
2443	struct cpu_workqueue_struct *cwq;
2444
2445	might_sleep();
2446	gcwq = get_work_gcwq(work);
2447	if (!gcwq)
2448		return false;
2449
2450	spin_lock_irq(&gcwq->lock);
2451	if (!list_empty(&work->entry)) {
2452		/*
2453		 * See the comment near try_to_grab_pending()->smp_rmb().
2454		 * If it was re-queued to a different gcwq under us, we
2455		 * are not going to wait.
2456		 */
2457		smp_rmb();
2458		cwq = get_work_cwq(work);
2459		if (unlikely(!cwq || gcwq != cwq->gcwq))
2460			goto already_gone;
2461	} else if (wait_executing) {
2462		worker = find_worker_executing_work(gcwq, work);
2463		if (!worker)
2464			goto already_gone;
2465		cwq = worker->current_cwq;
2466	} else
2467		goto already_gone;
2468
2469	insert_wq_barrier(cwq, barr, work, worker);
2470	spin_unlock_irq(&gcwq->lock);
2471
2472	/*
2473	 * If @max_active is 1 or rescuer is in use, flushing another work
2474	 * item on the same workqueue may lead to deadlock.  Make sure the
2475	 * flusher is not running on the same workqueue by verifying write
2476	 * access.
2477	 */
2478	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2479		lock_map_acquire(&cwq->wq->lockdep_map);
2480	else
2481		lock_map_acquire_read(&cwq->wq->lockdep_map);
2482	lock_map_release(&cwq->wq->lockdep_map);
2483
2484	return true;
2485already_gone:
2486	spin_unlock_irq(&gcwq->lock);
2487	return false;
2488}
2489
2490/**
2491 * flush_work - wait for a work to finish executing the last queueing instance
2492 * @work: the work to flush
2493 *
2494 * Wait until @work has finished execution.  This function considers
2495 * only the last queueing instance of @work.  If @work has been
2496 * enqueued across different CPUs on a non-reentrant workqueue or on
2497 * multiple workqueues, @work might still be executing on return on
2498 * some of the CPUs from earlier queueing.
2499 *
2500 * If @work was queued only on a non-reentrant, ordered or unbound
2501 * workqueue, @work is guaranteed to be idle on return if it hasn't
2502 * been requeued since flush started.
2503 *
2504 * RETURNS:
2505 * %true if flush_work() waited for the work to finish execution,
2506 * %false if it was already idle.
2507 */
2508bool flush_work(struct work_struct *work)
2509{
2510	struct wq_barrier barr;
2511
2512	if (start_flush_work(work, &barr, true)) {
2513		wait_for_completion(&barr.done);
2514		destroy_work_on_stack(&barr.work);
2515		return true;
2516	} else
2517		return false;
2518}
2519EXPORT_SYMBOL_GPL(flush_work);
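/*
 * Illustrative sketch (names are hypothetical): waiting for one specific
 * work item instead of flushing the whole workqueue.
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 *	...
 *	if (flush_work(&my_work))
 *		pr_debug("waited for my_work to finish\n");
 */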
2520
2521static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2522{
2523	struct wq_barrier barr;
2524	struct worker *worker;
2525
2526	spin_lock_irq(&gcwq->lock);
2527
2528	worker = find_worker_executing_work(gcwq, work);
2529	if (unlikely(worker))
2530		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2531
2532	spin_unlock_irq(&gcwq->lock);
2533
2534	if (unlikely(worker)) {
2535		wait_for_completion(&barr.done);
2536		destroy_work_on_stack(&barr.work);
2537		return true;
2538	} else
2539		return false;
2540}
2541
2542static bool wait_on_work(struct work_struct *work)
2543{
2544	bool ret = false;
2545	int cpu;
2546
2547	might_sleep();
2548
2549	lock_map_acquire(&work->lockdep_map);
2550	lock_map_release(&work->lockdep_map);
2551
2552	for_each_gcwq_cpu(cpu)
2553		ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2554	return ret;
2555}
2556
2557/**
2558 * flush_work_sync - wait until a work has finished execution
2559 * @work: the work to flush
2560 *
2561 * Wait until @work has finished execution.  On return, it's
2562 * guaranteed that all queueing instances of @work which happened
2563 * before this function is called are finished.  In other words, if
2564 * @work hasn't been requeued since this function was called, @work is
2565 * guaranteed to be idle on return.
2566 *
2567 * RETURNS:
2568 * %true if flush_work_sync() waited for the work to finish execution,
2569 * %false if it was already idle.
2570 */
2571bool flush_work_sync(struct work_struct *work)
2572{
2573	struct wq_barrier barr;
2574	bool pending, waited;
2575
2576	/* we'll wait for executions separately, queue barr only if pending */
2577	pending = start_flush_work(work, &barr, false);
2578
2579	/* wait for executions to finish */
2580	waited = wait_on_work(work);
2581
2582	/* wait for the pending one */
2583	if (pending) {
2584		wait_for_completion(&barr.done);
2585		destroy_work_on_stack(&barr.work);
2586	}
2587
2588	return pending || waited;
2589}
2590EXPORT_SYMBOL_GPL(flush_work_sync);
2591
2592/*
2593 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2594 * so this work can't be re-armed in any way.
2595 */
2596static int try_to_grab_pending(struct work_struct *work)
2597{
2598	struct global_cwq *gcwq;
2599	int ret = -1;
2600
2601	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2602		return 0;
2603
2604	/*
2605	 * The queueing is in progress, or it is already queued. Try to
2606	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2607	 */
2608	gcwq = get_work_gcwq(work);
2609	if (!gcwq)
2610		return ret;
2611
2612	spin_lock_irq(&gcwq->lock);
2613	if (!list_empty(&work->entry)) {
2614		/*
2615		 * This work is queued, but perhaps we locked the wrong gcwq.
2616		 * In that case we must see the new value after rmb(), see
2617		 * insert_work()->wmb().
2618		 */
2619		smp_rmb();
2620		if (gcwq == get_work_gcwq(work)) {
2621			debug_work_deactivate(work);
2622			list_del_init(&work->entry);
2623			cwq_dec_nr_in_flight(get_work_cwq(work),
2624				get_work_color(work),
2625				*work_data_bits(work) & WORK_STRUCT_DELAYED);
2626			ret = 1;
2627		}
2628	}
2629	spin_unlock_irq(&gcwq->lock);
2630
2631	return ret;
2632}
2633
2634static bool __cancel_work_timer(struct work_struct *work,
2635				struct timer_list* timer)
2636{
2637	int ret;
2638
2639	do {
2640		ret = (timer && likely(del_timer(timer)));
2641		if (!ret)
2642			ret = try_to_grab_pending(work);
2643		wait_on_work(work);
2644	} while (unlikely(ret < 0));
2645
2646	clear_work_data(work);
2647	return ret;
2648}
2649
2650/**
2651 * cancel_work_sync - cancel a work and wait for it to finish
2652 * @work: the work to cancel
2653 *
2654 * Cancel @work and wait for its execution to finish.  This function
2655 * can be used even if the work re-queues itself or migrates to
2656 * another workqueue.  On return from this function, @work is
2657 * guaranteed to be not pending or executing on any CPU.
2658 *
2659 * cancel_work_sync(&delayed_work->work) must not be used for
2660 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2661 *
2662 * The caller must ensure that the workqueue on which @work was last
2663 * queued can't be destroyed before this function returns.
2664 *
2665 * RETURNS:
2666 * %true if @work was pending, %false otherwise.
2667 */
2668bool cancel_work_sync(struct work_struct *work)
2669{
2670	return __cancel_work_timer(work, NULL);
2671}
2672EXPORT_SYMBOL_GPL(cancel_work_sync);
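/*
 * Illustrative sketch (names are hypothetical): the usual teardown
 * pattern.  This is safe even if my_work_fn() requeues itself; once
 * cancel_work_sync() returns the item is neither pending nor running.
 *
 *	cancel_work_sync(&dev->my_work);
 *	kfree(dev->my_buf);		// my_work_fn() can no longer touch this
 */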
2673
2674/**
2675 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2676 * @dwork: the delayed work to flush
2677 *
2678 * Delayed timer is cancelled and the pending work is queued for
2679 * immediate execution.  Like flush_work(), this function only
2680 * considers the last queueing instance of @dwork.
2681 *
2682 * RETURNS:
2683 * %true if flush_work() waited for the work to finish execution,
2684 * %false if it was already idle.
2685 */
2686bool flush_delayed_work(struct delayed_work *dwork)
2687{
2688	if (del_timer_sync(&dwork->timer))
2689		__queue_work(raw_smp_processor_id(),
2690			     get_work_cwq(&dwork->work)->wq, &dwork->work);
2691	return flush_work(&dwork->work);
2692}
2693EXPORT_SYMBOL(flush_delayed_work);
2694
2695/**
2696 * flush_delayed_work_sync - wait for a dwork to finish
2697 * @dwork: the delayed work to flush
2698 *
2699 * Delayed timer is cancelled and the pending work is queued for
2700 * execution immediately.  Other than timer handling, its behavior
2701 * is identical to flush_work_sync().
2702 *
2703 * RETURNS:
2704 * %true if flush_work_sync() waited for the work to finish execution,
2705 * %false if it was already idle.
2706 */
2707bool flush_delayed_work_sync(struct delayed_work *dwork)
2708{
2709	if (del_timer_sync(&dwork->timer))
2710		__queue_work(raw_smp_processor_id(),
2711			     get_work_cwq(&dwork->work)->wq, &dwork->work);
2712	return flush_work_sync(&dwork->work);
2713}
2714EXPORT_SYMBOL(flush_delayed_work_sync);
2715
2716/**
2717 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2718 * @dwork: the delayed work to cancel
2719 *
2720 * This is cancel_work_sync() for delayed works.
2721 *
2722 * RETURNS:
2723 * %true if @dwork was pending, %false otherwise.
2724 */
2725bool cancel_delayed_work_sync(struct delayed_work *dwork)
2726{
2727	return __cancel_work_timer(&dwork->work, &dwork->timer);
2728}
2729EXPORT_SYMBOL(cancel_delayed_work_sync);
2730
2731/**
2732 * schedule_work - put work task in global workqueue
2733 * @work: job to be done
2734 *
2735 * Returns zero if @work was already on the kernel-global workqueue and
2736 * non-zero otherwise.
2737 *
2738 * This puts a job in the kernel-global workqueue if it was not already
2739 * queued and leaves it in the same position on the kernel-global
2740 * workqueue otherwise.
2741 */
2742int schedule_work(struct work_struct *work)
2743{
2744	return queue_work(system_wq, work);
2745}
2746EXPORT_SYMBOL(schedule_work);
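/*
 * Illustrative sketch (names are hypothetical): embedding a work_struct
 * in a driver object and bouncing processing from an interrupt handler
 * to the kernel-global workqueue so it runs in sleepable context.
 *
 *	struct my_dev {
 *		struct work_struct	irq_work;
 *	};
 *
 *	static void my_irq_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, irq_work);
 *		// heavy lifting in process context
 *	}
 *
 *	INIT_WORK(&dev->irq_work, my_irq_work_fn);	// at probe time
 *	schedule_work(&dev->irq_work);			// from the hot path
 */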
2747
2748/**
2749 * schedule_work_on - put work task on a specific cpu
2750 * @cpu: cpu to put the work task on
2751 * @work: job to be done
2752 *
2753 * This puts a job on a specific cpu.
2754 */
2755int schedule_work_on(int cpu, struct work_struct *work)
2756{
2757	return queue_work_on(cpu, system_wq, work);
2758}
2759EXPORT_SYMBOL(schedule_work_on);
2760
2761/**
2762 * schedule_delayed_work - put work task in global workqueue after delay
2763 * @dwork: job to be done
2764 * @delay: number of jiffies to wait or 0 for immediate execution
2765 *
2766 * After waiting for a given time this puts a job in the kernel-global
2767 * workqueue.
2768 */
2769int schedule_delayed_work(struct delayed_work *dwork,
2770					unsigned long delay)
2771{
2772	return queue_delayed_work(system_wq, dwork, delay);
2773}
2774EXPORT_SYMBOL(schedule_delayed_work);
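/*
 * Illustrative sketch (names are hypothetical): deferring work by a
 * fixed interval.  @delay is in jiffies, so convert from milliseconds.
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *	schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(500));
 */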
2775
2776/**
2777 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2778 * @cpu: cpu to use
2779 * @dwork: job to be done
2780 * @delay: number of jiffies to wait
2781 *
2782 * After waiting for a given time this puts a job in the kernel-global
2783 * workqueue on the specified CPU.
2784 */
2785int schedule_delayed_work_on(int cpu,
2786			struct delayed_work *dwork, unsigned long delay)
2787{
2788	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2789}
2790EXPORT_SYMBOL(schedule_delayed_work_on);
2791
2792/**
2793 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2794 * @func: the function to call
2795 *
2796 * schedule_on_each_cpu() executes @func on each online CPU using the
2797 * system workqueue and blocks until all CPUs have completed.
2798 * schedule_on_each_cpu() is very slow.
2799 *
2800 * RETURNS:
2801 * 0 on success, -errno on failure.
2802 */
2803int schedule_on_each_cpu(work_func_t func)
2804{
2805	int cpu;
2806	struct work_struct __percpu *works;
2807
2808	works = alloc_percpu(struct work_struct);
2809	if (!works)
2810		return -ENOMEM;
2811
2812	get_online_cpus();
2813
2814	for_each_online_cpu(cpu) {
2815		struct work_struct *work = per_cpu_ptr(works, cpu);
2816
2817		INIT_WORK(work, func);
2818		schedule_work_on(cpu, work);
2819	}
2820
2821	for_each_online_cpu(cpu)
2822		flush_work(per_cpu_ptr(works, cpu));
2823
2824	put_online_cpus();
2825	free_percpu(works);
2826	return 0;
2827}
2828
2829/**
2830 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2831 *
2832 * Forces execution of the kernel-global workqueue and blocks until its
2833 * completion.
2834 *
2835 * Think twice before calling this function!  It's very easy to get into
2836 * trouble if you don't take great care.  Either of the following situations
2837 * will lead to deadlock:
2838 *
2839 *	One of the work items currently on the workqueue needs to acquire
2840 *	a lock held by your code or its caller.
2841 *
2842 *	Your code is running in the context of a work routine.
2843 *
2844 * They will be detected by lockdep when they occur, but the first might not
2845 * occur very often.  It depends on what work items are on the workqueue and
2846 * what locks they need, which you have no control over.
2847 *
2848 * In most situations flushing the entire workqueue is overkill; you merely
2849 * need to know that a particular work item isn't queued and isn't running.
2850 * In such cases you should use cancel_delayed_work_sync() or
2851 * cancel_work_sync() instead.
2852 */
2853void flush_scheduled_work(void)
2854{
2855	flush_workqueue(system_wq);
2856}
2857EXPORT_SYMBOL(flush_scheduled_work);
2858
2859/**
2860 * execute_in_process_context - reliably execute the routine with user context
2861 * @fn:		the function to execute
2862 * @ew:		guaranteed storage for the execute work structure (must
2863 *		be available when the work executes)
2864 *
2865 * Executes the function immediately if process context is available,
2866 * otherwise schedules the function for delayed execution.
2867 *
2868 * Returns:	0 - function was executed
2869 *		1 - function was scheduled for execution
2870 */
2871int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2872{
2873	if (!in_interrupt()) {
2874		fn(&ew->work);
2875		return 0;
2876	}
2877
2878	INIT_WORK(&ew->work, fn);
2879	schedule_work(&ew->work);
2880
2881	return 1;
2882}
2883EXPORT_SYMBOL_GPL(execute_in_process_context);
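/*
 * Illustrative sketch (names are hypothetical): callers which may run in
 * either process or interrupt context can use this to get @fn executed
 * in process context without checking which case applies.  @ew usually
 * lives in a long-lived object, not on the stack, since it must survive
 * until the scheduled work runs.
 *
 *	execute_in_process_context(my_release_fn, &dev->release_ew);
 */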
2884
2885int keventd_up(void)
2886{
2887	return system_wq != NULL;
2888}
2889
2890static int alloc_cwqs(struct workqueue_struct *wq)
2891{
2892	/*
2893	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
2894	 * Make sure that the alignment isn't lower than that of
2895	 * unsigned long long.
2896	 */
2897	const size_t size = sizeof(struct cpu_workqueue_struct);
2898	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2899				   __alignof__(unsigned long long));
2900#ifdef CONFIG_SMP
2901	bool percpu = !(wq->flags & WQ_UNBOUND);
2902#else
2903	bool percpu = false;
2904#endif
2905
2906	if (percpu)
2907		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2908	else {
2909		void *ptr;
2910
2911		/*
2912		 * Allocate enough room to align cwq and put an extra
2913		 * pointer at the end pointing back to the originally
2914		 * allocated pointer which will be used for free.
2915		 */
2916		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2917		if (ptr) {
2918			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2919			*(void **)(wq->cpu_wq.single + 1) = ptr;
2920		}
2921	}
2922
2923	/* just in case, make sure it's actually aligned */
2924	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2925	return wq->cpu_wq.v ? 0 : -ENOMEM;
2926}
2927
2928static void free_cwqs(struct workqueue_struct *wq)
2929{
2930#ifdef CONFIG_SMP
2931	bool percpu = !(wq->flags & WQ_UNBOUND);
2932#else
2933	bool percpu = false;
2934#endif
2935
2936	if (percpu)
2937		free_percpu(wq->cpu_wq.pcpu);
2938	else if (wq->cpu_wq.single) {
2939		/* the pointer to free is stored right after the cwq */
2940		kfree(*(void **)(wq->cpu_wq.single + 1));
2941	}
2942}
2943
2944static int wq_clamp_max_active(int max_active, unsigned int flags,
2945			       const char *name)
2946{
2947	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2948
2949	if (max_active < 1 || max_active > lim)
2950		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2951		       "is out of range, clamping between %d and %d\n",
2952		       max_active, name, 1, lim);
2953
2954	return clamp_val(max_active, 1, lim);
2955}
2956
2957struct workqueue_struct *__alloc_workqueue_key(const char *name,
2958					       unsigned int flags,
2959					       int max_active,
2960					       struct lock_class_key *key,
2961					       const char *lock_name)
2962{
2963	struct workqueue_struct *wq;
2964	unsigned int cpu;
2965
2966	/*
2967	 * Workqueues which may be used during memory reclaim should
2968	 * have a rescuer to guarantee forward progress.
2969	 */
2970	if (flags & WQ_MEM_RECLAIM)
2971		flags |= WQ_RESCUER;
2972
2973	/*
2974	 * Unbound workqueues aren't concurrency managed and should be
2975	 * dispatched to workers immediately.
2976	 */
2977	if (flags & WQ_UNBOUND)
2978		flags |= WQ_HIGHPRI;
2979
2980	max_active = max_active ?: WQ_DFL_ACTIVE;
2981	max_active = wq_clamp_max_active(max_active, flags, name);
2982
2983	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
2984	if (!wq)
2985		goto err;
2986
2987	wq->flags = flags;
2988	wq->saved_max_active = max_active;
2989	mutex_init(&wq->flush_mutex);
2990	atomic_set(&wq->nr_cwqs_to_flush, 0);
2991	INIT_LIST_HEAD(&wq->flusher_queue);
2992	INIT_LIST_HEAD(&wq->flusher_overflow);
2993
2994	wq->name = name;
2995	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2996	INIT_LIST_HEAD(&wq->list);
2997
2998	if (alloc_cwqs(wq) < 0)
2999		goto err;
3000
3001	for_each_cwq_cpu(cpu, wq) {
3002		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3003		struct global_cwq *gcwq = get_gcwq(cpu);
3004
3005		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3006		cwq->gcwq = gcwq;
3007		cwq->wq = wq;
3008		cwq->flush_color = -1;
3009		cwq->max_active = max_active;
3010		INIT_LIST_HEAD(&cwq->delayed_works);
3011	}
3012
3013	if (flags & WQ_RESCUER) {
3014		struct worker *rescuer;
3015
3016		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
3017			goto err;
3018
3019		wq->rescuer = rescuer = alloc_worker();
3020		if (!rescuer)
3021			goto err;
3022
3023		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
3024		if (IS_ERR(rescuer->task))
3025			goto err;
3026
3027		rescuer->task->flags |= PF_THREAD_BOUND;
3028		wake_up_process(rescuer->task);
3029	}
3030
3031	/*
3032	 * workqueue_lock protects global freeze state and workqueues
3033	 * list.  Grab it, set max_active accordingly and add the new
3034	 * workqueue to workqueues list.
3035	 */
3036	spin_lock(&workqueue_lock);
3037
3038	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3039		for_each_cwq_cpu(cpu, wq)
3040			get_cwq(cpu, wq)->max_active = 0;
3041
3042	list_add(&wq->list, &workqueues);
3043
3044	spin_unlock(&workqueue_lock);
3045
3046	return wq;
3047err:
3048	if (wq) {
3049		free_cwqs(wq);
3050		free_mayday_mask(wq->mayday_mask);
3051		kfree(wq->rescuer);
3052		kfree(wq);
3053	}
3054	return NULL;
3055}
3056EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
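/*
 * Illustrative sketch: __alloc_workqueue_key() is normally reached via
 * the alloc_workqueue() wrapper.  A workqueue that may be used during
 * memory reclaim should pass WQ_MEM_RECLAIM so it gets a rescuer
 * ("my_wq" is hypothetical).
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 1);
 *	if (!my_wq)
 *		return -ENOMEM;
 */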
3057
3058/**
3059 * destroy_workqueue - safely terminate a workqueue
3060 * @wq: target workqueue
3061 *
3062 * Safely destroy a workqueue. All work currently pending will be done first.
3063 */
3064void destroy_workqueue(struct workqueue_struct *wq)
3065{
3066	unsigned int cpu;
3067
3068	/* drain it before proceeding with destruction */
3069	drain_workqueue(wq);
3070
3071	/*
3072	 * wq list is used to freeze wq, remove from list after
3073	 * flushing is complete in case freeze races us.
3074	 */
3075	spin_lock(&workqueue_lock);
3076	list_del(&wq->list);
3077	spin_unlock(&workqueue_lock);
3078
3079	/* sanity check */
3080	for_each_cwq_cpu(cpu, wq) {
3081		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3082		int i;
3083
3084		for (i = 0; i < WORK_NR_COLORS; i++)
3085			BUG_ON(cwq->nr_in_flight[i]);
3086		BUG_ON(cwq->nr_active);
3087		BUG_ON(!list_empty(&cwq->delayed_works));
3088	}
3089
3090	if (wq->flags & WQ_RESCUER) {
3091		kthread_stop(wq->rescuer->task);
3092		free_mayday_mask(wq->mayday_mask);
3093		kfree(wq->rescuer);
3094	}
3095
3096	free_cwqs(wq);
3097	kfree(wq);
3098}
3099EXPORT_SYMBOL_GPL(destroy_workqueue);
3100
3101/**
3102 * workqueue_set_max_active - adjust max_active of a workqueue
3103 * @wq: target workqueue
3104 * @max_active: new max_active value.
3105 *
3106 * Set max_active of @wq to @max_active.
3107 *
3108 * CONTEXT:
3109 * Don't call from IRQ context.
3110 */
3111void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3112{
3113	unsigned int cpu;
3114
3115	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3116
3117	spin_lock(&workqueue_lock);
3118
3119	wq->saved_max_active = max_active;
3120
3121	for_each_cwq_cpu(cpu, wq) {
3122		struct global_cwq *gcwq = get_gcwq(cpu);
3123
3124		spin_lock_irq(&gcwq->lock);
3125
3126		if (!(wq->flags & WQ_FREEZABLE) ||
3127		    !(gcwq->flags & GCWQ_FREEZING))
3128			get_cwq(gcwq->cpu, wq)->max_active = max_active;
3129
3130		spin_unlock_irq(&gcwq->lock);
3131	}
3132
3133	spin_unlock(&workqueue_lock);
3134}
3135EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3136
3137/**
3138 * workqueue_congested - test whether a workqueue is congested
3139 * @cpu: CPU in question
3140 * @wq: target workqueue
3141 *
3142 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
3143 * no synchronization around this function and the test result is
3144 * unreliable and only useful as advisory hints or for debugging.
3145 *
3146 * RETURNS:
3147 * %true if congested, %false otherwise.
3148 */
3149bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3150{
3151	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3152
3153	return !list_empty(&cwq->delayed_works);
3154}
3155EXPORT_SYMBOL_GPL(workqueue_congested);
3156
3157/**
3158 * work_cpu - return the last known associated cpu for @work
3159 * @work: the work of interest
3160 *
3161 * RETURNS:
3162 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
3163 */
3164unsigned int work_cpu(struct work_struct *work)
3165{
3166	struct global_cwq *gcwq = get_work_gcwq(work);
3167
3168	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3169}
3170EXPORT_SYMBOL_GPL(work_cpu);
3171
3172/**
3173 * work_busy - test whether a work is currently pending or running
3174 * @work: the work to be tested
3175 *
3176 * Test whether @work is currently pending or running.  There is no
3177 * synchronization around this function and the test result is
3178 * unreliable and only useful as advisory hints or for debugging.
3179 * Especially for reentrant wqs, the pending state might hide the
3180 * running state.
3181 *
3182 * RETURNS:
3183 * OR'd bitmask of WORK_BUSY_* bits.
3184 */
3185unsigned int work_busy(struct work_struct *work)
3186{
3187	struct global_cwq *gcwq = get_work_gcwq(work);
3188	unsigned long flags;
3189	unsigned int ret = 0;
3190
3191	if (!gcwq)
3192		return false;
3193
3194	spin_lock_irqsave(&gcwq->lock, flags);
3195
3196	if (work_pending(work))
3197		ret |= WORK_BUSY_PENDING;
3198	if (find_worker_executing_work(gcwq, work))
3199		ret |= WORK_BUSY_RUNNING;
3200
3201	spin_unlock_irqrestore(&gcwq->lock, flags);
3202
3203	return ret;
3204}
3205EXPORT_SYMBOL_GPL(work_busy);
3206
3207/*
3208 * CPU hotplug.
3209 *
3210 * There are two challenges in supporting CPU hotplug.  Firstly, there
3211 * are a lot of assumptions on strong associations among work, cwq and
3212 * gcwq which make migrating pending and scheduled works very
3213 * difficult to implement without impacting hot paths.  Secondly,
3214 * gcwqs serve a mix of short, long and very long running works, making
3215 * blocked draining impractical.
3216 *
3217 * This is solved by allowing a gcwq to be detached from CPU, running
3218 * it with unbound (rogue) workers and allowing it to be reattached
3219 * later if the cpu comes back online.  A separate thread is created
3220 * to govern a gcwq in such state and is called the trustee of the
3221 * gcwq.
3222 *
3223 * Trustee states and their descriptions.
3224 *
3225 * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
3226 *		new trustee is started with this state.
3227 *
3228 * IN_CHARGE	Once started, trustee will enter this state after
3229 *		assuming the manager role and making all existing
3230 *		workers rogue.  DOWN_PREPARE waits for trustee to
3231 *		enter this state.  After reaching IN_CHARGE, trustee
3232 *		tries to execute the pending worklist until it's empty
3233 *		and the state is set to BUTCHER, or the state is set
3234 *		to RELEASE.
3235 *
3236 * BUTCHER	Command state which is set by the cpu callback after
3237 *		the cpu has gone down.  Once this state is set, the trustee
3238 *		knows that there will be no new works on the worklist
3239 *		and once the worklist is empty it can proceed to
3240 *		killing idle workers.
3241 *
3242 * RELEASE	Command state which is set by the cpu callback if the
3243 *		cpu down has been canceled or it has come online
3244 *		again.  After recognizing this state, trustee stops
3245 *		trying to drain or butcher and clears ROGUE, rebinds
3246 *		all remaining workers back to the cpu and releases
3247 *		manager role.
3248 *
3249 * DONE		Trustee will enter this state after BUTCHER or RELEASE
3250 *		is complete.
3251 *
3252 *          trustee                 CPU                draining
3253 *         took over                down               complete
3254 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3255 *                        |                     |                  ^
3256 *                        | CPU is back online  v   return workers |
3257 *                         ----------------> RELEASE --------------
3258 */
3259
3260/**
3261 * trustee_wait_event_timeout - timed event wait for trustee
3262 * @cond: condition to wait for
3263 * @timeout: timeout in jiffies
3264 *
3265 * wait_event_timeout() for trustee to use.  Handles locking and
3266 * checks for RELEASE request.
3267 *
3268 * CONTEXT:
3269 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3270 * multiple times.  To be used by trustee.
3271 *
3272 * RETURNS:
3273 * Positive indicating left time if @cond is satisfied, 0 if timed
3274 * out, -1 if canceled.
3275 */
3276#define trustee_wait_event_timeout(cond, timeout) ({			\
3277	long __ret = (timeout);						\
3278	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
3279	       __ret) {							\
3280		spin_unlock_irq(&gcwq->lock);				\
3281		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
3282			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
3283			__ret);						\
3284		spin_lock_irq(&gcwq->lock);				\
3285	}								\
3286	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
3287})
3288
3289/**
3290 * trustee_wait_event - event wait for trustee
3291 * @cond: condition to wait for
3292 *
3293 * wait_event() for trustee to use.  Automatically handles locking and
3294 * checks for RELEASE request.
3295 *
3296 * CONTEXT:
3297 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3298 * multiple times.  To be used by trustee.
3299 *
3300 * RETURNS:
3301 * 0 if @cond is satisfied, -1 if canceled.
3302 */
3303#define trustee_wait_event(cond) ({					\
3304	long __ret1;							\
3305	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3306	__ret1 < 0 ? -1 : 0;						\
3307})
3308
3309static int __cpuinit trustee_thread(void *__gcwq)
3310{
3311	struct global_cwq *gcwq = __gcwq;
3312	struct worker *worker;
3313	struct work_struct *work;
3314	struct hlist_node *pos;
3315	long rc;
3316	int i;
3317
3318	BUG_ON(gcwq->cpu != smp_processor_id());
3319
3320	spin_lock_irq(&gcwq->lock);
3321	/*
3322	 * Claim the manager position and make all workers rogue.
3323	 * Trustee must be bound to the target cpu and can't be
3324	 * cancelled.
3325	 */
3326	BUG_ON(gcwq->cpu != smp_processor_id());
3327	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3328	BUG_ON(rc < 0);
3329
3330	gcwq->flags |= GCWQ_MANAGING_WORKERS;
3331
3332	list_for_each_entry(worker, &gcwq->idle_list, entry)
3333		worker->flags |= WORKER_ROGUE;
3334
3335	for_each_busy_worker(worker, i, pos, gcwq)
3336		worker->flags |= WORKER_ROGUE;
3337
3338	/*
3339	 * Call schedule() so that we cross rq->lock and thus can
3340	 * guarantee sched callbacks see the rogue flag.  This is
3341	 * necessary as scheduler callbacks may be invoked from other
3342	 * cpus.
3343	 */
3344	spin_unlock_irq(&gcwq->lock);
3345	schedule();
3346	spin_lock_irq(&gcwq->lock);
3347
3348	/*
3349	 * Sched callbacks are disabled now.  Zap nr_running.  After
3350	 * this, nr_running stays zero and need_more_worker() and
3351	 * keep_working() are always true as long as the worklist is
3352	 * not empty.
3353	 */
3354	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3355
3356	spin_unlock_irq(&gcwq->lock);
3357	del_timer_sync(&gcwq->idle_timer);
3358	spin_lock_irq(&gcwq->lock);
3359
3360	/*
3361	 * We're now in charge.  Notify and proceed to drain.  We need
3362	 * to keep the gcwq running during the whole CPU down
3363	 * procedure as other cpu hotunplug callbacks may need to
3364	 * flush currently running tasks.
3365	 */
3366	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3367	wake_up_all(&gcwq->trustee_wait);
3368
3369	/*
3370	 * The original cpu is in the process of dying and may go away
3371	 * anytime now.  When that happens, we and all workers would
3372	 * be migrated to other cpus.  Try draining any remaining work.  We
3373	 * want to get it over with ASAP - spam rescuers, wake up as
3374	 * many idlers as necessary and create new ones till the
3375	 * worklist is empty.  Note that if the gcwq is frozen, there
3376	 * may be frozen works in freezable cwqs.  Don't declare
3377	 * completion while frozen.
3378	 */
3379	while (gcwq->nr_workers != gcwq->nr_idle ||
3380	       gcwq->flags & GCWQ_FREEZING ||
3381	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3382		int nr_works = 0;
3383
3384		list_for_each_entry(work, &gcwq->worklist, entry) {
3385			send_mayday(work);
3386			nr_works++;
3387		}
3388
3389		list_for_each_entry(worker, &gcwq->idle_list, entry) {
3390			if (!nr_works--)
3391				break;
3392			wake_up_process(worker->task);
3393		}
3394
3395		if (need_to_create_worker(gcwq)) {
3396			spin_unlock_irq(&gcwq->lock);
3397			worker = create_worker(gcwq, false);
3398			spin_lock_irq(&gcwq->lock);
3399			if (worker) {
3400				worker->flags |= WORKER_ROGUE;
3401				start_worker(worker);
3402			}
3403		}
3404
3405		/* give a breather */
3406		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3407			break;
3408	}
3409
3410	/*
3411	 * Either all works have been scheduled and cpu is down, or
3412	 * cpu down has already been canceled.  Wait for and butcher
3413	 * all workers till we're canceled.
3414	 */
3415	do {
3416		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3417		while (!list_empty(&gcwq->idle_list))
3418			destroy_worker(list_first_entry(&gcwq->idle_list,
3419							struct worker, entry));
3420	} while (gcwq->nr_workers && rc >= 0);
3421
3422	/*
3423	 * At this point, either draining has completed and no worker
3424	 * is left, or cpu down has been canceled or the cpu is being
3425	 * brought back up.  There shouldn't be any idle one left.
3426	 * Tell the remaining busy ones to rebind once they finish the
3427	 * currently scheduled works by scheduling the rebind_work.
3428	 */
3429	WARN_ON(!list_empty(&gcwq->idle_list));
3430
3431	for_each_busy_worker(worker, i, pos, gcwq) {
3432		struct work_struct *rebind_work = &worker->rebind_work;
3433
3434		/*
3435		 * Rebind_work may race with future cpu hotplug
3436		 * operations.  Use a separate flag to mark that
3437		 * rebinding is scheduled.
3438		 */
3439		worker->flags |= WORKER_REBIND;
3440		worker->flags &= ~WORKER_ROGUE;
3441
3442		/* queue rebind_work, wq doesn't matter, use the default one */
3443		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3444				     work_data_bits(rebind_work)))
3445			continue;
3446
3447		debug_work_activate(rebind_work);
3448		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3449			    worker->scheduled.next,
3450			    work_color_to_flags(WORK_NO_COLOR));
3451	}
3452
3453	/* relinquish manager role */
3454	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3455
3456	/* notify completion */
3457	gcwq->trustee = NULL;
3458	gcwq->trustee_state = TRUSTEE_DONE;
3459	wake_up_all(&gcwq->trustee_wait);
3460	spin_unlock_irq(&gcwq->lock);
3461	return 0;
3462}
3463
3464/**
3465 * wait_trustee_state - wait for trustee to enter the specified state
3466 * @gcwq: gcwq the trustee of interest belongs to
3467 * @state: target state to wait for
3468 *
3469 * Wait for the trustee to reach @state.  DONE is already matched.
3470 *
3471 * CONTEXT:
3472 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3473 * multiple times.  To be used by cpu_callback.
3474 */
3475static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3476__releases(&gcwq->lock)
3477__acquires(&gcwq->lock)
3478{
3479	if (!(gcwq->trustee_state == state ||
3480	      gcwq->trustee_state == TRUSTEE_DONE)) {
3481		spin_unlock_irq(&gcwq->lock);
3482		__wait_event(gcwq->trustee_wait,
3483			     gcwq->trustee_state == state ||
3484			     gcwq->trustee_state == TRUSTEE_DONE);
3485		spin_lock_irq(&gcwq->lock);
3486	}
3487}
3488
3489static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3490						unsigned long action,
3491						void *hcpu)
3492{
3493	unsigned int cpu = (unsigned long)hcpu;
3494	struct global_cwq *gcwq = get_gcwq(cpu);
3495	struct task_struct *new_trustee = NULL;
3496	struct worker *uninitialized_var(new_worker);
3497	unsigned long flags;
3498
3499	action &= ~CPU_TASKS_FROZEN;
3500
3501	switch (action) {
3502	case CPU_DOWN_PREPARE:
3503		new_trustee = kthread_create(trustee_thread, gcwq,
3504					     "workqueue_trustee/%d\n", cpu);
3505		if (IS_ERR(new_trustee))
3506			return notifier_from_errno(PTR_ERR(new_trustee));
3507		kthread_bind(new_trustee, cpu);
3508		/* fall through */
3509	case CPU_UP_PREPARE:
3510		BUG_ON(gcwq->first_idle);
3511		new_worker = create_worker(gcwq, false);
3512		if (!new_worker) {
3513			if (new_trustee)
3514				kthread_stop(new_trustee);
3515			return NOTIFY_BAD;
3516		}
3517	}
3518
3519	/* some are called w/ irq disabled, don't disturb irq status */
3520	spin_lock_irqsave(&gcwq->lock, flags);
3521
3522	switch (action) {
3523	case CPU_DOWN_PREPARE:
3524		/* initialize trustee and tell it to acquire the gcwq */
3525		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3526		gcwq->trustee = new_trustee;
3527		gcwq->trustee_state = TRUSTEE_START;
3528		wake_up_process(gcwq->trustee);
3529		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3530		/* fall through */
3531	case CPU_UP_PREPARE:
3532		BUG_ON(gcwq->first_idle);
3533		gcwq->first_idle = new_worker;
3534		break;
3535
3536	case CPU_DYING:
3537		/*
3538		 * Before this, the trustee and all workers except for
3539		 * the ones which are still executing works from
3540		 * before the last CPU down must be on the cpu.  After
3541		 * this, they'll all be diasporas.
3542		 */
3543		gcwq->flags |= GCWQ_DISASSOCIATED;
3544		break;
3545
3546	case CPU_POST_DEAD:
3547		gcwq->trustee_state = TRUSTEE_BUTCHER;
3548		/* fall through */
3549	case CPU_UP_CANCELED:
3550		destroy_worker(gcwq->first_idle);
3551		gcwq->first_idle = NULL;
3552		break;
3553
3554	case CPU_DOWN_FAILED:
3555	case CPU_ONLINE:
3556		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3557		if (gcwq->trustee_state != TRUSTEE_DONE) {
3558			gcwq->trustee_state = TRUSTEE_RELEASE;
3559			wake_up_process(gcwq->trustee);
3560			wait_trustee_state(gcwq, TRUSTEE_DONE);
3561		}
3562
3563		/*
3564		 * Trustee is done and there might be no worker left.
3565		 * Put the first_idle in and request a real manager to
3566		 * take a look.
3567		 */
3568		spin_unlock_irq(&gcwq->lock);
3569		kthread_bind(gcwq->first_idle->task, cpu);
3570		spin_lock_irq(&gcwq->lock);
3571		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3572		start_worker(gcwq->first_idle);
3573		gcwq->first_idle = NULL;
3574		break;
3575	}
3576
3577	spin_unlock_irqrestore(&gcwq->lock, flags);
3578
3579	return notifier_from_errno(0);
3580}
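/*
 * Illustrative sketch, not part of the original file: a caller that
 * targets a specific CPU has to guard against the hotplug transitions
 * handled by the notifier above.  get_online_cpus() keeps
 * CPU_DOWN_PREPARE from starting while the item is queued.  The per-cpu
 * work name is hypothetical and would need INIT_WORK() at init time.
 */
#if 0
static DEFINE_PER_CPU(struct work_struct, my_percpu_work);

static void my_queue_on_cpu(int cpu)
{
	get_online_cpus();
	if (cpu_online(cpu))
		queue_work_on(cpu, system_wq, &per_cpu(my_percpu_work, cpu));
	put_online_cpus();
}
#endif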
3581
3582#ifdef CONFIG_SMP
3583
3584struct work_for_cpu {
3585	struct completion completion;
3586	long (*fn)(void *);
3587	void *arg;
3588	long ret;
3589};
3590
3591static int do_work_for_cpu(void *_wfc)
3592{
3593	struct work_for_cpu *wfc = _wfc;
3594	wfc->ret = wfc->fn(wfc->arg);
3595	complete(&wfc->completion);
3596	return 0;
3597}
3598
3599/**
3600 * work_on_cpu - run a function in process context on a particular cpu
3601 * @cpu: the cpu to run on
3602 * @fn: the function to run
3603 * @arg: the function arg
3604 *
3605 * This will return the value @fn returns.
3606 * It is up to the caller to ensure that the cpu doesn't go offline.
3607 * The caller must not hold any locks which would prevent @fn from completing.
3608 */
3609long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3610{
3611	struct task_struct *sub_thread;
3612	struct work_for_cpu wfc = {
3613		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3614		.fn = fn,
3615		.arg = arg,
3616	};
3617
3618	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3619	if (IS_ERR(sub_thread))
3620		return PTR_ERR(sub_thread);
3621	kthread_bind(sub_thread, cpu);
3622	wake_up_process(sub_thread);
3623	wait_for_completion(&wfc.completion);
3624	return wfc.ret;
3625}
3626EXPORT_SYMBOL_GPL(work_on_cpu);
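/*
 * Illustrative usage sketch, not part of the original file: callers
 * usually bracket work_on_cpu() with get_online_cpus()/put_online_cpus()
 * so the target CPU cannot be unplugged while @fn runs.  The function
 * and its purpose below are hypothetical.
 */
#if 0
static long my_read_local_state(void *arg)
{
	/* runs on the requested CPU, in process context, may sleep */
	return 0;
}

static long my_read_state_on(unsigned int cpu)
{
	long ret = -ENODEV;

	get_online_cpus();
	if (cpu_online(cpu))
		ret = work_on_cpu(cpu, my_read_local_state, NULL);
	put_online_cpus();
	return ret;
}
#endif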
3627#endif /* CONFIG_SMP */
3628
3629#ifdef CONFIG_FREEZER
3630
3631/**
3632 * freeze_workqueues_begin - begin freezing workqueues
3633 *
3634 * Start freezing workqueues.  After this function returns, all freezable
3635 * workqueues will queue new works to their delayed_works list instead of
3636 * gcwq->worklist.
3637 *
3638 * CONTEXT:
3639 * Grabs and releases workqueue_lock and gcwq->lock's.
3640 */
3641void freeze_workqueues_begin(void)
3642{
3643	unsigned int cpu;
3644
3645	spin_lock(&workqueue_lock);
3646
3647	BUG_ON(workqueue_freezing);
3648	workqueue_freezing = true;
3649
3650	for_each_gcwq_cpu(cpu) {
3651		struct global_cwq *gcwq = get_gcwq(cpu);
3652		struct workqueue_struct *wq;
3653
3654		spin_lock_irq(&gcwq->lock);
3655
3656		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3657		gcwq->flags |= GCWQ_FREEZING;
3658
3659		list_for_each_entry(wq, &workqueues, list) {
3660			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3661
3662			if (cwq && wq->flags & WQ_FREEZABLE)
3663				cwq->max_active = 0;
3664		}
3665
3666		spin_unlock_irq(&gcwq->lock);
3667	}
3668
3669	spin_unlock(&workqueue_lock);
3670}
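/*
 * Illustrative sketch, not part of the original file: only workqueues
 * created with WQ_FREEZABLE take part in the max_active = 0 trick
 * above.  A driver whose works must not touch hardware during suspend
 * would typically allocate its queue like this (names hypothetical):
 */
#if 0
static struct workqueue_struct *my_io_wq;

static int __init my_driver_init(void)
{
	my_io_wq = alloc_workqueue("my_io", WQ_FREEZABLE, 0);
	if (!my_io_wq)
		return -ENOMEM;
	return 0;
}
#endif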
3671
3672/**
3673 * freeze_workqueues_busy - are freezable workqueues still busy?
3674 *
3675 * Check whether freezing is complete.  This function must be called
3676 * between freeze_workqueues_begin() and thaw_workqueues().
3677 *
3678 * CONTEXT:
3679 * Grabs and releases workqueue_lock.
3680 *
3681 * RETURNS:
3682 * %true if some freezable workqueues are still busy.  %false if freezing
3683 * is complete.
3684 */
3685bool freeze_workqueues_busy(void)
3686{
3687	unsigned int cpu;
3688	bool busy = false;
3689
3690	spin_lock(&workqueue_lock);
3691
3692	BUG_ON(!workqueue_freezing);
3693
3694	for_each_gcwq_cpu(cpu) {
3695		struct workqueue_struct *wq;
3696		/*
3697		 * nr_active is monotonically decreasing.  It's safe
3698		 * to peek without lock.
3699		 */
3700		list_for_each_entry(wq, &workqueues, list) {
3701			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3702
3703			if (!cwq || !(wq->flags & WQ_FREEZABLE))
3704				continue;
3705
3706			BUG_ON(cwq->nr_active < 0);
3707			if (cwq->nr_active) {
3708				busy = true;
3709				goto out_unlock;
3710			}
3711		}
3712	}
3713out_unlock:
3714	spin_unlock(&workqueue_lock);
3715	return busy;
3716}
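/*
 * Illustrative sketch, not part of the original file: the suspend path
 * uses the three freezer entry points roughly in this order - begin,
 * poll busy until it returns false (or a bound expires), thaw on resume
 * or on failure.  This is a simplified rendering, not the actual
 * kernel/power/ code; the timeout value is arbitrary.
 */
#if 0
static int my_freeze_workqueues(void)
{
	unsigned long timeout = jiffies + 20 * HZ;

	freeze_workqueues_begin();
	while (freeze_workqueues_busy()) {
		if (time_after(jiffies, timeout)) {
			thaw_workqueues();
			return -EBUSY;
		}
		msleep(10);
	}
	return 0;
}
#endif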
3717
3718/**
3719 * thaw_workqueues - thaw workqueues
3720 *
3721 * Thaw workqueues.  Normal queueing is restored and all collected
3722 * frozen works are transferred to their respective gcwq worklists.
3723 *
3724 * CONTEXT:
3725 * Grabs and releases workqueue_lock and gcwq->lock's.
3726 */
3727void thaw_workqueues(void)
3728{
3729	unsigned int cpu;
3730
3731	spin_lock(&workqueue_lock);
3732
3733	if (!workqueue_freezing)
3734		goto out_unlock;
3735
3736	for_each_gcwq_cpu(cpu) {
3737		struct global_cwq *gcwq = get_gcwq(cpu);
3738		struct workqueue_struct *wq;
3739
3740		spin_lock_irq(&gcwq->lock);
3741
3742		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3743		gcwq->flags &= ~GCWQ_FREEZING;
3744
3745		list_for_each_entry(wq, &workqueues, list) {
3746			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3747
3748			if (!cwq || !(wq->flags & WQ_FREEZABLE))
3749				continue;
3750
3751			/* restore max_active and repopulate worklist */
3752			cwq->max_active = wq->saved_max_active;
3753
3754			while (!list_empty(&cwq->delayed_works) &&
3755			       cwq->nr_active < cwq->max_active)
3756				cwq_activate_first_delayed(cwq);
3757		}
3758
3759		wake_up_worker(gcwq);
3760
3761		spin_unlock_irq(&gcwq->lock);
3762	}
3763
3764	workqueue_freezing = false;
3765out_unlock:
3766	spin_unlock(&workqueue_lock);
3767}
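/*
 * Illustrative sketch, not part of the original file: while frozen, a
 * freezable workqueue still accepts queue_work() - with max_active
 * forced to 0 the item simply parks on cwq->delayed_works until the
 * loop above re-activates it at thaw time.  Names are hypothetical.
 */
#if 0
static void my_queue_while_frozen(struct workqueue_struct *freezable_wq,
				  struct work_struct *work)
{
	/* succeeds, but the work will not run until thaw_workqueues() */
	queue_work(freezable_wq, work);
}
#endif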
3768#endif /* CONFIG_FREEZER */
3769
3770static int __init init_workqueues(void)
3771{
3772	unsigned int cpu;
3773	int i;
3774
3775	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
3776
3777	/* initialize gcwqs */
3778	for_each_gcwq_cpu(cpu) {
3779		struct global_cwq *gcwq = get_gcwq(cpu);
3780
3781		spin_lock_init(&gcwq->lock);
3782		INIT_LIST_HEAD(&gcwq->worklist);
3783		gcwq->cpu = cpu;
3784		gcwq->flags |= GCWQ_DISASSOCIATED;
3785
3786		INIT_LIST_HEAD(&gcwq->idle_list);
3787		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3788			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3789
3790		init_timer_deferrable(&gcwq->idle_timer);
3791		gcwq->idle_timer.function = idle_worker_timeout;
3792		gcwq->idle_timer.data = (unsigned long)gcwq;
3793
3794		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3795			    (unsigned long)gcwq);
3796
3797		ida_init(&gcwq->worker_ida);
3798
3799		gcwq->trustee_state = TRUSTEE_DONE;
3800		init_waitqueue_head(&gcwq->trustee_wait);
3801	}
3802
3803	/* create the initial worker */
3804	for_each_online_gcwq_cpu(cpu) {
3805		struct global_cwq *gcwq = get_gcwq(cpu);
3806		struct worker *worker;
3807
3808		if (cpu != WORK_CPU_UNBOUND)
3809			gcwq->flags &= ~GCWQ_DISASSOCIATED;
3810		worker = create_worker(gcwq, true);
3811		BUG_ON(!worker);
3812		spin_lock_irq(&gcwq->lock);
3813		start_worker(worker);
3814		spin_unlock_irq(&gcwq->lock);
3815	}
3816
3817	system_wq = alloc_workqueue("events", 0, 0);
3818	system_long_wq = alloc_workqueue("events_long", 0, 0);
3819	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3820	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3821					    WQ_UNBOUND_MAX_ACTIVE);
3822	system_freezable_wq = alloc_workqueue("events_freezable",
3823					      WQ_FREEZABLE, 0);
3824	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3825	       !system_unbound_wq || !system_freezable_wq);
3826	return 0;
3827}
3828early_initcall(init_workqueues);
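/*
 * Illustrative sketch, not part of the original file: the system
 * workqueues created above are what schedule_work() and friends feed.
 * A rough selection guide, using hypothetical work items that would
 * need INIT_WORK()/INIT_DELAYED_WORK() before use:
 */
#if 0
static struct work_struct quick_work;		/* short and cheap -> system_wq */
static struct delayed_work slow_scan_work;	/* may take a while -> system_long_wq */
static struct work_struct crunch_work;		/* CPU heavy, no locality needed -> system_unbound_wq */

static void my_kick_examples(void)
{
	queue_work(system_wq, &quick_work);
	queue_delayed_work(system_long_wq, &slow_scan_work, HZ);
	queue_work(system_unbound_wq, &crunch_work);
}
#endif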