kernel/workqueue.c (Linux v5.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kernel/workqueue.c - generic async execution with shared worker pool
   4 *
   5 * Copyright (C) 2002		Ingo Molnar
   6 *
   7 *   Derived from the taskqueue/keventd code by:
   8 *     David Woodhouse <dwmw2@infradead.org>
   9 *     Andrew Morton
  10 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  11 *     Theodore Ts'o <tytso@mit.edu>
  12 *
  13 * Made to use alloc_percpu by Christoph Lameter.
  14 *
  15 * Copyright (C) 2010		SUSE Linux Products GmbH
  16 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  17 *
   18 * This is the generic async execution mechanism.  Work items are
  19 * executed in process context.  The worker pool is shared and
  20 * automatically managed.  There are two worker pools for each CPU (one for
  21 * normal work items and the other for high priority ones) and some extra
  22 * pools for workqueues which are not bound to any specific CPU - the
  23 * number of these backing pools is dynamic.
  24 *
  25 * Please read Documentation/core-api/workqueue.rst for details.
  26 */
  27
  28#include <linux/export.h>
  29#include <linux/kernel.h>
  30#include <linux/sched.h>
  31#include <linux/init.h>
  32#include <linux/signal.h>
  33#include <linux/completion.h>
  34#include <linux/workqueue.h>
  35#include <linux/slab.h>
  36#include <linux/cpu.h>
  37#include <linux/notifier.h>
  38#include <linux/kthread.h>
  39#include <linux/hardirq.h>
  40#include <linux/mempolicy.h>
  41#include <linux/freezer.h>
  42#include <linux/debug_locks.h>
  43#include <linux/lockdep.h>
  44#include <linux/idr.h>
  45#include <linux/jhash.h>
  46#include <linux/hashtable.h>
  47#include <linux/rculist.h>
  48#include <linux/nodemask.h>
  49#include <linux/moduleparam.h>
  50#include <linux/uaccess.h>
  51#include <linux/sched/isolation.h>
  52#include <linux/nmi.h>
  53
  54#include "workqueue_internal.h"
  55
  56enum {
  57	/*
  58	 * worker_pool flags
  59	 *
   60	 * A bound pool is either associated with or disassociated from its CPU.
  61	 * While associated (!DISASSOCIATED), all workers are bound to the
  62	 * CPU and none has %WORKER_UNBOUND set and concurrency management
  63	 * is in effect.
  64	 *
  65	 * While DISASSOCIATED, the cpu may be offline and all workers have
  66	 * %WORKER_UNBOUND set and concurrency management disabled, and may
  67	 * be executing on any CPU.  The pool behaves as an unbound one.
  68	 *
  69	 * Note that DISASSOCIATED should be flipped only while holding
  70	 * wq_pool_attach_mutex to avoid changing binding state while
  71	 * worker_attach_to_pool() is in progress.
  72	 */
  73	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
  74	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
  75
  76	/* worker flags */
  77	WORKER_DIE		= 1 << 1,	/* die die die */
  78	WORKER_IDLE		= 1 << 2,	/* is idle */
  79	WORKER_PREP		= 1 << 3,	/* preparing to run works */
  80	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
  81	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
  82	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */
  83
  84	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
  85				  WORKER_UNBOUND | WORKER_REBOUND,
  86
  87	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */
  88
  89	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
  90	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
  91
  92	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
  93	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
  94
  95	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  96						/* call for help after 10ms
  97						   (min two ticks) */
  98	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
   99	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
 100
 101	/*
 102	 * Rescue workers are used only on emergencies and shared by
  103	 * Rescue workers are used only in emergencies and shared by
 104	 */
 105	RESCUER_NICE_LEVEL	= MIN_NICE,
 106	HIGHPRI_NICE_LEVEL	= MIN_NICE,
 107
 108	WQ_NAME_LEN		= 24,
 109};
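
/*
 * Editor's note (illustrative, not part of the original file): a quick
 * worked example of the MAYDAY timing above.  With HZ=1000,
 * MAYDAY_INITIAL_TIMEOUT evaluates to HZ/100 = 10 ticks, i.e. the 10ms
 * mentioned in the comment; with HZ=100, HZ/100 = 1 tick would fall below
 * the minimum, so the conditional picks 2 ticks (20ms) instead.
 * MAYDAY_INTERVAL = HZ/10 is 100ms worth of ticks at any HZ.
 */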
 110
 111/*
 112 * Structure fields follow one of the following exclusion rules.
 113 *
 114 * I: Modifiable by initialization/destruction paths and read-only for
 115 *    everyone else.
 116 *
 117 * P: Preemption protected.  Disabling preemption is enough and should
 118 *    only be modified and accessed from the local cpu.
 119 *
 120 * L: pool->lock protected.  Access with pool->lock held.
 121 *
 122 * X: During normal operation, modification requires pool->lock and should
 123 *    be done only from local cpu.  Either disabling preemption on local
 124 *    cpu or grabbing pool->lock is enough for read access.  If
 125 *    POOL_DISASSOCIATED is set, it's identical to L.
 126 *
 127 * A: wq_pool_attach_mutex protected.
 128 *
 129 * PL: wq_pool_mutex protected.
 130 *
 131 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 132 *
 133 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 134 *
 135 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 136 *      RCU for reads.
 137 *
 138 * WQ: wq->mutex protected.
 139 *
 140 * WR: wq->mutex protected for writes.  RCU protected for reads.
 141 *
 142 * MD: wq_mayday_lock protected.
 143 */
 144
 145/* struct worker is defined in workqueue_internal.h */
 146
 147struct worker_pool {
 148	spinlock_t		lock;		/* the pool lock */
 149	int			cpu;		/* I: the associated cpu */
 150	int			node;		/* I: the associated node ID */
 151	int			id;		/* I: pool ID */
 152	unsigned int		flags;		/* X: flags */
 153
 154	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
 155
 156	struct list_head	worklist;	/* L: list of pending works */
 157
 158	int			nr_workers;	/* L: total number of workers */
 159	int			nr_idle;	/* L: currently idle workers */
 160
 161	struct list_head	idle_list;	/* X: list of idle workers */
 162	struct timer_list	idle_timer;	/* L: worker idle timeout */
 163	struct timer_list	mayday_timer;	/* L: SOS timer for workers */
 164
  165	/* a worker is either on busy_hash or idle_list, or the manager */
 166	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 167						/* L: hash of busy workers */
 168
 169	struct worker		*manager;	/* L: purely informational */
 170	struct list_head	workers;	/* A: attached workers */
 171	struct completion	*detach_completion; /* all workers detached */
 172
 173	struct ida		worker_ida;	/* worker IDs for task name */
 174
 175	struct workqueue_attrs	*attrs;		/* I: worker attributes */
 176	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
 177	int			refcnt;		/* PL: refcnt for unbound pools */
 178
 179	/*
 180	 * The current concurrency level.  As it's likely to be accessed
 181	 * from other CPUs during try_to_wake_up(), put it in a separate
 182	 * cacheline.
 183	 */
 184	atomic_t		nr_running ____cacheline_aligned_in_smp;
 185
 186	/*
 187	 * Destruction of pool is RCU protected to allow dereferences
 188	 * from get_work_pool().
 189	 */
 190	struct rcu_head		rcu;
 191} ____cacheline_aligned_in_smp;
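
/*
 * Editor's sketch (illustrative, not part of the original file): how the
 * exclusion-rule annotations above are meant to be honoured in practice.
 * nr_workers is marked "L:", so a hypothetical reader takes pool->lock;
 * the helper name is made up for illustration only.
 */
static inline int example_read_nr_workers(struct worker_pool *pool)
{
	int nr;

	spin_lock_irq(&pool->lock);	/* "L:" fields need pool->lock */
	nr = pool->nr_workers;
	spin_unlock_irq(&pool->lock);

	return nr;
}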
 192
 193/*
 194 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 195 * of work_struct->data are used for flags and the remaining high bits
  196 * point to the pwq; thus, pwqs need to be aligned to at least
  197 * 1 << WORK_STRUCT_FLAG_BITS.
 198 */
 199struct pool_workqueue {
 200	struct worker_pool	*pool;		/* I: the associated pool */
 201	struct workqueue_struct *wq;		/* I: the owning workqueue */
 202	int			work_color;	/* L: current color */
 203	int			flush_color;	/* L: flushing color */
 204	int			refcnt;		/* L: reference count */
 205	int			nr_in_flight[WORK_NR_COLORS];
 206						/* L: nr of in_flight works */
 207	int			nr_active;	/* L: nr of active works */
 208	int			max_active;	/* L: max active works */
 209	struct list_head	delayed_works;	/* L: delayed works */
 210	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 211	struct list_head	mayday_node;	/* MD: node on wq->maydays */
 212
 213	/*
 214	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
 215	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
 216	 * itself is also RCU protected so that the first pwq can be
 217	 * determined without grabbing wq->mutex.
 218	 */
 219	struct work_struct	unbound_release_work;
 220	struct rcu_head		rcu;
 221} __aligned(1 << WORK_STRUCT_FLAG_BITS);
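
/*
 * Editor's note (illustrative, not part of the original file): because each
 * pool_workqueue is aligned to 1 << WORK_STRUCT_FLAG_BITS, its address has
 * zeroes in the low flag bits, so a pwq pointer and the WORK_STRUCT_* flags
 * can share work->data.  Roughly:
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PWQ | WORK_STRUCT_PENDING;
 *	pwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * which is what set_work_pwq() and get_work_pwq() further down implement.
 */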
 222
 223/*
 224 * Structure used to wait for workqueue flush.
 225 */
 226struct wq_flusher {
 227	struct list_head	list;		/* WQ: list of flushers */
 228	int			flush_color;	/* WQ: flush color waiting for */
 229	struct completion	done;		/* flush completion */
 230};
 231
 232struct wq_device;
 233
 234/*
 235 * The externally visible workqueue.  It relays the issued work items to
 236 * the appropriate worker_pool through its pool_workqueues.
 237 */
 238struct workqueue_struct {
 239	struct list_head	pwqs;		/* WR: all pwqs of this wq */
 240	struct list_head	list;		/* PR: list of all workqueues */
 241
 242	struct mutex		mutex;		/* protects this wq */
 243	int			work_color;	/* WQ: current work color */
 244	int			flush_color;	/* WQ: current flush color */
 245	atomic_t		nr_pwqs_to_flush; /* flush in progress */
 246	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
 247	struct list_head	flusher_queue;	/* WQ: flush waiters */
 248	struct list_head	flusher_overflow; /* WQ: flush overflow list */
 249
 250	struct list_head	maydays;	/* MD: pwqs requesting rescue */
 251	struct worker		*rescuer;	/* I: rescue worker */
 252
 253	int			nr_drainers;	/* WQ: drain in progress */
 254	int			saved_max_active; /* WQ: saved pwq max_active */
 255
 256	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
 257	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */
 258
 259#ifdef CONFIG_SYSFS
 260	struct wq_device	*wq_dev;	/* I: for sysfs interface */
 261#endif
 262#ifdef CONFIG_LOCKDEP
 263	char			*lock_name;
 264	struct lock_class_key	key;
 265	struct lockdep_map	lockdep_map;
 266#endif
 267	char			name[WQ_NAME_LEN]; /* I: workqueue name */
 268
 269	/*
 270	 * Destruction of workqueue_struct is RCU protected to allow walking
 271	 * the workqueues list without grabbing wq_pool_mutex.
 272	 * This is used to dump all workqueues from sysrq.
 273	 */
 274	struct rcu_head		rcu;
 275
 276	/* hot fields used during command issue, aligned to cacheline */
 277	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
 278	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
 279	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
 280};
 281
 282static struct kmem_cache *pwq_cache;
 283
 284static cpumask_var_t *wq_numa_possible_cpumask;
 285					/* possible CPUs of each node */
 286
 287static bool wq_disable_numa;
 288module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 289
 290/* see the comment above the definition of WQ_POWER_EFFICIENT */
 291static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 292module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 293
 294static bool wq_online;			/* can kworkers be created yet? */
 295
 296static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
 297
 298/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
 299static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 300
 301static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 302static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
 303static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 304static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 305
 306static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 307static bool workqueue_freezing;		/* PL: have wqs started freezing? */
 308
 309/* PL: allowable cpus for unbound wqs and work items */
 310static cpumask_var_t wq_unbound_cpumask;
 311
 312/* CPU where unbound work was last round robin scheduled from this CPU */
 313static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 314
 315/*
 316 * Local execution of unbound work items is no longer guaranteed.  The
 317 * following always forces round-robin CPU selection on unbound work items
 318 * to uncover usages which depend on it.
 319 */
 320#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
 321static bool wq_debug_force_rr_cpu = true;
 322#else
 323static bool wq_debug_force_rr_cpu = false;
 324#endif
 325module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 326
 327/* the per-cpu worker pools */
 328static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
 329
 330static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */
 331
 332/* PL: hash of all unbound pools keyed by pool->attrs */
 333static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 334
 335/* I: attributes used when instantiating standard unbound pools on demand */
 336static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 337
 338/* I: attributes used when instantiating ordered pools on demand */
 339static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 340
 341struct workqueue_struct *system_wq __read_mostly;
 342EXPORT_SYMBOL(system_wq);
 343struct workqueue_struct *system_highpri_wq __read_mostly;
 344EXPORT_SYMBOL_GPL(system_highpri_wq);
 345struct workqueue_struct *system_long_wq __read_mostly;
 346EXPORT_SYMBOL_GPL(system_long_wq);
 347struct workqueue_struct *system_unbound_wq __read_mostly;
 348EXPORT_SYMBOL_GPL(system_unbound_wq);
 349struct workqueue_struct *system_freezable_wq __read_mostly;
 350EXPORT_SYMBOL_GPL(system_freezable_wq);
 351struct workqueue_struct *system_power_efficient_wq __read_mostly;
 352EXPORT_SYMBOL_GPL(system_power_efficient_wq);
 353struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 354EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 355
 356static int worker_thread(void *__worker);
 357static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 358
 359#define CREATE_TRACE_POINTS
 360#include <trace/events/workqueue.h>
 361
 362#define assert_rcu_or_pool_mutex()					\
 363	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
 364			 !lockdep_is_held(&wq_pool_mutex),		\
 365			 "RCU or wq_pool_mutex should be held")
 366
 367#define assert_rcu_or_wq_mutex(wq)					\
 368	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
 369			 !lockdep_is_held(&wq->mutex),			\
 370			 "RCU or wq->mutex should be held")
 371
 372#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
 373	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
 374			 !lockdep_is_held(&wq->mutex) &&		\
 375			 !lockdep_is_held(&wq_pool_mutex),		\
 376			 "RCU, wq->mutex or wq_pool_mutex should be held")
 377
 378#define for_each_cpu_worker_pool(pool, cpu)				\
 379	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
 380	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
 381	     (pool)++)
 382
 383/**
 384 * for_each_pool - iterate through all worker_pools in the system
 385 * @pool: iteration cursor
 386 * @pi: integer used for iteration
 387 *
 388 * This must be called either with wq_pool_mutex held or RCU read
 389 * locked.  If the pool needs to be used beyond the locking in effect, the
 390 * caller is responsible for guaranteeing that the pool stays online.
 391 *
 392 * The if/else clause exists only for the lockdep assertion and can be
 393 * ignored.
 394 */
 395#define for_each_pool(pool, pi)						\
 396	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
 397		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
 398		else
 399
 400/**
 401 * for_each_pool_worker - iterate through all workers of a worker_pool
 402 * @worker: iteration cursor
 403 * @pool: worker_pool to iterate workers of
 404 *
 405 * This must be called with wq_pool_attach_mutex.
 406 *
 407 * The if/else clause exists only for the lockdep assertion and can be
 408 * ignored.
 409 */
 410#define for_each_pool_worker(worker, pool)				\
 411	list_for_each_entry((worker), &(pool)->workers, node)		\
 412		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
 413		else
 414
 415/**
 416 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 417 * @pwq: iteration cursor
 418 * @wq: the target workqueue
 419 *
 420 * This must be called either with wq->mutex held or RCU read locked.
 421 * If the pwq needs to be used beyond the locking in effect, the caller is
 422 * responsible for guaranteeing that the pwq stays online.
 423 *
 424 * The if/else clause exists only for the lockdep assertion and can be
 425 * ignored.
 426 */
 427#define for_each_pwq(pwq, wq)						\
 428	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
 429		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
 430		else
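
/*
 * Editor's sketch (illustrative, not part of the original file): typical
 * use of for_each_pwq() under one of the allowed contexts (RCU read lock
 * here).  The helper name is made up for illustration only.
 */
static int example_count_pwqs(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	int n = 0;

	rcu_read_lock();		/* satisfies assert_rcu_or_wq_mutex() */
	for_each_pwq(pwq, wq)
		n++;
	rcu_read_unlock();

	return n;
}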
 431
 432#ifdef CONFIG_DEBUG_OBJECTS_WORK
 433
 434static struct debug_obj_descr work_debug_descr;
 435
 436static void *work_debug_hint(void *addr)
 437{
 438	return ((struct work_struct *) addr)->func;
 439}
 440
 441static bool work_is_static_object(void *addr)
 442{
 443	struct work_struct *work = addr;
 444
 445	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
 446}
 447
 448/*
 449 * fixup_init is called when:
 450 * - an active object is initialized
 451 */
 452static bool work_fixup_init(void *addr, enum debug_obj_state state)
 453{
 454	struct work_struct *work = addr;
 455
 456	switch (state) {
 457	case ODEBUG_STATE_ACTIVE:
 458		cancel_work_sync(work);
 459		debug_object_init(work, &work_debug_descr);
 460		return true;
 461	default:
 462		return false;
 463	}
 464}
 465
 466/*
 467 * fixup_free is called when:
 468 * - an active object is freed
 469 */
 470static bool work_fixup_free(void *addr, enum debug_obj_state state)
 471{
 472	struct work_struct *work = addr;
 473
 474	switch (state) {
 475	case ODEBUG_STATE_ACTIVE:
 476		cancel_work_sync(work);
 477		debug_object_free(work, &work_debug_descr);
 478		return true;
 479	default:
 480		return false;
 481	}
 482}
 483
 484static struct debug_obj_descr work_debug_descr = {
 485	.name		= "work_struct",
 486	.debug_hint	= work_debug_hint,
 487	.is_static_object = work_is_static_object,
 488	.fixup_init	= work_fixup_init,
 489	.fixup_free	= work_fixup_free,
 490};
 491
 492static inline void debug_work_activate(struct work_struct *work)
 493{
 494	debug_object_activate(work, &work_debug_descr);
 495}
 496
 497static inline void debug_work_deactivate(struct work_struct *work)
 498{
 499	debug_object_deactivate(work, &work_debug_descr);
 500}
 501
 502void __init_work(struct work_struct *work, int onstack)
 503{
 504	if (onstack)
 505		debug_object_init_on_stack(work, &work_debug_descr);
 506	else
 507		debug_object_init(work, &work_debug_descr);
 508}
 509EXPORT_SYMBOL_GPL(__init_work);
 510
 511void destroy_work_on_stack(struct work_struct *work)
 512{
 513	debug_object_free(work, &work_debug_descr);
 514}
 515EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 516
 517void destroy_delayed_work_on_stack(struct delayed_work *work)
 518{
 519	destroy_timer_on_stack(&work->timer);
 520	debug_object_free(&work->work, &work_debug_descr);
 521}
 522EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
 523
 524#else
 525static inline void debug_work_activate(struct work_struct *work) { }
 526static inline void debug_work_deactivate(struct work_struct *work) { }
 527#endif
 528
 529/**
  530 * worker_pool_assign_id - allocate ID and assign it to @pool
 531 * @pool: the pool pointer of interest
 532 *
 533 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 534 * successfully, -errno on failure.
 535 */
 536static int worker_pool_assign_id(struct worker_pool *pool)
 537{
 538	int ret;
 539
 540	lockdep_assert_held(&wq_pool_mutex);
 541
 542	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
 543			GFP_KERNEL);
 544	if (ret >= 0) {
 545		pool->id = ret;
 546		return 0;
 547	}
 548	return ret;
 549}
 550
 551/**
 552 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 553 * @wq: the target workqueue
 554 * @node: the node ID
 555 *
 556 * This must be called with any of wq_pool_mutex, wq->mutex or RCU
 557 * read locked.
 558 * If the pwq needs to be used beyond the locking in effect, the caller is
 559 * responsible for guaranteeing that the pwq stays online.
 560 *
 561 * Return: The unbound pool_workqueue for @node.
 562 */
 563static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 564						  int node)
 565{
 566	assert_rcu_or_wq_mutex_or_pool_mutex(wq);
 567
 568	/*
 569	 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
 570	 * delayed item is pending.  The plan is to keep CPU -> NODE
 571	 * mapping valid and stable across CPU on/offlines.  Once that
 572	 * happens, this workaround can be removed.
 573	 */
 574	if (unlikely(node == NUMA_NO_NODE))
 575		return wq->dfl_pwq;
 576
 577	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 578}
 579
 580static unsigned int work_color_to_flags(int color)
 581{
 582	return color << WORK_STRUCT_COLOR_SHIFT;
 583}
 584
 585static int get_work_color(struct work_struct *work)
 586{
 587	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 588		((1 << WORK_STRUCT_COLOR_BITS) - 1);
 589}
 590
 591static int work_next_color(int color)
 592{
 593	return (color + 1) % WORK_NR_COLORS;
 594}
 595
 596/*
  597 * While queued, %WORK_STRUCT_PWQ is set and non-flag bits of a work's data
 598 * contain the pointer to the queued pwq.  Once execution starts, the flag
 599 * is cleared and the high bits contain OFFQ flags and pool ID.
 600 *
 601 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 602 * and clear_work_data() can be used to set the pwq, pool or clear
 603 * work->data.  These functions should only be called while the work is
 604 * owned - ie. while the PENDING bit is set.
 605 *
 606 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 607 * corresponding to a work.  Pool is available once the work has been
 608 * queued anywhere after initialization until it is sync canceled.  pwq is
 609 * available only while the work item is queued.
 610 *
 611 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 612 * canceled.  While being canceled, a work item may have its PENDING set
 613 * but stay off timer and worklist for arbitrarily long and nobody should
 614 * try to steal the PENDING bit.
 615 */
 616static inline void set_work_data(struct work_struct *work, unsigned long data,
 617				 unsigned long flags)
 618{
 619	WARN_ON_ONCE(!work_pending(work));
 620	atomic_long_set(&work->data, data | flags | work_static(work));
 621}
 622
 623static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
 624			 unsigned long extra_flags)
 625{
 626	set_work_data(work, (unsigned long)pwq,
 627		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
 628}
 629
 630static void set_work_pool_and_keep_pending(struct work_struct *work,
 631					   int pool_id)
 632{
 633	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
 634		      WORK_STRUCT_PENDING);
 635}
 636
 637static void set_work_pool_and_clear_pending(struct work_struct *work,
 638					    int pool_id)
 639{
 640	/*
 641	 * The following wmb is paired with the implied mb in
 642	 * test_and_set_bit(PENDING) and ensures all updates to @work made
 643	 * here are visible to and precede any updates by the next PENDING
 644	 * owner.
 645	 */
 646	smp_wmb();
 647	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 648	/*
 649	 * The following mb guarantees that previous clear of a PENDING bit
 650	 * will not be reordered with any speculative LOADS or STORES from
 651	 * work->current_func, which is executed afterwards.  This possible
 652	 * reordering can lead to a missed execution on attempt to queue
 653	 * the same @work.  E.g. consider this case:
 654	 *
 655	 *   CPU#0                         CPU#1
 656	 *   ----------------------------  --------------------------------
 657	 *
 658	 * 1  STORE event_indicated
 659	 * 2  queue_work_on() {
 660	 * 3    test_and_set_bit(PENDING)
 661	 * 4 }                             set_..._and_clear_pending() {
 662	 * 5                                 set_work_data() # clear bit
 663	 * 6                                 smp_mb()
 664	 * 7                               work->current_func() {
 665	 * 8				      LOAD event_indicated
 666	 *				   }
 667	 *
 668	 * Without an explicit full barrier speculative LOAD on line 8 can
 669	 * be executed before CPU#0 does STORE on line 1.  If that happens,
  670 * CPU#0 observes that the PENDING bit is still set and a new execution of
  671 * @work is not queued, in the hope that CPU#1 will eventually
 672	 * finish the queued @work.  Meanwhile CPU#1 does not see
 673	 * event_indicated is set, because speculative LOAD was executed
 674	 * before actual STORE.
 675	 */
 676	smp_mb();
 677}
 678
 679static void clear_work_data(struct work_struct *work)
 680{
 681	smp_wmb();	/* see set_work_pool_and_clear_pending() */
 682	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 683}
 684
 685static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 686{
 687	unsigned long data = atomic_long_read(&work->data);
 688
 689	if (data & WORK_STRUCT_PWQ)
 690		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 691	else
 692		return NULL;
 693}
 694
 695/**
 696 * get_work_pool - return the worker_pool a given work was associated with
 697 * @work: the work item of interest
 698 *
  699 * Pools are created and destroyed under wq_pool_mutex, and allow read
 700 * access under RCU read lock.  As such, this function should be
 701 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 702 *
 703 * All fields of the returned pool are accessible as long as the above
 704 * mentioned locking is in effect.  If the returned pool needs to be used
 705 * beyond the critical section, the caller is responsible for ensuring the
 706 * returned pool is and stays online.
 707 *
 708 * Return: The worker_pool @work was last associated with.  %NULL if none.
 709 */
 710static struct worker_pool *get_work_pool(struct work_struct *work)
 711{
 712	unsigned long data = atomic_long_read(&work->data);
 713	int pool_id;
 714
 715	assert_rcu_or_pool_mutex();
 716
 717	if (data & WORK_STRUCT_PWQ)
 718		return ((struct pool_workqueue *)
 719			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 720
 721	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 722	if (pool_id == WORK_OFFQ_POOL_NONE)
 723		return NULL;
 724
 725	return idr_find(&worker_pool_idr, pool_id);
 726}
 727
 728/**
 729 * get_work_pool_id - return the worker pool ID a given work is associated with
 730 * @work: the work item of interest
 731 *
 732 * Return: The worker_pool ID @work was last associated with.
 733 * %WORK_OFFQ_POOL_NONE if none.
 734 */
 735static int get_work_pool_id(struct work_struct *work)
 736{
 737	unsigned long data = atomic_long_read(&work->data);
 738
 739	if (data & WORK_STRUCT_PWQ)
 740		return ((struct pool_workqueue *)
 741			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
 742
 743	return data >> WORK_OFFQ_POOL_SHIFT;
 744}
 745
 746static void mark_work_canceling(struct work_struct *work)
 747{
 748	unsigned long pool_id = get_work_pool_id(work);
 749
 750	pool_id <<= WORK_OFFQ_POOL_SHIFT;
 751	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 752}
 753
 754static bool work_is_canceling(struct work_struct *work)
 755{
 756	unsigned long data = atomic_long_read(&work->data);
 757
 758	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 759}
 760
 761/*
 762 * Policy functions.  These define the policies on how the global worker
 763 * pools are managed.  Unless noted otherwise, these functions assume that
 764 * they're being called with pool->lock held.
 765 */
 766
 767static bool __need_more_worker(struct worker_pool *pool)
 768{
 769	return !atomic_read(&pool->nr_running);
 770}
 771
 772/*
 773 * Need to wake up a worker?  Called from anything but currently
 774 * running workers.
 775 *
 776 * Note that, because unbound workers never contribute to nr_running, this
 777 * function will always return %true for unbound pools as long as the
 778 * worklist isn't empty.
 779 */
 780static bool need_more_worker(struct worker_pool *pool)
 781{
 782	return !list_empty(&pool->worklist) && __need_more_worker(pool);
 783}
 784
 785/* Can I start working?  Called from busy but !running workers. */
 786static bool may_start_working(struct worker_pool *pool)
 787{
 788	return pool->nr_idle;
 789}
 790
 791/* Do I need to keep working?  Called from currently running workers. */
 792static bool keep_working(struct worker_pool *pool)
 793{
 794	return !list_empty(&pool->worklist) &&
 795		atomic_read(&pool->nr_running) <= 1;
 796}
 797
 798/* Do we need a new worker?  Called from manager. */
 799static bool need_to_create_worker(struct worker_pool *pool)
 800{
 801	return need_more_worker(pool) && !may_start_working(pool);
 802}
 803
 804/* Do we have too many workers and should some go away? */
 805static bool too_many_workers(struct worker_pool *pool)
 806{
 807	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
 808	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 809	int nr_busy = pool->nr_workers - nr_idle;
 810
 811	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 812}
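
/*
 * Editor's note (illustrative, not part of the original file): with
 * MAX_IDLE_WORKERS_RATIO = 4, the check above tolerates roughly one idle
 * worker per four busy ones plus a fixed slack of two.  For example, with
 * nr_busy = 16 the pool is considered to have too many workers once
 * nr_idle reaches 6, since (6 - 2) * 4 = 16 >= 16.
 */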
 813
 814/*
 815 * Wake up functions.
 816 */
 817
 818/* Return the first idle worker.  Safe with preemption disabled */
 819static struct worker *first_idle_worker(struct worker_pool *pool)
 820{
 821	if (unlikely(list_empty(&pool->idle_list)))
 822		return NULL;
 823
 824	return list_first_entry(&pool->idle_list, struct worker, entry);
 825}
 826
 827/**
 828 * wake_up_worker - wake up an idle worker
 829 * @pool: worker pool to wake worker from
 830 *
 831 * Wake up the first idle worker of @pool.
 832 *
 833 * CONTEXT:
 834 * spin_lock_irq(pool->lock).
 835 */
 836static void wake_up_worker(struct worker_pool *pool)
 837{
 838	struct worker *worker = first_idle_worker(pool);
 839
 840	if (likely(worker))
 841		wake_up_process(worker->task);
 842}
 843
 844/**
 845 * wq_worker_running - a worker is running again
 846 * @task: task waking up
 847 *
 848 * This function is called when a worker returns from schedule()
 849 */
 850void wq_worker_running(struct task_struct *task)
 851{
 852	struct worker *worker = kthread_data(task);
 853
 854	if (!worker->sleeping)
 855		return;
 856	if (!(worker->flags & WORKER_NOT_RUNNING))
 857		atomic_inc(&worker->pool->nr_running);
 858	worker->sleeping = 0;
 859}
 860
 861/**
 862 * wq_worker_sleeping - a worker is going to sleep
 863 * @task: task going to sleep
 864 *
 865 * This function is called from schedule() when a busy worker is
 866 * going to sleep.
 867 */
 868void wq_worker_sleeping(struct task_struct *task)
 869{
 870	struct worker *next, *worker = kthread_data(task);
 871	struct worker_pool *pool;
 872
 873	/*
 874	 * Rescuers, which may not have all the fields set up like normal
  875	 * workers, also reach here; let's not access anything before
 876	 * checking NOT_RUNNING.
 877	 */
 878	if (worker->flags & WORKER_NOT_RUNNING)
 879		return;
 880
 881	pool = worker->pool;
 882
 883	if (WARN_ON_ONCE(worker->sleeping))
 884		return;
 885
 886	worker->sleeping = 1;
 887	spin_lock_irq(&pool->lock);
 888
 889	/*
 890	 * The counterpart of the following dec_and_test, implied mb,
 891	 * worklist not empty test sequence is in insert_work().
 892	 * Please read comment there.
 893	 *
 894	 * NOT_RUNNING is clear.  This means that we're bound to and
 895	 * running on the local cpu w/ rq lock held and preemption
 896	 * disabled, which in turn means that none else could be
 897	 * manipulating idle_list, so dereferencing idle_list without pool
 898	 * lock is safe.
 899	 */
 900	if (atomic_dec_and_test(&pool->nr_running) &&
 901	    !list_empty(&pool->worklist)) {
 902		next = first_idle_worker(pool);
 903		if (next)
 904			wake_up_process(next->task);
 905	}
 906	spin_unlock_irq(&pool->lock);
 907}
 908
 909/**
 910 * wq_worker_last_func - retrieve worker's last work function
 911 * @task: Task to retrieve last work function of.
 912 *
 913 * Determine the last function a worker executed. This is called from
 914 * the scheduler to get a worker's last known identity.
 915 *
 916 * CONTEXT:
 917 * spin_lock_irq(rq->lock)
 918 *
 919 * This function is called during schedule() when a kworker is going
 920 * to sleep. It's used by psi to identify aggregation workers during
 921 * dequeuing, to allow periodic aggregation to shut-off when that
 922 * worker is the last task in the system or cgroup to go to sleep.
 923 *
 924 * As this function doesn't involve any workqueue-related locking, it
 925 * only returns stable values when called from inside the scheduler's
 926 * queuing and dequeuing paths, when @task, which must be a kworker,
 927 * is guaranteed to not be processing any works.
 928 *
 929 * Return:
 930 * The last work function %current executed as a worker, NULL if it
 931 * hasn't executed any work yet.
 932 */
 933work_func_t wq_worker_last_func(struct task_struct *task)
 934{
 935	struct worker *worker = kthread_data(task);
 936
 937	return worker->last_func;
 938}
 939
 940/**
 941 * worker_set_flags - set worker flags and adjust nr_running accordingly
 942 * @worker: self
 943 * @flags: flags to set
 944 *
 945 * Set @flags in @worker->flags and adjust nr_running accordingly.
 946 *
 947 * CONTEXT:
 948 * spin_lock_irq(pool->lock)
 949 */
 950static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 951{
 952	struct worker_pool *pool = worker->pool;
 953
 954	WARN_ON_ONCE(worker->task != current);
 955
 956	/* If transitioning into NOT_RUNNING, adjust nr_running. */
 957	if ((flags & WORKER_NOT_RUNNING) &&
 958	    !(worker->flags & WORKER_NOT_RUNNING)) {
 959		atomic_dec(&pool->nr_running);
 960	}
 961
 962	worker->flags |= flags;
 963}
 964
 965/**
 966 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 967 * @worker: self
 968 * @flags: flags to clear
 969 *
 970 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 971 *
 972 * CONTEXT:
 973 * spin_lock_irq(pool->lock)
 974 */
 975static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 976{
 977	struct worker_pool *pool = worker->pool;
 978	unsigned int oflags = worker->flags;
 979
 980	WARN_ON_ONCE(worker->task != current);
 981
 982	worker->flags &= ~flags;
 983
 984	/*
 985	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 986	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 987	 * of multiple flags, not a single flag.
 988	 */
 989	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 990		if (!(worker->flags & WORKER_NOT_RUNNING))
 991			atomic_inc(&pool->nr_running);
 992}
 993
 994/**
 995 * find_worker_executing_work - find worker which is executing a work
 996 * @pool: pool of interest
 997 * @work: work to find worker for
 998 *
 999 * Find a worker which is executing @work on @pool by searching
1000 * @pool->busy_hash which is keyed by the address of @work.  For a worker
1001 * to match, its current execution should match the address of @work and
1002 * its work function.  This is to avoid unwanted dependency between
1003 * unrelated work executions through a work item being recycled while still
1004 * being executed.
1005 *
1006 * This is a bit tricky.  A work item may be freed once its execution
1007 * starts and nothing prevents the freed area from being recycled for
1008 * another work item.  If the same work item address ends up being reused
1009 * before the original execution finishes, workqueue will identify the
1010 * recycled work item as currently executing and make it wait until the
1011 * current execution finishes, introducing an unwanted dependency.
1012 *
1013 * This function checks the work item address and work function to avoid
1014 * false positives.  Note that this isn't complete as one may construct a
1015 * work function which can introduce dependency onto itself through a
1016 * recycled work item.  Well, if somebody wants to shoot oneself in the
1017 * foot that badly, there's only so much we can do, and if such deadlock
1018 * actually occurs, it should be easy to locate the culprit work function.
1019 *
1020 * CONTEXT:
1021 * spin_lock_irq(pool->lock).
1022 *
1023 * Return:
1024 * Pointer to worker which is executing @work if found, %NULL
1025 * otherwise.
1026 */
1027static struct worker *find_worker_executing_work(struct worker_pool *pool,
1028						 struct work_struct *work)
1029{
1030	struct worker *worker;
1031
1032	hash_for_each_possible(pool->busy_hash, worker, hentry,
1033			       (unsigned long)work)
1034		if (worker->current_work == work &&
1035		    worker->current_func == work->func)
1036			return worker;
1037
1038	return NULL;
1039}
1040
1041/**
1042 * move_linked_works - move linked works to a list
1043 * @work: start of series of works to be scheduled
1044 * @head: target list to append @work to
1045 * @nextp: out parameter for nested worklist walking
1046 *
1047 * Schedule linked works starting from @work to @head.  Work series to
1048 * be scheduled starts at @work and includes any consecutive work with
1049 * WORK_STRUCT_LINKED set in its predecessor.
1050 *
1051 * If @nextp is not NULL, it's updated to point to the next work of
1052 * the last scheduled work.  This allows move_linked_works() to be
1053 * nested inside outer list_for_each_entry_safe().
1054 *
1055 * CONTEXT:
1056 * spin_lock_irq(pool->lock).
1057 */
1058static void move_linked_works(struct work_struct *work, struct list_head *head,
1059			      struct work_struct **nextp)
1060{
1061	struct work_struct *n;
1062
1063	/*
1064	 * Linked worklist will always end before the end of the list,
1065	 * use NULL for list head.
1066	 */
1067	list_for_each_entry_safe_from(work, n, NULL, entry) {
1068		list_move_tail(&work->entry, head);
1069		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1070			break;
1071	}
1072
1073	/*
1074	 * If we're already inside safe list traversal and have moved
1075	 * multiple works to the scheduled queue, the next position
1076	 * needs to be updated.
1077	 */
1078	if (nextp)
1079		*nextp = n;
1080}
1081
1082/**
1083 * get_pwq - get an extra reference on the specified pool_workqueue
1084 * @pwq: pool_workqueue to get
1085 *
1086 * Obtain an extra reference on @pwq.  The caller should guarantee that
1087 * @pwq has positive refcnt and be holding the matching pool->lock.
1088 */
1089static void get_pwq(struct pool_workqueue *pwq)
1090{
1091	lockdep_assert_held(&pwq->pool->lock);
1092	WARN_ON_ONCE(pwq->refcnt <= 0);
1093	pwq->refcnt++;
1094}
1095
1096/**
1097 * put_pwq - put a pool_workqueue reference
1098 * @pwq: pool_workqueue to put
1099 *
1100 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1101 * destruction.  The caller should be holding the matching pool->lock.
1102 */
1103static void put_pwq(struct pool_workqueue *pwq)
1104{
1105	lockdep_assert_held(&pwq->pool->lock);
1106	if (likely(--pwq->refcnt))
1107		return;
1108	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1109		return;
1110	/*
1111	 * @pwq can't be released under pool->lock, bounce to
1112	 * pwq_unbound_release_workfn().  This never recurses on the same
1113	 * pool->lock as this path is taken only for unbound workqueues and
1114	 * the release work item is scheduled on a per-cpu workqueue.  To
1115	 * avoid lockdep warning, unbound pool->locks are given lockdep
1116	 * subclass of 1 in get_unbound_pool().
1117	 */
1118	schedule_work(&pwq->unbound_release_work);
1119}
1120
1121/**
1122 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1123 * @pwq: pool_workqueue to put (can be %NULL)
1124 *
1125 * put_pwq() with locking.  This function also allows %NULL @pwq.
1126 */
1127static void put_pwq_unlocked(struct pool_workqueue *pwq)
1128{
1129	if (pwq) {
1130		/*
1131		 * As both pwqs and pools are RCU protected, the
1132		 * following lock operations are safe.
1133		 */
1134		spin_lock_irq(&pwq->pool->lock);
1135		put_pwq(pwq);
1136		spin_unlock_irq(&pwq->pool->lock);
1137	}
1138}
1139
1140static void pwq_activate_delayed_work(struct work_struct *work)
1141{
1142	struct pool_workqueue *pwq = get_work_pwq(work);
1143
1144	trace_workqueue_activate_work(work);
1145	if (list_empty(&pwq->pool->worklist))
1146		pwq->pool->watchdog_ts = jiffies;
1147	move_linked_works(work, &pwq->pool->worklist, NULL);
1148	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1149	pwq->nr_active++;
1150}
1151
1152static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1153{
1154	struct work_struct *work = list_first_entry(&pwq->delayed_works,
1155						    struct work_struct, entry);
1156
1157	pwq_activate_delayed_work(work);
1158}
1159
1160/**
1161 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1162 * @pwq: pwq of interest
1163 * @color: color of work which left the queue
1164 *
1165 * A work either has completed or is removed from pending queue,
1166 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1167 *
1168 * CONTEXT:
1169 * spin_lock_irq(pool->lock).
1170 */
1171static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1172{
1173	/* uncolored work items don't participate in flushing or nr_active */
1174	if (color == WORK_NO_COLOR)
1175		goto out_put;
1176
1177	pwq->nr_in_flight[color]--;
1178
1179	pwq->nr_active--;
1180	if (!list_empty(&pwq->delayed_works)) {
1181		/* one down, submit a delayed one */
1182		if (pwq->nr_active < pwq->max_active)
1183			pwq_activate_first_delayed(pwq);
1184	}
1185
1186	/* is flush in progress and are we at the flushing tip? */
1187	if (likely(pwq->flush_color != color))
1188		goto out_put;
1189
1190	/* are there still in-flight works? */
1191	if (pwq->nr_in_flight[color])
1192		goto out_put;
1193
1194	/* this pwq is done, clear flush_color */
1195	pwq->flush_color = -1;
1196
1197	/*
1198	 * If this was the last pwq, wake up the first flusher.  It
1199	 * will handle the rest.
1200	 */
1201	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1202		complete(&pwq->wq->first_flusher->done);
1203out_put:
1204	put_pwq(pwq);
1205}
1206
1207/**
1208 * try_to_grab_pending - steal work item from worklist and disable irq
1209 * @work: work item to steal
1210 * @is_dwork: @work is a delayed_work
1211 * @flags: place to store irq state
1212 *
1213 * Try to grab PENDING bit of @work.  This function can handle @work in any
1214 * stable state - idle, on timer or on worklist.
1215 *
1216 * Return:
1217 *  1		if @work was pending and we successfully stole PENDING
1218 *  0		if @work was idle and we claimed PENDING
1219 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
1220 *  -ENOENT	if someone else is canceling @work, this state may persist
1221 *		for arbitrarily long
1222 *
1223 * Note:
1224 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1225 * interrupted while holding PENDING and @work off queue, irq must be
1226 * disabled on entry.  This, combined with delayed_work->timer being
 1227 * irqsafe, ensures that we return -EAGAIN for a finite, short period of time.
1228 *
1229 * On successful return, >= 0, irq is disabled and the caller is
1230 * responsible for releasing it using local_irq_restore(*@flags).
1231 *
1232 * This function is safe to call from any context including IRQ handler.
1233 */
1234static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1235			       unsigned long *flags)
1236{
1237	struct worker_pool *pool;
1238	struct pool_workqueue *pwq;
1239
1240	local_irq_save(*flags);
1241
1242	/* try to steal the timer if it exists */
1243	if (is_dwork) {
1244		struct delayed_work *dwork = to_delayed_work(work);
1245
1246		/*
1247		 * dwork->timer is irqsafe.  If del_timer() fails, it's
1248		 * guaranteed that the timer is not queued anywhere and not
1249		 * running on the local CPU.
1250		 */
1251		if (likely(del_timer(&dwork->timer)))
1252			return 1;
1253	}
1254
1255	/* try to claim PENDING the normal way */
1256	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1257		return 0;
1258
1259	rcu_read_lock();
1260	/*
1261	 * The queueing is in progress, or it is already queued. Try to
1262	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1263	 */
1264	pool = get_work_pool(work);
1265	if (!pool)
1266		goto fail;
1267
1268	spin_lock(&pool->lock);
1269	/*
1270	 * work->data is guaranteed to point to pwq only while the work
1271	 * item is queued on pwq->wq, and both updating work->data to point
1272	 * to pwq on queueing and to pool on dequeueing are done under
1273	 * pwq->pool->lock.  This in turn guarantees that, if work->data
1274	 * points to pwq which is associated with a locked pool, the work
1275	 * item is currently queued on that pool.
1276	 */
1277	pwq = get_work_pwq(work);
1278	if (pwq && pwq->pool == pool) {
1279		debug_work_deactivate(work);
1280
1281		/*
1282		 * A delayed work item cannot be grabbed directly because
1283		 * it might have linked NO_COLOR work items which, if left
1284		 * on the delayed_list, will confuse pwq->nr_active
1285		 * management later on and cause stall.  Make sure the work
1286		 * item is activated before grabbing.
1287		 */
1288		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1289			pwq_activate_delayed_work(work);
1290
1291		list_del_init(&work->entry);
1292		pwq_dec_nr_in_flight(pwq, get_work_color(work));
1293
1294		/* work->data points to pwq iff queued, point to pool */
1295		set_work_pool_and_keep_pending(work, pool->id);
1296
1297		spin_unlock(&pool->lock);
1298		rcu_read_unlock();
1299		return 1;
1300	}
1301	spin_unlock(&pool->lock);
1302fail:
1303	rcu_read_unlock();
1304	local_irq_restore(*flags);
1305	if (work_is_canceling(work))
1306		return -ENOENT;
1307	cpu_relax();
1308	return -EAGAIN;
1309}
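
/*
 * Editor's sketch (illustrative, not part of the original file): the usual
 * calling pattern for try_to_grab_pending(), busy-retrying only on -EAGAIN
 * as described above.  mod_delayed_work_on() below uses exactly this loop;
 * the surrounding helper here is hypothetical.
 */
static bool example_grab_pending(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, false, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (ret < 0)
		return false;	/* -ENOENT: someone else is canceling */

	/* we now own PENDING with irqs disabled; do the surgery, then: */
	local_irq_restore(flags);
	return true;
}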
1310
1311/**
1312 * insert_work - insert a work into a pool
1313 * @pwq: pwq @work belongs to
1314 * @work: work to insert
1315 * @head: insertion point
1316 * @extra_flags: extra WORK_STRUCT_* flags to set
1317 *
1318 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1319 * work_struct flags.
1320 *
1321 * CONTEXT:
1322 * spin_lock_irq(pool->lock).
1323 */
1324static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1325			struct list_head *head, unsigned int extra_flags)
1326{
1327	struct worker_pool *pool = pwq->pool;
1328
1329	/* we own @work, set data and link */
1330	set_work_pwq(work, pwq, extra_flags);
1331	list_add_tail(&work->entry, head);
1332	get_pwq(pwq);
1333
1334	/*
1335	 * Ensure either wq_worker_sleeping() sees the above
1336	 * list_add_tail() or we see zero nr_running to avoid workers lying
1337	 * around lazily while there are works to be processed.
1338	 */
1339	smp_mb();
1340
1341	if (__need_more_worker(pool))
1342		wake_up_worker(pool);
1343}
1344
1345/*
1346 * Test whether @work is being queued from another work executing on the
1347 * same workqueue.
1348 */
1349static bool is_chained_work(struct workqueue_struct *wq)
1350{
1351	struct worker *worker;
1352
1353	worker = current_wq_worker();
1354	/*
1355	 * Return %true iff I'm a worker executing a work item on @wq.  If
1356	 * I'm @worker, it's safe to dereference it without locking.
1357	 */
1358	return worker && worker->current_pwq->wq == wq;
1359}
1360
1361/*
1362 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1363 * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
1364 * avoid perturbing sensitive tasks.
1365 */
1366static int wq_select_unbound_cpu(int cpu)
1367{
1368	static bool printed_dbg_warning;
1369	int new_cpu;
1370
1371	if (likely(!wq_debug_force_rr_cpu)) {
1372		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1373			return cpu;
1374	} else if (!printed_dbg_warning) {
1375		pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1376		printed_dbg_warning = true;
1377	}
1378
1379	if (cpumask_empty(wq_unbound_cpumask))
1380		return cpu;
1381
1382	new_cpu = __this_cpu_read(wq_rr_cpu_last);
1383	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1384	if (unlikely(new_cpu >= nr_cpu_ids)) {
1385		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1386		if (unlikely(new_cpu >= nr_cpu_ids))
1387			return cpu;
1388	}
1389	__this_cpu_write(wq_rr_cpu_last, new_cpu);
1390
1391	return new_cpu;
1392}
1393
1394static void __queue_work(int cpu, struct workqueue_struct *wq,
1395			 struct work_struct *work)
1396{
1397	struct pool_workqueue *pwq;
1398	struct worker_pool *last_pool;
1399	struct list_head *worklist;
1400	unsigned int work_flags;
1401	unsigned int req_cpu = cpu;
1402
1403	/*
1404	 * While a work item is PENDING && off queue, a task trying to
1405	 * steal the PENDING will busy-loop waiting for it to either get
1406	 * queued or lose PENDING.  Grabbing PENDING and queueing should
1407	 * happen with IRQ disabled.
1408	 */
1409	lockdep_assert_irqs_disabled();
1410
1411	debug_work_activate(work);
1412
1413	/* if draining, only works from the same workqueue are allowed */
1414	if (unlikely(wq->flags & __WQ_DRAINING) &&
1415	    WARN_ON_ONCE(!is_chained_work(wq)))
1416		return;
1417	rcu_read_lock();
1418retry:
1419	if (req_cpu == WORK_CPU_UNBOUND)
1420		cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1421
1422	/* pwq which will be used unless @work is executing elsewhere */
1423	if (!(wq->flags & WQ_UNBOUND))
1424		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1425	else
1426		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1427
1428	/*
1429	 * If @work was previously on a different pool, it might still be
1430	 * running there, in which case the work needs to be queued on that
1431	 * pool to guarantee non-reentrancy.
1432	 */
1433	last_pool = get_work_pool(work);
1434	if (last_pool && last_pool != pwq->pool) {
1435		struct worker *worker;
1436
1437		spin_lock(&last_pool->lock);
1438
1439		worker = find_worker_executing_work(last_pool, work);
1440
1441		if (worker && worker->current_pwq->wq == wq) {
1442			pwq = worker->current_pwq;
1443		} else {
1444			/* meh... not running there, queue here */
1445			spin_unlock(&last_pool->lock);
1446			spin_lock(&pwq->pool->lock);
1447		}
1448	} else {
1449		spin_lock(&pwq->pool->lock);
1450	}
1451
1452	/*
1453	 * pwq is determined and locked.  For unbound pools, we could have
1454	 * raced with pwq release and it could already be dead.  If its
1455	 * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1456	 * without another pwq replacing it in the numa_pwq_tbl or while
1457	 * work items are executing on it, so the retrying is guaranteed to
1458	 * make forward-progress.
1459	 */
1460	if (unlikely(!pwq->refcnt)) {
1461		if (wq->flags & WQ_UNBOUND) {
1462			spin_unlock(&pwq->pool->lock);
1463			cpu_relax();
1464			goto retry;
1465		}
1466		/* oops */
1467		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1468			  wq->name, cpu);
1469	}
1470
1471	/* pwq determined, queue */
1472	trace_workqueue_queue_work(req_cpu, pwq, work);
1473
1474	if (WARN_ON(!list_empty(&work->entry)))
1475		goto out;
1476
1477	pwq->nr_in_flight[pwq->work_color]++;
1478	work_flags = work_color_to_flags(pwq->work_color);
1479
1480	if (likely(pwq->nr_active < pwq->max_active)) {
1481		trace_workqueue_activate_work(work);
1482		pwq->nr_active++;
1483		worklist = &pwq->pool->worklist;
1484		if (list_empty(worklist))
1485			pwq->pool->watchdog_ts = jiffies;
1486	} else {
1487		work_flags |= WORK_STRUCT_DELAYED;
1488		worklist = &pwq->delayed_works;
1489	}
1490
1491	insert_work(pwq, work, worklist, work_flags);
1492
1493out:
1494	spin_unlock(&pwq->pool->lock);
1495	rcu_read_unlock();
1496}
1497
1498/**
1499 * queue_work_on - queue work on specific cpu
1500 * @cpu: CPU number to execute work on
1501 * @wq: workqueue to use
1502 * @work: work to queue
1503 *
 1504 * We queue the work to a specific CPU; the caller must ensure it
1505 * can't go away.
1506 *
1507 * Return: %false if @work was already on a queue, %true otherwise.
1508 */
1509bool queue_work_on(int cpu, struct workqueue_struct *wq,
1510		   struct work_struct *work)
1511{
1512	bool ret = false;
1513	unsigned long flags;
1514
1515	local_irq_save(flags);
1516
1517	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1518		__queue_work(cpu, wq, work);
1519		ret = true;
1520	}
1521
1522	local_irq_restore(flags);
1523	return ret;
1524}
1525EXPORT_SYMBOL(queue_work_on);
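
/*
 * Editor's sketch (illustrative, not part of the original file): minimal
 * queue_work_on() usage.  The work item, handler and target CPU 0 are made
 * up; as noted above, a real caller must also guarantee that the chosen
 * CPU cannot go away.
 */
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work ran on CPU%d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_kick(void)
{
	/* example_work_fn() will run in process context on CPU 0 */
	queue_work_on(0, system_wq, &example_work);
}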
1526
1527/**
1528 * workqueue_select_cpu_near - Select a CPU based on NUMA node
1529 * @node: NUMA node ID that we want to select a CPU from
1530 *
1531 * This function will attempt to find a "random" cpu available on a given
1532 * node. If there are no CPUs available on the given node it will return
1533 * WORK_CPU_UNBOUND indicating that we should just schedule to any
1534 * available CPU if we need to schedule this work.
1535 */
1536static int workqueue_select_cpu_near(int node)
1537{
1538	int cpu;
1539
1540	/* No point in doing this if NUMA isn't enabled for workqueues */
1541	if (!wq_numa_enabled)
1542		return WORK_CPU_UNBOUND;
1543
1544	/* Delay binding to CPU if node is not valid or online */
1545	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1546		return WORK_CPU_UNBOUND;
1547
1548	/* Use local node/cpu if we are already there */
1549	cpu = raw_smp_processor_id();
1550	if (node == cpu_to_node(cpu))
1551		return cpu;
1552
 1553	/* Use "random", otherwise known as "first", online CPU of node */
1554	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1555
1556	/* If CPU is valid return that, otherwise just defer */
1557	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1558}
1559
1560/**
1561 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1562 * @node: NUMA node that we are targeting the work for
1563 * @wq: workqueue to use
1564 * @work: work to queue
1565 *
1566 * We queue the work to a "random" CPU within a given NUMA node. The basic
1567 * idea here is to provide a way to somehow associate work with a given
1568 * NUMA node.
1569 *
1570 * This function will only make a best effort attempt at getting this onto
1571 * the right NUMA node. If no node is requested or the requested node is
1572 * offline then we just fall back to standard queue_work behavior.
1573 *
1574 * Currently the "random" CPU ends up being the first available CPU in the
1575 * intersection of cpu_online_mask and the cpumask of the node, unless we
1576 * are running on the node. In that case we just use the current CPU.
1577 *
1578 * Return: %false if @work was already on a queue, %true otherwise.
1579 */
1580bool queue_work_node(int node, struct workqueue_struct *wq,
1581		     struct work_struct *work)
1582{
1583	unsigned long flags;
1584	bool ret = false;
1585
1586	/*
1587	 * This current implementation is specific to unbound workqueues.
1588	 * Specifically we only return the first available CPU for a given
1589	 * node instead of cycling through individual CPUs within the node.
1590	 *
1591	 * If this is used with a per-cpu workqueue then the logic in
1592	 * workqueue_select_cpu_near would need to be updated to allow for
1593	 * some round robin type logic.
1594	 */
1595	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1596
1597	local_irq_save(flags);
1598
1599	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1600		int cpu = workqueue_select_cpu_near(node);
1601
1602		__queue_work(cpu, wq, work);
1603		ret = true;
1604	}
1605
1606	local_irq_restore(flags);
1607	return ret;
1608}
1609EXPORT_SYMBOL_GPL(queue_work_node);
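
/*
 * Editor's sketch (illustrative, not part of the original file): queueing
 * work near a given NUMA node.  As the comment above notes, this is meant
 * for unbound workqueues; @unbound_wq, @node and @work are hypothetical
 * caller-provided arguments.
 */
static void example_queue_on_node(struct workqueue_struct *unbound_wq,
				  int node, struct work_struct *work)
{
	/* falls back to regular queue_work() behaviour if @node has no online CPU */
	queue_work_node(node, unbound_wq, work);
}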
1610
1611void delayed_work_timer_fn(struct timer_list *t)
1612{
1613	struct delayed_work *dwork = from_timer(dwork, t, timer);
1614
1615	/* should have been called from irqsafe timer with irq already off */
1616	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
1617}
1618EXPORT_SYMBOL(delayed_work_timer_fn);
1619
1620static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1621				struct delayed_work *dwork, unsigned long delay)
1622{
1623	struct timer_list *timer = &dwork->timer;
1624	struct work_struct *work = &dwork->work;
1625
1626	WARN_ON_ONCE(!wq);
1627	WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1628	WARN_ON_ONCE(timer_pending(timer));
1629	WARN_ON_ONCE(!list_empty(&work->entry));
1630
1631	/*
1632	 * If @delay is 0, queue @dwork->work immediately.  This is for
1633	 * both optimization and correctness.  The earliest @timer can
1634	 * expire is on the closest next tick and delayed_work users depend
1635	 * on that there's no such delay when @delay is 0.
1636	 */
1637	if (!delay) {
1638		__queue_work(cpu, wq, &dwork->work);
1639		return;
1640	}
1641
1642	dwork->wq = wq;
1643	dwork->cpu = cpu;
1644	timer->expires = jiffies + delay;
1645
1646	if (unlikely(cpu != WORK_CPU_UNBOUND))
1647		add_timer_on(timer, cpu);
1648	else
1649		add_timer(timer);
1650}
1651
1652/**
1653 * queue_delayed_work_on - queue work on specific CPU after delay
1654 * @cpu: CPU number to execute work on
1655 * @wq: workqueue to use
1656 * @dwork: work to queue
1657 * @delay: number of jiffies to wait before queueing
1658 *
1659 * Return: %false if @work was already on a queue, %true otherwise.  If
1660 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1661 * execution.
1662 */
1663bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1664			   struct delayed_work *dwork, unsigned long delay)
1665{
1666	struct work_struct *work = &dwork->work;
1667	bool ret = false;
1668	unsigned long flags;
1669
1670	/* read the comment in __queue_work() */
1671	local_irq_save(flags);
1672
1673	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1674		__queue_delayed_work(cpu, wq, dwork, delay);
1675		ret = true;
1676	}
1677
1678	local_irq_restore(flags);
1679	return ret;
1680}
1681EXPORT_SYMBOL(queue_delayed_work_on);
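/*
 * Illustrative sketch (not part of workqueue.c): queueing delayed work on a
 * specific CPU.  The poll interval and names below are hypothetical.
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);
 *
 *	// re-arm the poll on CPU 0, roughly 100ms from now
 *	queue_delayed_work_on(0, system_wq, &my_poll_work,
 *			      msecs_to_jiffies(100));
 *
 * With @delay == 0 the work is queued immediately, as noted above.
 */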
1682
1683/**
1684 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1685 * @cpu: CPU number to execute work on
1686 * @wq: workqueue to use
1687 * @dwork: work to queue
1688 * @delay: number of jiffies to wait before queueing
1689 *
1690 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1691 * modify @dwork's timer so that it expires after @delay.  If @delay is
1692 * zero, @work is guaranteed to be scheduled immediately regardless of its
1693 * current state.
1694 *
1695 * Return: %false if @dwork was idle and queued, %true if @dwork was
1696 * pending and its timer was modified.
1697 *
1698 * This function is safe to call from any context including IRQ handler.
1699 * See try_to_grab_pending() for details.
1700 */
1701bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1702			 struct delayed_work *dwork, unsigned long delay)
1703{
1704	unsigned long flags;
1705	int ret;
1706
1707	do {
1708		ret = try_to_grab_pending(&dwork->work, true, &flags);
1709	} while (unlikely(ret == -EAGAIN));
1710
1711	if (likely(ret >= 0)) {
1712		__queue_delayed_work(cpu, wq, dwork, delay);
1713		local_irq_restore(flags);
1714	}
1715
1716	/* -ENOENT from try_to_grab_pending() becomes %true */
1717	return ret;
1718}
1719EXPORT_SYMBOL_GPL(mod_delayed_work_on);
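/*
 * Illustrative sketch (not part of workqueue.c): mod_delayed_work_on() as a
 * "restart the timer" primitive, e.g. for a hypothetical inactivity timeout
 * ("my_timeout_work" is not a real symbol).
 *
 *	// arm or push back the timeout; safe even from IRQ context
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_timeout_work,
 *			    msecs_to_jiffies(5000));
 *
 * Unlike queue_delayed_work_on(), a pending timer is replaced instead of
 * returning %false, so repeated calls keep deferring execution.
 */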
1720
1721static void rcu_work_rcufn(struct rcu_head *rcu)
1722{
1723	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1724
1725	/* read the comment in __queue_work() */
1726	local_irq_disable();
1727	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1728	local_irq_enable();
1729}
1730
1731/**
1732 * queue_rcu_work - queue work after a RCU grace period
1733 * @wq: workqueue to use
1734 * @rwork: work to queue
1735 *
1736 * Return: %false if @rwork was already pending, %true otherwise.  Note
1737 * that a full RCU grace period is guaranteed only after a %true return.
1738 * While @rwork is guaranteed to be executed after a %false return, the
1739 * execution may happen before a full RCU grace period has passed.
1740 */
1741bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1742{
1743	struct work_struct *work = &rwork->work;
1744
1745	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1746		rwork->wq = wq;
1747		call_rcu(&rwork->rcu, rcu_work_rcufn);
1748		return true;
1749	}
1750
1751	return false;
1752}
1753EXPORT_SYMBOL(queue_rcu_work);
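/*
 * Illustrative sketch (not part of workqueue.c): deferring cleanup until
 * after an RCU grace period and then running it in process context.  The
 * "my_obj" structure and function names are hypothetical.
 *
 *	struct my_obj {
 *		struct rcu_work	free_rwork;
 *		// ... RCU-protected payload ...
 *	};
 *
 *	static void my_obj_free_workfn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, free_rwork);
 *		kfree(obj);
 *	}
 *
 *	// instead of freeing directly from call_rcu() in softirq context:
 *	INIT_RCU_WORK(&obj->free_rwork, my_obj_free_workfn);
 *	queue_rcu_work(system_wq, &obj->free_rwork);
 */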
1754
1755/**
1756 * worker_enter_idle - enter idle state
1757 * @worker: worker which is entering idle state
1758 *
1759 * @worker is entering idle state.  Update stats and idle timer if
1760 * necessary.
1761 *
1762 * LOCKING:
1763 * spin_lock_irq(pool->lock).
1764 */
1765static void worker_enter_idle(struct worker *worker)
1766{
1767	struct worker_pool *pool = worker->pool;
1768
1769	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1770	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
1771			 (worker->hentry.next || worker->hentry.pprev)))
1772		return;
1773
1774	/* can't use worker_set_flags(), also called from create_worker() */
1775	worker->flags |= WORKER_IDLE;
1776	pool->nr_idle++;
1777	worker->last_active = jiffies;
1778
1779	/* idle_list is LIFO */
1780	list_add(&worker->entry, &pool->idle_list);
1781
1782	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1783		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1784
1785	/*
1786	 * Sanity check nr_running.  Because unbind_workers() releases
1787	 * pool->lock between setting %WORKER_UNBOUND and zapping
1788	 * nr_running, the warning may trigger spuriously.  Check iff
1789	 * unbind is not in progress.
1790	 */
1791	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1792		     pool->nr_workers == pool->nr_idle &&
1793		     atomic_read(&pool->nr_running));
1794}
1795
1796/**
1797 * worker_leave_idle - leave idle state
1798 * @worker: worker which is leaving idle state
1799 *
1800 * @worker is leaving idle state.  Update stats.
1801 *
1802 * LOCKING:
1803 * spin_lock_irq(pool->lock).
1804 */
1805static void worker_leave_idle(struct worker *worker)
1806{
1807	struct worker_pool *pool = worker->pool;
1808
1809	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1810		return;
1811	worker_clr_flags(worker, WORKER_IDLE);
1812	pool->nr_idle--;
1813	list_del_init(&worker->entry);
1814}
1815
1816static struct worker *alloc_worker(int node)
1817{
1818	struct worker *worker;
1819
1820	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1821	if (worker) {
1822		INIT_LIST_HEAD(&worker->entry);
1823		INIT_LIST_HEAD(&worker->scheduled);
1824		INIT_LIST_HEAD(&worker->node);
1825		/* on creation a worker is in !idle && prep state */
1826		worker->flags = WORKER_PREP;
1827	}
1828	return worker;
1829}
1830
1831/**
1832 * worker_attach_to_pool() - attach a worker to a pool
1833 * @worker: worker to be attached
1834 * @pool: the target pool
1835 *
1836 * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
1837 * cpu-binding of @worker are kept coordinated with the pool across
1838 * cpu-[un]hotplugs.
1839 */
1840static void worker_attach_to_pool(struct worker *worker,
1841				   struct worker_pool *pool)
1842{
1843	mutex_lock(&wq_pool_attach_mutex);
1844
1845	/*
1846	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1847	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
1848	 */
1849	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1850
1851	/*
1852	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1853	 * stable across this function.  See the comments above the flag
1854	 * definition for details.
1855	 */
1856	if (pool->flags & POOL_DISASSOCIATED)
1857		worker->flags |= WORKER_UNBOUND;
1858
1859	list_add_tail(&worker->node, &pool->workers);
1860	worker->pool = pool;
1861
1862	mutex_unlock(&wq_pool_attach_mutex);
1863}
1864
1865/**
1866 * worker_detach_from_pool() - detach a worker from its pool
1867 * @worker: worker which is attached to its pool
1868 *
1869 * Undo the attaching which had been done in worker_attach_to_pool().  The
1870 * caller worker shouldn't access the pool after being detached unless it
1871 * has another reference to the pool.
1872 */
1873static void worker_detach_from_pool(struct worker *worker)
1874{
1875	struct worker_pool *pool = worker->pool;
1876	struct completion *detach_completion = NULL;
1877
1878	mutex_lock(&wq_pool_attach_mutex);
1879
1880	list_del(&worker->node);
1881	worker->pool = NULL;
1882
1883	if (list_empty(&pool->workers))
1884		detach_completion = pool->detach_completion;
1885	mutex_unlock(&wq_pool_attach_mutex);
1886
1887	/* clear leftover flags without pool->lock after it is detached */
1888	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1889
1890	if (detach_completion)
1891		complete(detach_completion);
1892}
1893
1894/**
1895 * create_worker - create a new workqueue worker
1896 * @pool: pool the new worker will belong to
1897 *
1898 * Create and start a new worker which is attached to @pool.
1899 *
1900 * CONTEXT:
1901 * Might sleep.  Does GFP_KERNEL allocations.
1902 *
1903 * Return:
1904 * Pointer to the newly created worker.
1905 */
1906static struct worker *create_worker(struct worker_pool *pool)
1907{
1908	struct worker *worker = NULL;
1909	int id = -1;
1910	char id_buf[16];
1911
1912	/* ID is needed to determine kthread name */
1913	id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1914	if (id < 0)
1915		goto fail;
1916
1917	worker = alloc_worker(pool->node);
1918	if (!worker)
1919		goto fail;
1920
1921	worker->id = id;
1922
1923	if (pool->cpu >= 0)
1924		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1925			 pool->attrs->nice < 0  ? "H" : "");
1926	else
1927		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1928
1929	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1930					      "kworker/%s", id_buf);
1931	if (IS_ERR(worker->task))
1932		goto fail;
1933
1934	set_user_nice(worker->task, pool->attrs->nice);
1935	kthread_bind_mask(worker->task, pool->attrs->cpumask);
1936
1937	/* successful, attach the worker to the pool */
1938	worker_attach_to_pool(worker, pool);
1939
1940	/* start the newly created worker */
1941	spin_lock_irq(&pool->lock);
1942	worker->pool->nr_workers++;
1943	worker_enter_idle(worker);
1944	wake_up_process(worker->task);
1945	spin_unlock_irq(&pool->lock);
1946
1947	return worker;
1948
1949fail:
1950	if (id >= 0)
1951		ida_simple_remove(&pool->worker_ida, id);
1952	kfree(worker);
1953	return NULL;
1954}
1955
1956/**
1957 * destroy_worker - destroy a workqueue worker
1958 * @worker: worker to be destroyed
1959 *
1960 * Destroy @worker and adjust @pool stats accordingly.  The worker should
1961 * be idle.
1962 *
1963 * CONTEXT:
1964 * spin_lock_irq(pool->lock).
1965 */
1966static void destroy_worker(struct worker *worker)
1967{
1968	struct worker_pool *pool = worker->pool;
1969
1970	lockdep_assert_held(&pool->lock);
1971
1972	/* sanity check frenzy */
1973	if (WARN_ON(worker->current_work) ||
1974	    WARN_ON(!list_empty(&worker->scheduled)) ||
1975	    WARN_ON(!(worker->flags & WORKER_IDLE)))
1976		return;
1977
1978	pool->nr_workers--;
1979	pool->nr_idle--;
1980
1981	list_del_init(&worker->entry);
1982	worker->flags |= WORKER_DIE;
1983	wake_up_process(worker->task);
1984}
1985
1986static void idle_worker_timeout(struct timer_list *t)
1987{
1988	struct worker_pool *pool = from_timer(pool, t, idle_timer);
1989
1990	spin_lock_irq(&pool->lock);
1991
1992	while (too_many_workers(pool)) {
1993		struct worker *worker;
1994		unsigned long expires;
1995
1996		/* idle_list is kept in LIFO order, check the last one */
1997		worker = list_entry(pool->idle_list.prev, struct worker, entry);
1998		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1999
2000		if (time_before(jiffies, expires)) {
2001			mod_timer(&pool->idle_timer, expires);
2002			break;
2003		}
2004
2005		destroy_worker(worker);
2006	}
2007
2008	spin_unlock_irq(&pool->lock);
2009}
2010
2011static void send_mayday(struct work_struct *work)
2012{
2013	struct pool_workqueue *pwq = get_work_pwq(work);
2014	struct workqueue_struct *wq = pwq->wq;
2015
2016	lockdep_assert_held(&wq_mayday_lock);
2017
2018	if (!wq->rescuer)
2019		return;
2020
2021	/* mayday mayday mayday */
2022	if (list_empty(&pwq->mayday_node)) {
2023		/*
2024		 * If @pwq is for an unbound wq, its base ref may be put at
2025		 * any time due to an attribute change.  Pin @pwq until the
2026		 * rescuer is done with it.
2027		 */
2028		get_pwq(pwq);
2029		list_add_tail(&pwq->mayday_node, &wq->maydays);
2030		wake_up_process(wq->rescuer->task);
2031	}
2032}
2033
2034static void pool_mayday_timeout(struct timer_list *t)
2035{
2036	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2037	struct work_struct *work;
2038
2039	spin_lock_irq(&pool->lock);
2040	spin_lock(&wq_mayday_lock);		/* for wq->maydays */
2041
2042	if (need_to_create_worker(pool)) {
2043		/*
2044		 * We've been trying to create a new worker but
2045		 * haven't been successful.  We might be hitting an
2046		 * allocation deadlock.  Send distress signals to
2047		 * rescuers.
2048		 */
2049		list_for_each_entry(work, &pool->worklist, entry)
2050			send_mayday(work);
2051	}
2052
2053	spin_unlock(&wq_mayday_lock);
2054	spin_unlock_irq(&pool->lock);
2055
2056	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2057}
2058
2059/**
2060 * maybe_create_worker - create a new worker if necessary
2061 * @pool: pool to create a new worker for
2062 *
2063 * Create a new worker for @pool if necessary.  @pool is guaranteed to
2064 * have at least one idle worker on return from this function.  If
2065 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2066 * sent to all rescuers with works scheduled on @pool to resolve
2067 * possible allocation deadlock.
2068 *
2069 * On return, need_to_create_worker() is guaranteed to be %false and
2070 * may_start_working() %true.
2071 *
2072 * LOCKING:
2073 * spin_lock_irq(pool->lock) which may be released and regrabbed
2074 * multiple times.  Does GFP_KERNEL allocations.  Called only from
2075 * manager.
2076 */
2077static void maybe_create_worker(struct worker_pool *pool)
2078__releases(&pool->lock)
2079__acquires(&pool->lock)
2080{
2081restart:
2082	spin_unlock_irq(&pool->lock);
2083
2084	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2085	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2086
2087	while (true) {
2088		if (create_worker(pool) || !need_to_create_worker(pool))
2089			break;
2090
2091		schedule_timeout_interruptible(CREATE_COOLDOWN);
2092
2093		if (!need_to_create_worker(pool))
2094			break;
2095	}
2096
2097	del_timer_sync(&pool->mayday_timer);
2098	spin_lock_irq(&pool->lock);
2099	/*
2100	 * This is necessary even after a new worker was just successfully
2101	 * created as @pool->lock was dropped and the new worker might have
2102	 * already become busy.
2103	 */
2104	if (need_to_create_worker(pool))
2105		goto restart;
2106}
2107
2108/**
2109 * manage_workers - manage worker pool
2110 * @worker: self
2111 *
2112 * Assume the manager role and manage the worker pool @worker belongs
2113 * to.  At any given time, there can be only zero or one manager per
2114 * pool.  The exclusion is handled automatically by this function.
2115 *
2116 * The caller can safely start processing works on false return.  On
2117 * true return, it's guaranteed that need_to_create_worker() is false
2118 * and may_start_working() is true.
2119 *
2120 * CONTEXT:
2121 * spin_lock_irq(pool->lock) which may be released and regrabbed
2122 * multiple times.  Does GFP_KERNEL allocations.
2123 *
2124 * Return:
2125 * %false if the pool doesn't need management and the caller can safely
2126 * start processing works, %true if management function was performed and
2127 * the conditions that the caller verified before calling the function may
2128 * no longer be true.
2129 */
2130static bool manage_workers(struct worker *worker)
2131{
2132	struct worker_pool *pool = worker->pool;
2133
2134	if (pool->flags & POOL_MANAGER_ACTIVE)
2135		return false;
2136
2137	pool->flags |= POOL_MANAGER_ACTIVE;
2138	pool->manager = worker;
2139
2140	maybe_create_worker(pool);
2141
2142	pool->manager = NULL;
2143	pool->flags &= ~POOL_MANAGER_ACTIVE;
2144	wake_up(&wq_manager_wait);
2145	return true;
2146}
2147
2148/**
2149 * process_one_work - process single work
2150 * @worker: self
2151 * @work: work to process
2152 *
2153 * Process @work.  This function contains all the logic necessary to
2154 * process a single work including synchronization against and
2155 * interaction with other workers on the same cpu, queueing and
2156 * flushing.  As long as context requirement is met, any worker can
2157 * call this function to process a work.
2158 *
2159 * CONTEXT:
2160 * spin_lock_irq(pool->lock) which is released and regrabbed.
2161 */
2162static void process_one_work(struct worker *worker, struct work_struct *work)
2163__releases(&pool->lock)
2164__acquires(&pool->lock)
2165{
2166	struct pool_workqueue *pwq = get_work_pwq(work);
2167	struct worker_pool *pool = worker->pool;
2168	bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2169	int work_color;
2170	struct worker *collision;
2171#ifdef CONFIG_LOCKDEP
2172	/*
2173	 * It is permissible to free the struct work_struct from
2174	 * inside the function that is called from it, this we need to
2175	 * take into account for lockdep too.  To avoid bogus "held
2176	 * lock freed" warnings as well as problems when looking into
2177	 * work->lockdep_map, make a copy and use that here.
2178	 */
2179	struct lockdep_map lockdep_map;
2180
2181	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2182#endif
2183	/* ensure we're on the correct CPU */
2184	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2185		     raw_smp_processor_id() != pool->cpu);
2186
2187	/*
2188	 * A single work shouldn't be executed concurrently by
2189	 * multiple workers on a single cpu.  Check whether anyone is
2190	 * already processing the work.  If so, defer the work to the
2191	 * currently executing one.
2192	 */
2193	collision = find_worker_executing_work(pool, work);
2194	if (unlikely(collision)) {
2195		move_linked_works(work, &collision->scheduled, NULL);
2196		return;
2197	}
2198
2199	/* claim and dequeue */
2200	debug_work_deactivate(work);
2201	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2202	worker->current_work = work;
2203	worker->current_func = work->func;
2204	worker->current_pwq = pwq;
2205	work_color = get_work_color(work);
2206
2207	/*
2208	 * Record wq name for cmdline and debug reporting, may get
2209	 * overridden through set_worker_desc().
2210	 */
2211	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2212
2213	list_del_init(&work->entry);
2214
2215	/*
2216	 * CPU intensive works don't participate in concurrency management.
2217	 * They're the scheduler's responsibility.  This takes @worker out
2218	 * of concurrency management and the next code block will chain
2219	 * execution of the pending work items.
2220	 */
2221	if (unlikely(cpu_intensive))
2222		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2223
2224	/*
2225	 * Wake up another worker if necessary.  The condition is always
2226	 * false for normal per-cpu workers since nr_running would always
2227	 * be >= 1 at this point.  This is used to chain execution of the
2228	 * pending work items for WORKER_NOT_RUNNING workers such as the
2229	 * UNBOUND and CPU_INTENSIVE ones.
2230	 */
2231	if (need_more_worker(pool))
2232		wake_up_worker(pool);
2233
2234	/*
2235	 * Record the last pool and clear PENDING which should be the last
2236	 * update to @work.  Also, do this inside @pool->lock so that
2237	 * PENDING and queued state changes happen together while IRQ is
2238	 * disabled.
2239	 */
2240	set_work_pool_and_clear_pending(work, pool->id);
2241
2242	spin_unlock_irq(&pool->lock);
2243
2244	lock_map_acquire(&pwq->wq->lockdep_map);
2245	lock_map_acquire(&lockdep_map);
2246	/*
2247	 * Strictly speaking we should mark the invariant state without holding
2248	 * any locks, that is, before these two lock_map_acquire()'s.
2249	 *
2250	 * However, that would result in:
2251	 *
2252	 *   A(W1)
2253	 *   WFC(C)
2254	 *		A(W1)
2255	 *		C(C)
2256	 *
2257	 * Which would create W1->C->W1 dependencies, even though there is no
2258	 * actual deadlock possible. There are two solutions, using a
2259	 * read-recursive acquire on the work(queue) 'locks', but this will then
2260	 * hit the lockdep limitation on recursive locks, or simply discard
2261	 * these locks.
2262	 *
2263	 * AFAICT there is no possible deadlock scenario between the
2264	 * flush_work() and complete() primitives (except for single-threaded
2265	 * workqueues), so hiding them isn't a problem.
2266	 */
2267	lockdep_invariant_state(true);
2268	trace_workqueue_execute_start(work);
2269	worker->current_func(work);
2270	/*
2271	 * While we must be careful to not use "work" after this, the trace
2272	 * point will only record its address.
2273	 */
2274	trace_workqueue_execute_end(work);
2275	lock_map_release(&lockdep_map);
2276	lock_map_release(&pwq->wq->lockdep_map);
2277
2278	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2279		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2280		       "     last function: %ps\n",
2281		       current->comm, preempt_count(), task_pid_nr(current),
2282		       worker->current_func);
2283		debug_show_held_locks(current);
2284		dump_stack();
2285	}
2286
2287	/*
2288	 * The following prevents a kworker from hogging CPU on !PREEMPT
2289	 * kernels, where a requeueing work item waiting for something to
2290	 * happen could deadlock with stop_machine as such work item could
2291	 * indefinitely requeue itself while all other CPUs are trapped in
2292	 * stop_machine. At the same time, report a quiescent RCU state so
2293	 * the same condition doesn't freeze RCU.
2294	 */
2295	cond_resched();
2296
2297	spin_lock_irq(&pool->lock);
2298
2299	/* clear cpu intensive status */
2300	if (unlikely(cpu_intensive))
2301		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2302
2303	/* tag the worker for identification in schedule() */
2304	worker->last_func = worker->current_func;
2305
2306	/* we're done with it, release */
2307	hash_del(&worker->hentry);
2308	worker->current_work = NULL;
2309	worker->current_func = NULL;
2310	worker->current_pwq = NULL;
2311	pwq_dec_nr_in_flight(pwq, work_color);
2312}
2313
2314/**
2315 * process_scheduled_works - process scheduled works
2316 * @worker: self
2317 *
2318 * Process all scheduled works.  Please note that the scheduled list
2319 * may change while processing a work, so this function repeatedly
2320 * fetches a work from the top and executes it.
2321 *
2322 * CONTEXT:
2323 * spin_lock_irq(pool->lock) which may be released and regrabbed
2324 * multiple times.
2325 */
2326static void process_scheduled_works(struct worker *worker)
2327{
2328	while (!list_empty(&worker->scheduled)) {
2329		struct work_struct *work = list_first_entry(&worker->scheduled,
2330						struct work_struct, entry);
2331		process_one_work(worker, work);
2332	}
2333}
2334
2335static void set_pf_worker(bool val)
2336{
2337	mutex_lock(&wq_pool_attach_mutex);
2338	if (val)
2339		current->flags |= PF_WQ_WORKER;
2340	else
2341		current->flags &= ~PF_WQ_WORKER;
2342	mutex_unlock(&wq_pool_attach_mutex);
2343}
2344
2345/**
2346 * worker_thread - the worker thread function
2347 * @__worker: self
2348 *
2349 * The worker thread function.  All workers belong to a worker_pool -
2350 * either a per-cpu one or dynamic unbound one.  These workers process all
2351 * work items regardless of their specific target workqueue.  The only
2352 * exception is work items which belong to workqueues with a rescuer,
2353 * which will be explained in rescuer_thread().
2354 *
2355 * Return: 0
2356 */
2357static int worker_thread(void *__worker)
2358{
2359	struct worker *worker = __worker;
2360	struct worker_pool *pool = worker->pool;
2361
2362	/* tell the scheduler that this is a workqueue worker */
2363	set_pf_worker(true);
2364woke_up:
2365	spin_lock_irq(&pool->lock);
2366
2367	/* am I supposed to die? */
2368	if (unlikely(worker->flags & WORKER_DIE)) {
2369		spin_unlock_irq(&pool->lock);
2370		WARN_ON_ONCE(!list_empty(&worker->entry));
2371		set_pf_worker(false);
2372
2373		set_task_comm(worker->task, "kworker/dying");
2374		ida_simple_remove(&pool->worker_ida, worker->id);
2375		worker_detach_from_pool(worker);
2376		kfree(worker);
2377		return 0;
2378	}
2379
2380	worker_leave_idle(worker);
2381recheck:
2382	/* no more worker necessary? */
2383	if (!need_more_worker(pool))
2384		goto sleep;
2385
2386	/* do we need to manage? */
2387	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2388		goto recheck;
2389
2390	/*
2391	 * ->scheduled list can only be filled while a worker is
2392	 * preparing to process a work or actually processing it.
2393	 * Make sure nobody diddled with it while I was sleeping.
2394	 */
2395	WARN_ON_ONCE(!list_empty(&worker->scheduled));
2396
2397	/*
2398	 * Finish PREP stage.  We're guaranteed to have at least one idle
2399	 * worker or that someone else has already assumed the manager
2400	 * role.  This is where @worker starts participating in concurrency
2401	 * management if applicable and concurrency management is restored
2402	 * after being rebound.  See rebind_workers() for details.
2403	 */
2404	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2405
2406	do {
2407		struct work_struct *work =
2408			list_first_entry(&pool->worklist,
2409					 struct work_struct, entry);
2410
2411		pool->watchdog_ts = jiffies;
2412
2413		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2414			/* optimization path, not strictly necessary */
2415			process_one_work(worker, work);
2416			if (unlikely(!list_empty(&worker->scheduled)))
2417				process_scheduled_works(worker);
2418		} else {
2419			move_linked_works(work, &worker->scheduled, NULL);
2420			process_scheduled_works(worker);
2421		}
2422	} while (keep_working(pool));
2423
2424	worker_set_flags(worker, WORKER_PREP);
2425sleep:
2426	/*
2427	 * pool->lock is held and there's no work to process and no need to
2428	 * manage, sleep.  Workers are woken up only while holding
2429	 * pool->lock or from local cpu, so setting the current state
2430	 * before releasing pool->lock is enough to prevent losing any
2431	 * event.
2432	 */
2433	worker_enter_idle(worker);
2434	__set_current_state(TASK_IDLE);
2435	spin_unlock_irq(&pool->lock);
2436	schedule();
2437	goto woke_up;
2438}
2439
2440/**
2441 * rescuer_thread - the rescuer thread function
2442 * @__rescuer: self
2443 *
2444 * Workqueue rescuer thread function.  There's one rescuer for each
2445 * workqueue which has WQ_MEM_RECLAIM set.
2446 *
2447 * Regular work processing on a pool may block trying to create a new
2448 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2449 * developing into a deadlock if some works currently on the same queue
2450 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
2451 * the problem rescuer solves.
2452 *
2453 * When such condition is possible, the pool summons rescuers of all
2454 * workqueues which have works queued on the pool and let them process
2455 * those works so that forward progress can be guaranteed.
2456 *
2457 * This should happen rarely.
2458 *
2459 * Return: 0
2460 */
2461static int rescuer_thread(void *__rescuer)
2462{
2463	struct worker *rescuer = __rescuer;
2464	struct workqueue_struct *wq = rescuer->rescue_wq;
2465	struct list_head *scheduled = &rescuer->scheduled;
2466	bool should_stop;
2467
2468	set_user_nice(current, RESCUER_NICE_LEVEL);
2469
2470	/*
2471	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2472	 * doesn't participate in concurrency management.
2473	 */
2474	set_pf_worker(true);
2475repeat:
2476	set_current_state(TASK_IDLE);
2477
2478	/*
2479	 * By the time the rescuer is requested to stop, the workqueue
2480	 * shouldn't have any work pending, but @wq->maydays may still have
2481	 * pwq(s) queued.  This can happen by non-rescuer workers consuming
2482	 * all the work items before the rescuer got to them.  Go through
2483	 * @wq->maydays processing before acting on should_stop so that the
2484	 * list is always empty on exit.
2485	 */
2486	should_stop = kthread_should_stop();
2487
2488	/* see whether any pwq is asking for help */
2489	spin_lock_irq(&wq_mayday_lock);
2490
2491	while (!list_empty(&wq->maydays)) {
2492		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2493					struct pool_workqueue, mayday_node);
2494		struct worker_pool *pool = pwq->pool;
2495		struct work_struct *work, *n;
2496		bool first = true;
2497
2498		__set_current_state(TASK_RUNNING);
2499		list_del_init(&pwq->mayday_node);
2500
2501		spin_unlock_irq(&wq_mayday_lock);
2502
2503		worker_attach_to_pool(rescuer, pool);
2504
2505		spin_lock_irq(&pool->lock);
2506
2507		/*
2508		 * Slurp in all works issued via this workqueue and
2509		 * process'em.
2510		 */
2511		WARN_ON_ONCE(!list_empty(scheduled));
2512		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2513			if (get_work_pwq(work) == pwq) {
2514				if (first)
2515					pool->watchdog_ts = jiffies;
2516				move_linked_works(work, scheduled, &n);
2517			}
2518			first = false;
2519		}
2520
2521		if (!list_empty(scheduled)) {
2522			process_scheduled_works(rescuer);
2523
2524			/*
2525			 * The above execution of rescued work items could
2526			 * have created more to rescue through
2527			 * pwq_activate_first_delayed() or chained
2528			 * queueing.  Let's put @pwq back on mayday list so
2529			 * that such back-to-back work items, which may be
2530			 * being used to relieve memory pressure, don't
2531			 * incur MAYDAY_INTERVAL delay in between.
2532			 */
2533			if (need_to_create_worker(pool)) {
2534				spin_lock(&wq_mayday_lock);
2535				get_pwq(pwq);
2536				list_move_tail(&pwq->mayday_node, &wq->maydays);
2537				spin_unlock(&wq_mayday_lock);
2538			}
2539		}
2540
2541		/*
2542		 * Put the reference grabbed by send_mayday().  @pool won't
2543		 * go away while we're still attached to it.
2544		 */
2545		put_pwq(pwq);
2546
2547		/*
2548		 * Leave this pool.  If need_more_worker() is %true, notify a
2549		 * regular worker; otherwise, we end up with 0 concurrency
2550		 * and stalling the execution.
2551		 */
2552		if (need_more_worker(pool))
2553			wake_up_worker(pool);
2554
2555		spin_unlock_irq(&pool->lock);
2556
2557		worker_detach_from_pool(rescuer);
2558
2559		spin_lock_irq(&wq_mayday_lock);
2560	}
2561
2562	spin_unlock_irq(&wq_mayday_lock);
2563
2564	if (should_stop) {
2565		__set_current_state(TASK_RUNNING);
2566		set_pf_worker(false);
2567		return 0;
2568	}
2569
2570	/* rescuers should never participate in concurrency management */
2571	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2572	schedule();
2573	goto repeat;
2574}
2575
2576/**
2577 * check_flush_dependency - check for flush dependency sanity
2578 * @target_wq: workqueue being flushed
2579 * @target_work: work item being flushed (NULL for workqueue flushes)
2580 *
2581 * %current is trying to flush the whole @target_wq or @target_work on it.
2582 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2583 * reclaiming memory or running on a workqueue which doesn't have
2584 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2585 * a deadlock.
2586 */
2587static void check_flush_dependency(struct workqueue_struct *target_wq,
2588				   struct work_struct *target_work)
2589{
2590	work_func_t target_func = target_work ? target_work->func : NULL;
2591	struct worker *worker;
2592
2593	if (target_wq->flags & WQ_MEM_RECLAIM)
2594		return;
2595
2596	worker = current_wq_worker();
2597
2598	WARN_ONCE(current->flags & PF_MEMALLOC,
2599		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2600		  current->pid, current->comm, target_wq->name, target_func);
2601	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2602			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2603		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2604		  worker->current_pwq->wq->name, worker->current_func,
2605		  target_wq->name, target_func);
2606}
2607
2608struct wq_barrier {
2609	struct work_struct	work;
2610	struct completion	done;
2611	struct task_struct	*task;	/* purely informational */
2612};
2613
2614static void wq_barrier_func(struct work_struct *work)
2615{
2616	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2617	complete(&barr->done);
2618}
2619
2620/**
2621 * insert_wq_barrier - insert a barrier work
2622 * @pwq: pwq to insert barrier into
2623 * @barr: wq_barrier to insert
2624 * @target: target work to attach @barr to
2625 * @worker: worker currently executing @target, NULL if @target is not executing
2626 *
2627 * @barr is linked to @target such that @barr is completed only after
2628 * @target finishes execution.  Please note that the ordering
2629 * guarantee is observed only with respect to @target and on the local
2630 * cpu.
2631 *
2632 * Currently, a queued barrier can't be canceled.  This is because
2633 * try_to_grab_pending() can't determine whether the work to be
2634 * grabbed is at the head of the queue and thus can't clear LINKED
2635 * flag of the previous work while there must be a valid next work
2636 * after a work with LINKED flag set.
2637 *
2638 * Note that when @worker is non-NULL, @target may be modified
2639 * underneath us, so we can't reliably determine pwq from @target.
2640 *
2641 * CONTEXT:
2642 * spin_lock_irq(pool->lock).
2643 */
2644static void insert_wq_barrier(struct pool_workqueue *pwq,
2645			      struct wq_barrier *barr,
2646			      struct work_struct *target, struct worker *worker)
2647{
2648	struct list_head *head;
2649	unsigned int linked = 0;
2650
2651	/*
2652	 * debugobject calls are safe here even with pool->lock locked
2653	 * as we know for sure that this will not trigger any of the
2654	 * checks and call back into the fixup functions where we
2655	 * might deadlock.
2656	 */
2657	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2658	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2659
2660	init_completion_map(&barr->done, &target->lockdep_map);
2661
2662	barr->task = current;
2663
2664	/*
2665	 * If @target is currently being executed, schedule the
2666	 * barrier to the worker; otherwise, put it after @target.
2667	 */
2668	if (worker)
2669		head = worker->scheduled.next;
2670	else {
2671		unsigned long *bits = work_data_bits(target);
2672
2673		head = target->entry.next;
2674		/* there can already be other linked works, inherit and set */
2675		linked = *bits & WORK_STRUCT_LINKED;
2676		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2677	}
2678
2679	debug_work_activate(&barr->work);
2680	insert_work(pwq, &barr->work, head,
2681		    work_color_to_flags(WORK_NO_COLOR) | linked);
2682}
2683
2684/**
2685 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2686 * @wq: workqueue being flushed
2687 * @flush_color: new flush color, < 0 for no-op
2688 * @work_color: new work color, < 0 for no-op
2689 *
2690 * Prepare pwqs for workqueue flushing.
2691 *
2692 * If @flush_color is non-negative, flush_color on all pwqs should be
2693 * -1.  If no pwq has in-flight commands at the specified color, all
2694 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2695 * has in flight commands, its pwq->flush_color is set to
2696 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2697 * wakeup logic is armed and %true is returned.
2698 *
2699 * The caller should have initialized @wq->first_flusher prior to
2700 * calling this function with non-negative @flush_color.  If
2701 * @flush_color is negative, no flush color update is done and %false
2702 * is returned.
2703 *
2704 * If @work_color is non-negative, all pwqs should have the same
2705 * work_color which is previous to @work_color and all will be
2706 * advanced to @work_color.
2707 *
2708 * CONTEXT:
2709 * mutex_lock(wq->mutex).
2710 *
2711 * Return:
2712 * %true if @flush_color >= 0 and there's something to flush.  %false
2713 * otherwise.
2714 */
2715static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2716				      int flush_color, int work_color)
2717{
2718	bool wait = false;
2719	struct pool_workqueue *pwq;
2720
2721	if (flush_color >= 0) {
2722		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2723		atomic_set(&wq->nr_pwqs_to_flush, 1);
2724	}
2725
2726	for_each_pwq(pwq, wq) {
2727		struct worker_pool *pool = pwq->pool;
2728
2729		spin_lock_irq(&pool->lock);
2730
2731		if (flush_color >= 0) {
2732			WARN_ON_ONCE(pwq->flush_color != -1);
2733
2734			if (pwq->nr_in_flight[flush_color]) {
2735				pwq->flush_color = flush_color;
2736				atomic_inc(&wq->nr_pwqs_to_flush);
2737				wait = true;
2738			}
2739		}
2740
2741		if (work_color >= 0) {
2742			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2743			pwq->work_color = work_color;
2744		}
2745
2746		spin_unlock_irq(&pool->lock);
2747	}
2748
2749	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2750		complete(&wq->first_flusher->done);
2751
2752	return wait;
2753}
2754
2755/**
2756 * flush_workqueue - ensure that any scheduled work has run to completion.
2757 * @wq: workqueue to flush
2758 *
2759 * This function sleeps until all work items which were queued on entry
2760 * have finished execution, but it is not livelocked by new incoming ones.
2761 */
2762void flush_workqueue(struct workqueue_struct *wq)
2763{
2764	struct wq_flusher this_flusher = {
2765		.list = LIST_HEAD_INIT(this_flusher.list),
2766		.flush_color = -1,
2767		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2768	};
2769	int next_color;
2770
2771	if (WARN_ON(!wq_online))
2772		return;
2773
2774	lock_map_acquire(&wq->lockdep_map);
2775	lock_map_release(&wq->lockdep_map);
2776
2777	mutex_lock(&wq->mutex);
2778
2779	/*
2780	 * Start-to-wait phase
2781	 */
2782	next_color = work_next_color(wq->work_color);
2783
2784	if (next_color != wq->flush_color) {
2785		/*
2786		 * Color space is not full.  The current work_color
2787		 * becomes our flush_color and work_color is advanced
2788		 * by one.
2789		 */
2790		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2791		this_flusher.flush_color = wq->work_color;
2792		wq->work_color = next_color;
2793
2794		if (!wq->first_flusher) {
2795			/* no flush in progress, become the first flusher */
2796			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2797
2798			wq->first_flusher = &this_flusher;
2799
2800			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2801						       wq->work_color)) {
2802				/* nothing to flush, done */
2803				wq->flush_color = next_color;
2804				wq->first_flusher = NULL;
2805				goto out_unlock;
2806			}
2807		} else {
2808			/* wait in queue */
2809			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2810			list_add_tail(&this_flusher.list, &wq->flusher_queue);
2811			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2812		}
2813	} else {
2814		/*
2815		 * Oops, color space is full, wait on overflow queue.
2816		 * The next flush completion will assign us
2817		 * flush_color and transfer to flusher_queue.
2818		 */
2819		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2820	}
2821
2822	check_flush_dependency(wq, NULL);
2823
2824	mutex_unlock(&wq->mutex);
2825
2826	wait_for_completion(&this_flusher.done);
2827
2828	/*
2829	 * Wake-up-and-cascade phase
2830	 *
2831	 * First flushers are responsible for cascading flushes and
2832	 * handling overflow.  Non-first flushers can simply return.
2833	 */
2834	if (wq->first_flusher != &this_flusher)
2835		return;
2836
2837	mutex_lock(&wq->mutex);
2838
2839	/* we might have raced, check again with mutex held */
2840	if (wq->first_flusher != &this_flusher)
2841		goto out_unlock;
2842
2843	wq->first_flusher = NULL;
2844
2845	WARN_ON_ONCE(!list_empty(&this_flusher.list));
2846	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2847
2848	while (true) {
2849		struct wq_flusher *next, *tmp;
2850
2851		/* complete all the flushers sharing the current flush color */
2852		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2853			if (next->flush_color != wq->flush_color)
2854				break;
2855			list_del_init(&next->list);
2856			complete(&next->done);
2857		}
2858
2859		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2860			     wq->flush_color != work_next_color(wq->work_color));
2861
2862		/* this flush_color is finished, advance by one */
2863		wq->flush_color = work_next_color(wq->flush_color);
2864
2865		/* one color has been freed, handle overflow queue */
2866		if (!list_empty(&wq->flusher_overflow)) {
2867			/*
2868			 * Assign the same color to all overflowed
2869			 * flushers, advance work_color and append to
2870			 * flusher_queue.  This is the start-to-wait
2871			 * phase for these overflowed flushers.
2872			 */
2873			list_for_each_entry(tmp, &wq->flusher_overflow, list)
2874				tmp->flush_color = wq->work_color;
2875
2876			wq->work_color = work_next_color(wq->work_color);
2877
2878			list_splice_tail_init(&wq->flusher_overflow,
2879					      &wq->flusher_queue);
2880			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2881		}
2882
2883		if (list_empty(&wq->flusher_queue)) {
2884			WARN_ON_ONCE(wq->flush_color != wq->work_color);
2885			break;
2886		}
2887
2888		/*
2889		 * Need to flush more colors.  Make the next flusher
2890		 * the new first flusher and arm pwqs.
2891		 */
2892		WARN_ON_ONCE(wq->flush_color == wq->work_color);
2893		WARN_ON_ONCE(wq->flush_color != next->flush_color);
2894
2895		list_del_init(&next->list);
2896		wq->first_flusher = next;
2897
2898		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2899			break;
2900
2901		/*
2902		 * Meh... this color is already done, clear first
2903		 * flusher and repeat cascading.
2904		 */
2905		wq->first_flusher = NULL;
2906	}
2907
2908out_unlock:
2909	mutex_unlock(&wq->mutex);
2910}
2911EXPORT_SYMBOL(flush_workqueue);
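/*
 * Illustrative sketch (not part of workqueue.c): a typical teardown sequence
 * for a driver-owned workqueue.  The "my_wq" name is hypothetical.
 *
 *	// stop new submissions first (driver-specific), then:
 *	flush_workqueue(my_wq);		// wait for already-queued items
 *	destroy_workqueue(my_wq);	// also drains before freeing
 *
 * flush_workqueue() only waits for items queued before the call; it does
 * not prevent new work from being queued afterwards.
 */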
2912
2913/**
2914 * drain_workqueue - drain a workqueue
2915 * @wq: workqueue to drain
2916 *
2917 * Wait until the workqueue becomes empty.  While draining is in progress,
2918 * only chain queueing is allowed.  IOW, only currently pending or running
2919 * work items on @wq can queue further work items on it.  @wq is flushed
2920 * repeatedly until it becomes empty.  The number of flushes is determined
2921 * by the depth of chaining and should be relatively small.  Whine if it
2922 * takes too long.
2923 */
2924void drain_workqueue(struct workqueue_struct *wq)
2925{
2926	unsigned int flush_cnt = 0;
2927	struct pool_workqueue *pwq;
2928
2929	/*
2930	 * __queue_work() needs to test whether there are drainers, is much
2931	 * hotter than drain_workqueue() and already looks at @wq->flags.
2932	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2933	 */
2934	mutex_lock(&wq->mutex);
2935	if (!wq->nr_drainers++)
2936		wq->flags |= __WQ_DRAINING;
2937	mutex_unlock(&wq->mutex);
2938reflush:
2939	flush_workqueue(wq);
2940
2941	mutex_lock(&wq->mutex);
2942
2943	for_each_pwq(pwq, wq) {
2944		bool drained;
2945
2946		spin_lock_irq(&pwq->pool->lock);
2947		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2948		spin_unlock_irq(&pwq->pool->lock);
2949
2950		if (drained)
2951			continue;
2952
2953		if (++flush_cnt == 10 ||
2954		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2955			pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2956				wq->name, flush_cnt);
2957
2958		mutex_unlock(&wq->mutex);
2959		goto reflush;
2960	}
2961
2962	if (!--wq->nr_drainers)
2963		wq->flags &= ~__WQ_DRAINING;
2964	mutex_unlock(&wq->mutex);
2965}
2966EXPORT_SYMBOL_GPL(drain_workqueue);
2967
2968static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2969			     bool from_cancel)
2970{
2971	struct worker *worker = NULL;
2972	struct worker_pool *pool;
2973	struct pool_workqueue *pwq;
2974
2975	might_sleep();
2976
2977	rcu_read_lock();
2978	pool = get_work_pool(work);
2979	if (!pool) {
2980		rcu_read_unlock();
2981		return false;
2982	}
2983
2984	spin_lock_irq(&pool->lock);
2985	/* see the comment in try_to_grab_pending() with the same code */
2986	pwq = get_work_pwq(work);
2987	if (pwq) {
2988		if (unlikely(pwq->pool != pool))
2989			goto already_gone;
2990	} else {
2991		worker = find_worker_executing_work(pool, work);
2992		if (!worker)
2993			goto already_gone;
2994		pwq = worker->current_pwq;
2995	}
2996
2997	check_flush_dependency(pwq->wq, work);
2998
2999	insert_wq_barrier(pwq, barr, work, worker);
3000	spin_unlock_irq(&pool->lock);
3001
3002	/*
3003	 * Force a lock recursion deadlock when using flush_work() inside a
3004	 * single-threaded or rescuer equipped workqueue.
3005	 *
3006	 * For single-threaded workqueues the deadlock happens when the work
3007	 * is after the work issuing the flush_work(). For rescuer-equipped
3008	 * workqueues the deadlock happens when the rescuer stalls, blocking
3009	 * forward progress.
3010	 */
3011	if (!from_cancel &&
3012	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3013		lock_map_acquire(&pwq->wq->lockdep_map);
3014		lock_map_release(&pwq->wq->lockdep_map);
3015	}
3016	rcu_read_unlock();
3017	return true;
3018already_gone:
3019	spin_unlock_irq(&pool->lock);
3020	rcu_read_unlock();
3021	return false;
3022}
3023
3024static bool __flush_work(struct work_struct *work, bool from_cancel)
3025{
3026	struct wq_barrier barr;
3027
3028	if (WARN_ON(!wq_online))
3029		return false;
3030
3031	if (WARN_ON(!work->func))
3032		return false;
3033
3034	if (!from_cancel) {
3035		lock_map_acquire(&work->lockdep_map);
3036		lock_map_release(&work->lockdep_map);
3037	}
3038
3039	if (start_flush_work(work, &barr, from_cancel)) {
3040		wait_for_completion(&barr.done);
3041		destroy_work_on_stack(&barr.work);
3042		return true;
3043	} else {
3044		return false;
3045	}
3046}
3047
3048/**
3049 * flush_work - wait for a work to finish executing the last queueing instance
3050 * @work: the work to flush
3051 *
3052 * Wait until @work has finished execution.  @work is guaranteed to be idle
3053 * on return if it hasn't been requeued since flush started.
3054 *
3055 * Return:
3056 * %true if flush_work() waited for the work to finish execution,
3057 * %false if it was already idle.
3058 */
3059bool flush_work(struct work_struct *work)
3060{
3061	return __flush_work(work, false);
3062}
3063EXPORT_SYMBOL_GPL(flush_work);
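/*
 * Illustrative sketch (not part of workqueue.c): waiting for the last
 * queueing instance of a single work item, e.g. before freeing the object
 * that embeds it.  The "my_dev" names are hypothetical.
 *
 *	if (flush_work(&my_dev->reset_work))
 *		pr_debug("waited for pending reset to finish\n");
 *	kfree(my_dev);
 *
 * If the work can re-queue itself, cancel_work_sync() below is usually the
 * safer choice.
 */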
3064
3065struct cwt_wait {
3066	wait_queue_entry_t		wait;
3067	struct work_struct	*work;
3068};
3069
3070static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3071{
3072	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3073
3074	if (cwait->work != key)
3075		return 0;
3076	return autoremove_wake_function(wait, mode, sync, key);
3077}
3078
3079static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3080{
3081	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3082	unsigned long flags;
3083	int ret;
3084
3085	do {
3086		ret = try_to_grab_pending(work, is_dwork, &flags);
3087		/*
3088		 * If someone else is already canceling, wait for it to
3089		 * finish.  flush_work() doesn't work for PREEMPT_NONE
3090		 * because we may get scheduled between @work's completion
3091		 * and the other canceling task resuming and clearing
3092		 * CANCELING - flush_work() will return false immediately
3093		 * as @work is no longer busy, try_to_grab_pending() will
3094		 * return -ENOENT as @work is still being canceled and the
3095		 * other canceling task won't be able to clear CANCELING as
3096		 * we're hogging the CPU.
3097		 *
3098		 * Let's wait for completion using a waitqueue.  As this
3099		 * may lead to the thundering herd problem, use a custom
3100		 * wake function which matches @work along with exclusive
3101		 * wait and wakeup.
3102		 */
3103		if (unlikely(ret == -ENOENT)) {
3104			struct cwt_wait cwait;
3105
3106			init_wait(&cwait.wait);
3107			cwait.wait.func = cwt_wakefn;
3108			cwait.work = work;
3109
3110			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3111						  TASK_UNINTERRUPTIBLE);
3112			if (work_is_canceling(work))
3113				schedule();
3114			finish_wait(&cancel_waitq, &cwait.wait);
3115		}
3116	} while (unlikely(ret < 0));
3117
3118	/* tell other tasks trying to grab @work to back off */
3119	mark_work_canceling(work);
3120	local_irq_restore(flags);
3121
3122	/*
3123	 * This allows canceling during early boot.  We know that @work
3124	 * isn't executing.
3125	 */
3126	if (wq_online)
3127		__flush_work(work, true);
3128
3129	clear_work_data(work);
3130
3131	/*
3132	 * Paired with prepare_to_wait() above so that either
3133	 * waitqueue_active() is visible here or !work_is_canceling() is
3134	 * visible there.
3135	 */
3136	smp_mb();
3137	if (waitqueue_active(&cancel_waitq))
3138		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3139
3140	return ret;
3141}
3142
3143/**
3144 * cancel_work_sync - cancel a work and wait for it to finish
3145 * @work: the work to cancel
3146 *
3147 * Cancel @work and wait for its execution to finish.  This function
3148 * can be used even if the work re-queues itself or migrates to
3149 * another workqueue.  On return from this function, @work is
3150 * guaranteed to be not pending or executing on any CPU.
3151 *
3152 * cancel_work_sync(&delayed_work->work) must not be used for
3153 * delayed_work's.  Use cancel_delayed_work_sync() instead.
3154 *
3155 * The caller must ensure that the workqueue on which @work was last
3156 * queued can't be destroyed before this function returns.
3157 *
3158 * Return:
3159 * %true if @work was pending, %false otherwise.
3160 */
3161bool cancel_work_sync(struct work_struct *work)
3162{
3163	return __cancel_work_timer(work, false);
3164}
3165EXPORT_SYMBOL_GPL(cancel_work_sync);
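/*
 * Illustrative sketch (not part of workqueue.c): cancel_work_sync() in a
 * device removal path.  The "my_dev" structure and fields are hypothetical.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->irq_work);	// not pending or running
 *		kfree(dev);				// now safe to free
 *	}
 *
 * The caller still has to guarantee that the workqueue the item was last
 * queued on outlives this call, as documented above.
 */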
3166
3167/**
3168 * flush_delayed_work - wait for a dwork to finish executing the last queueing
3169 * @dwork: the delayed work to flush
3170 *
3171 * Delayed timer is cancelled and the pending work is queued for
3172 * immediate execution.  Like flush_work(), this function only
3173 * considers the last queueing instance of @dwork.
3174 *
3175 * Return:
3176 * %true if flush_work() waited for the work to finish execution,
3177 * %false if it was already idle.
3178 */
3179bool flush_delayed_work(struct delayed_work *dwork)
3180{
3181	local_irq_disable();
3182	if (del_timer_sync(&dwork->timer))
3183		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
3184	local_irq_enable();
3185	return flush_work(&dwork->work);
3186}
3187EXPORT_SYMBOL(flush_delayed_work);
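/*
 * Illustrative sketch (not part of workqueue.c): forcing a pending delayed
 * work to run now and waiting for it, e.g. on suspend.  The "my_dev" name
 * is hypothetical.
 *
 *	// push out any batched writeback immediately before suspending
 *	flush_delayed_work(&my_dev->writeback_dwork);
 *
 * The timer is cancelled and the work runs right away instead of waiting
 * for its original expiry.
 */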
3188
3189/**
3190 * flush_rcu_work - wait for a rwork to finish executing the last queueing
3191 * @rwork: the rcu work to flush
3192 *
3193 * Return:
3194 * %true if flush_rcu_work() waited for the work to finish execution,
3195 * %false if it was already idle.
3196 */
3197bool flush_rcu_work(struct rcu_work *rwork)
3198{
3199	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3200		rcu_barrier();
3201		flush_work(&rwork->work);
3202		return true;
3203	} else {
3204		return flush_work(&rwork->work);
3205	}
3206}
3207EXPORT_SYMBOL(flush_rcu_work);
3208
3209static bool __cancel_work(struct work_struct *work, bool is_dwork)
3210{
3211	unsigned long flags;
3212	int ret;
3213
3214	do {
3215		ret = try_to_grab_pending(work, is_dwork, &flags);
3216	} while (unlikely(ret == -EAGAIN));
3217
3218	if (unlikely(ret < 0))
3219		return false;
3220
3221	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3222	local_irq_restore(flags);
3223	return ret;
3224}
3225
3226/**
3227 * cancel_delayed_work - cancel a delayed work
3228 * @dwork: delayed_work to cancel
3229 *
3230 * Kill off a pending delayed_work.
3231 *
3232 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3233 * pending.
3234 *
3235 * Note:
3236 * The work callback function may still be running on return, unless
3237 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
3238 * use cancel_delayed_work_sync() to wait on it.
3239 *
3240 * This function is safe to call from any context including IRQ handler.
3241 */
3242bool cancel_delayed_work(struct delayed_work *dwork)
3243{
3244	return __cancel_work(&dwork->work, true);
3245}
3246EXPORT_SYMBOL(cancel_delayed_work);
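/*
 * Illustrative sketch (not part of workqueue.c): the asynchronous cancel.
 * The "my_dev" name is hypothetical.
 *
 *	if (cancel_delayed_work(&my_dev->poll_dwork))
 *		pr_debug("poll was pending and is now cancelled\n");
 *
 * As noted above, the callback may still be running on return; use
 * cancel_delayed_work_sync() when the caller needs it to have finished.
 */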
3247
3248/**
3249 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3250 * @dwork: the delayed work to cancel
3251 *
3252 * This is cancel_work_sync() for delayed works.
3253 *
3254 * Return:
3255 * %true if @dwork was pending, %false otherwise.
3256 */
3257bool cancel_delayed_work_sync(struct delayed_work *dwork)
3258{
3259	return __cancel_work_timer(&dwork->work, true);
3260}
3261EXPORT_SYMBOL(cancel_delayed_work_sync);
3262
3263/**
3264 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3265 * @func: the function to call
3266 *
3267 * schedule_on_each_cpu() executes @func on each online CPU using the
3268 * system workqueue and blocks until all CPUs have completed.
3269 * schedule_on_each_cpu() is very slow.
3270 *
3271 * Return:
3272 * 0 on success, -errno on failure.
3273 */
3274int schedule_on_each_cpu(work_func_t func)
3275{
3276	int cpu;
3277	struct work_struct __percpu *works;
3278
3279	works = alloc_percpu(struct work_struct);
3280	if (!works)
3281		return -ENOMEM;
3282
3283	get_online_cpus();
3284
3285	for_each_online_cpu(cpu) {
3286		struct work_struct *work = per_cpu_ptr(works, cpu);
3287
3288		INIT_WORK(work, func);
3289		schedule_work_on(cpu, work);
3290	}
3291
3292	for_each_online_cpu(cpu)
3293		flush_work(per_cpu_ptr(works, cpu));
3294
3295	put_online_cpus();
3296	free_percpu(works);
3297	return 0;
3298}
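/*
 * Illustrative sketch (not part of workqueue.c): running a function once on
 * every online CPU and waiting for all of them.  The function name below is
 * hypothetical.
 *
 *	static void my_flush_percpu_caches(struct work_struct *unused)
 *	{
 *		// runs on each online CPU in turn
 *	}
 *
 *	// somewhere in a rarely-taken slow path:
 *	//	int ret = schedule_on_each_cpu(my_flush_percpu_caches);
 *	//	if (ret)
 *	//		pr_warn("per-cpu flush failed: %d\n", ret);
 *
 * This blocks the caller and is slow; it is meant for rare slow paths only.
 */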
3299
3300/**
3301 * execute_in_process_context - reliably execute the routine with user context
3302 * @fn:		the function to execute
3303 * @ew:		guaranteed storage for the execute work structure (must
3304 *		be available when the work executes)
3305 *
3306 * Executes the function immediately if process context is available,
3307 * otherwise schedules the function for delayed execution.
3308 *
3309 * Return:	0 - function was executed
3310 *		1 - function was scheduled for execution
3311 */
3312int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3313{
3314	if (!in_interrupt()) {
3315		fn(&ew->work);
3316		return 0;
3317	}
3318
3319	INIT_WORK(&ew->work, fn);
3320	schedule_work(&ew->work);
3321
3322	return 1;
3323}
3324EXPORT_SYMBOL_GPL(execute_in_process_context);
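/*
 * Illustrative sketch (not part of workqueue.c): execute_in_process_context()
 * lets a release path run immediately when the caller is already in process
 * context and fall back to the system workqueue from interrupt context.  The
 * "my_obj" names are hypothetical.
 *
 *	struct my_obj {
 *		struct execute_work	ew;
 *		// ... payload ...
 *	};
 *
 *	static void my_obj_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	// may be reached from IRQ context
 *	execute_in_process_context(my_obj_release, &obj->ew);
 */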
3325
3326/**
3327 * free_workqueue_attrs - free a workqueue_attrs
3328 * @attrs: workqueue_attrs to free
3329 *
3330 * Undo alloc_workqueue_attrs().
3331 */
3332void free_workqueue_attrs(struct workqueue_attrs *attrs)
3333{
3334	if (attrs) {
3335		free_cpumask_var(attrs->cpumask);
3336		kfree(attrs);
3337	}
3338}
3339
3340/**
3341 * alloc_workqueue_attrs - allocate a workqueue_attrs
3342 *
3343 * Allocate a new workqueue_attrs, initialize with default settings and
3344 * return it.
3345 *
3346 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3347 */
3348struct workqueue_attrs *alloc_workqueue_attrs(void)
3349{
3350	struct workqueue_attrs *attrs;
3351
3352	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3353	if (!attrs)
3354		goto fail;
3355	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3356		goto fail;
3357
3358	cpumask_copy(attrs->cpumask, cpu_possible_mask);
3359	return attrs;
3360fail:
3361	free_workqueue_attrs(attrs);
3362	return NULL;
3363}
3364
3365static void copy_workqueue_attrs(struct workqueue_attrs *to,
3366				 const struct workqueue_attrs *from)
3367{
3368	to->nice = from->nice;
3369	cpumask_copy(to->cpumask, from->cpumask);
3370	/*
3371	 * Unlike hash and equality test, this function doesn't ignore
3372	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
3373	 * get_unbound_pool() explicitly clears ->no_numa after copying.
3374	 */
3375	to->no_numa = from->no_numa;
3376}
3377
3378/* hash value of the content of @attr */
3379static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3380{
3381	u32 hash = 0;
3382
3383	hash = jhash_1word(attrs->nice, hash);
3384	hash = jhash(cpumask_bits(attrs->cpumask),
3385		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3386	return hash;
3387}
3388
3389/* content equality test */
3390static bool wqattrs_equal(const struct workqueue_attrs *a,
3391			  const struct workqueue_attrs *b)
3392{
3393	if (a->nice != b->nice)
3394		return false;
3395	if (!cpumask_equal(a->cpumask, b->cpumask))
3396		return false;
3397	return true;
3398}
3399
3400/**
3401 * init_worker_pool - initialize a newly zalloc'd worker_pool
3402 * @pool: worker_pool to initialize
3403 *
3404 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3405 *
3406 * Return: 0 on success, -errno on failure.  Even on failure, all fields
3407 * inside @pool proper are initialized and put_unbound_pool() can be called
3408 * on @pool safely to release it.
3409 */
3410static int init_worker_pool(struct worker_pool *pool)
3411{
3412	spin_lock_init(&pool->lock);
3413	pool->id = -1;
3414	pool->cpu = -1;
3415	pool->node = NUMA_NO_NODE;
3416	pool->flags |= POOL_DISASSOCIATED;
3417	pool->watchdog_ts = jiffies;
3418	INIT_LIST_HEAD(&pool->worklist);
3419	INIT_LIST_HEAD(&pool->idle_list);
3420	hash_init(pool->busy_hash);
3421
3422	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3423
3424	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3425
3426	INIT_LIST_HEAD(&pool->workers);
3427
3428	ida_init(&pool->worker_ida);
3429	INIT_HLIST_NODE(&pool->hash_node);
3430	pool->refcnt = 1;
3431
3432	/* shouldn't fail above this point */
3433	pool->attrs = alloc_workqueue_attrs();
3434	if (!pool->attrs)
3435		return -ENOMEM;
3436	return 0;
3437}
3438
3439#ifdef CONFIG_LOCKDEP
3440static void wq_init_lockdep(struct workqueue_struct *wq)
3441{
3442	char *lock_name;
3443
3444	lockdep_register_key(&wq->key);
3445	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3446	if (!lock_name)
3447		lock_name = wq->name;
3448
3449	wq->lock_name = lock_name;
3450	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3451}
3452
3453static void wq_unregister_lockdep(struct workqueue_struct *wq)
3454{
3455	lockdep_unregister_key(&wq->key);
3456}
3457
3458static void wq_free_lockdep(struct workqueue_struct *wq)
3459{
3460	if (wq->lock_name != wq->name)
3461		kfree(wq->lock_name);
3462}
3463#else
3464static void wq_init_lockdep(struct workqueue_struct *wq)
3465{
3466}
3467
3468static void wq_unregister_lockdep(struct workqueue_struct *wq)
3469{
3470}
3471
3472static void wq_free_lockdep(struct workqueue_struct *wq)
3473{
3474}
3475#endif
3476
3477static void rcu_free_wq(struct rcu_head *rcu)
3478{
3479	struct workqueue_struct *wq =
3480		container_of(rcu, struct workqueue_struct, rcu);
3481
3482	wq_free_lockdep(wq);
3483
3484	if (!(wq->flags & WQ_UNBOUND))
3485		free_percpu(wq->cpu_pwqs);
3486	else
3487		free_workqueue_attrs(wq->unbound_attrs);
3488
3489	kfree(wq->rescuer);
3490	kfree(wq);
3491}
3492
3493static void rcu_free_pool(struct rcu_head *rcu)
3494{
3495	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3496
3497	ida_destroy(&pool->worker_ida);
3498	free_workqueue_attrs(pool->attrs);
3499	kfree(pool);
3500}
3501
3502/**
3503 * put_unbound_pool - put a worker_pool
3504 * @pool: worker_pool to put
3505 *
3506 * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
3507 * safe manner.  get_unbound_pool() calls this function on its failure path
3508 * and this function should be able to release pools which went through,
3509 * successfully or not, init_worker_pool().
3510 *
3511 * Should be called with wq_pool_mutex held.
3512 */
3513static void put_unbound_pool(struct worker_pool *pool)
3514{
3515	DECLARE_COMPLETION_ONSTACK(detach_completion);
3516	struct worker *worker;
3517
3518	lockdep_assert_held(&wq_pool_mutex);
3519
3520	if (--pool->refcnt)
3521		return;
3522
3523	/* sanity checks */
3524	if (WARN_ON(!(pool->cpu < 0)) ||
3525	    WARN_ON(!list_empty(&pool->worklist)))
3526		return;
3527
3528	/* release id and unhash */
3529	if (pool->id >= 0)
3530		idr_remove(&worker_pool_idr, pool->id);
3531	hash_del(&pool->hash_node);
3532
3533	/*
3534	 * Become the manager and destroy all workers.  This prevents
3535	 * @pool's workers from blocking on attach_mutex.  We're the last
3536	 * manager and @pool gets freed with the flag set.
3537	 */
3538	spin_lock_irq(&pool->lock);
3539	wait_event_lock_irq(wq_manager_wait,
3540			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3541	pool->flags |= POOL_MANAGER_ACTIVE;
3542
3543	while ((worker = first_idle_worker(pool)))
3544		destroy_worker(worker);
3545	WARN_ON(pool->nr_workers || pool->nr_idle);
3546	spin_unlock_irq(&pool->lock);
3547
3548	mutex_lock(&wq_pool_attach_mutex);
3549	if (!list_empty(&pool->workers))
3550		pool->detach_completion = &detach_completion;
3551	mutex_unlock(&wq_pool_attach_mutex);
3552
3553	if (pool->detach_completion)
3554		wait_for_completion(pool->detach_completion);
3555
3556	/* shut down the timers */
3557	del_timer_sync(&pool->idle_timer);
3558	del_timer_sync(&pool->mayday_timer);
3559
3560	/* RCU protected to allow dereferences from get_work_pool() */
3561	call_rcu(&pool->rcu, rcu_free_pool);
3562}
3563
3564/**
3565 * get_unbound_pool - get a worker_pool with the specified attributes
3566 * @attrs: the attributes of the worker_pool to get
3567 *
3568 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3569 * reference count and return it.  If there already is a matching
3570 * worker_pool, it will be used; otherwise, this function attempts to
3571 * create a new one.
3572 *
3573 * Should be called with wq_pool_mutex held.
3574 *
3575 * Return: On success, a worker_pool with the same attributes as @attrs.
3576 * On failure, %NULL.
3577 */
3578static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3579{
3580	u32 hash = wqattrs_hash(attrs);
3581	struct worker_pool *pool;
3582	int node;
3583	int target_node = NUMA_NO_NODE;
3584
3585	lockdep_assert_held(&wq_pool_mutex);
3586
3587	/* do we already have a matching pool? */
3588	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3589		if (wqattrs_equal(pool->attrs, attrs)) {
3590			pool->refcnt++;
3591			return pool;
3592		}
3593	}
3594
3595	/* if cpumask is contained inside a NUMA node, we belong to that node */
3596	if (wq_numa_enabled) {
3597		for_each_node(node) {
3598			if (cpumask_subset(attrs->cpumask,
3599					   wq_numa_possible_cpumask[node])) {
3600				target_node = node;
3601				break;
3602			}
3603		}
3604	}
3605
3606	/* nope, create a new one */
3607	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3608	if (!pool || init_worker_pool(pool) < 0)
3609		goto fail;
3610
3611	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
3612	copy_workqueue_attrs(pool->attrs, attrs);
3613	pool->node = target_node;
3614
3615	/*
3616	 * no_numa isn't a worker_pool attribute, always clear it.  See
3617	 * 'struct workqueue_attrs' comments for detail.
3618	 */
3619	pool->attrs->no_numa = false;
3620
3621	if (worker_pool_assign_id(pool) < 0)
3622		goto fail;
3623
3624	/* create and start the initial worker */
3625	if (wq_online && !create_worker(pool))
3626		goto fail;
3627
3628	/* install */
3629	hash_add(unbound_pool_hash, &pool->hash_node, hash);
3630
3631	return pool;
3632fail:
3633	if (pool)
3634		put_unbound_pool(pool);
3635	return NULL;
3636}
3637
3638static void rcu_free_pwq(struct rcu_head *rcu)
3639{
3640	kmem_cache_free(pwq_cache,
3641			container_of(rcu, struct pool_workqueue, rcu));
3642}
3643
3644/*
3645 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3646 * and needs to be destroyed.
3647 */
3648static void pwq_unbound_release_workfn(struct work_struct *work)
3649{
3650	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3651						  unbound_release_work);
3652	struct workqueue_struct *wq = pwq->wq;
3653	struct worker_pool *pool = pwq->pool;
3654	bool is_last;
3655
3656	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3657		return;
3658
3659	mutex_lock(&wq->mutex);
3660	list_del_rcu(&pwq->pwqs_node);
3661	is_last = list_empty(&wq->pwqs);
3662	mutex_unlock(&wq->mutex);
3663
3664	mutex_lock(&wq_pool_mutex);
3665	put_unbound_pool(pool);
3666	mutex_unlock(&wq_pool_mutex);
3667
3668	call_rcu(&pwq->rcu, rcu_free_pwq);
3669
3670	/*
3671	 * If we're the last pwq going away, @wq is already dead and no one
3672	 * is gonna access it anymore.  Schedule RCU free.
3673	 */
3674	if (is_last) {
3675		wq_unregister_lockdep(wq);
3676		call_rcu(&wq->rcu, rcu_free_wq);
3677	}
3678}
3679
3680/**
3681 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3682 * @pwq: target pool_workqueue
3683 *
3684 * If @pwq isn't freezing, set @pwq->max_active to the associated
3685 * workqueue's saved_max_active and activate delayed work items
3686 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3687 */
3688static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3689{
3690	struct workqueue_struct *wq = pwq->wq;
3691	bool freezable = wq->flags & WQ_FREEZABLE;
3692	unsigned long flags;
3693
3694	/* for @wq->saved_max_active */
3695	lockdep_assert_held(&wq->mutex);
3696
3697	/* fast exit for non-freezable wqs */
3698	if (!freezable && pwq->max_active == wq->saved_max_active)
3699		return;
3700
3701	/* this function can be called during early boot w/ irq disabled */
3702	spin_lock_irqsave(&pwq->pool->lock, flags);
3703
3704	/*
3705	 * During [un]freezing, the caller is responsible for ensuring that
3706	 * this function is called at least once after @workqueue_freezing
3707	 * is updated and visible.
3708	 */
3709	if (!freezable || !workqueue_freezing) {
3710		pwq->max_active = wq->saved_max_active;
3711
3712		while (!list_empty(&pwq->delayed_works) &&
3713		       pwq->nr_active < pwq->max_active)
3714			pwq_activate_first_delayed(pwq);
3715
3716		/*
3717		 * Need to kick a worker after thawed or an unbound wq's
3718		 * max_active is bumped.  It's a slow path.  Do it always.
3719		 */
3720		wake_up_worker(pwq->pool);
3721	} else {
3722		pwq->max_active = 0;
3723	}
3724
3725	spin_unlock_irqrestore(&pwq->pool->lock, flags);
3726}
3727
3728/* initialize newly alloced @pwq which is associated with @wq and @pool */
3729static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3730		     struct worker_pool *pool)
3731{
3732	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3733
3734	memset(pwq, 0, sizeof(*pwq));
3735
3736	pwq->pool = pool;
3737	pwq->wq = wq;
3738	pwq->flush_color = -1;
3739	pwq->refcnt = 1;
3740	INIT_LIST_HEAD(&pwq->delayed_works);
3741	INIT_LIST_HEAD(&pwq->pwqs_node);
3742	INIT_LIST_HEAD(&pwq->mayday_node);
3743	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3744}
3745
3746/* sync @pwq with the current state of its associated wq and link it */
3747static void link_pwq(struct pool_workqueue *pwq)
3748{
3749	struct workqueue_struct *wq = pwq->wq;
3750
3751	lockdep_assert_held(&wq->mutex);
3752
3753	/* may be called multiple times, ignore if already linked */
3754	if (!list_empty(&pwq->pwqs_node))
3755		return;
3756
3757	/* set the matching work_color */
3758	pwq->work_color = wq->work_color;
3759
3760	/* sync max_active to the current setting */
3761	pwq_adjust_max_active(pwq);
3762
3763	/* link in @pwq */
3764	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3765}
3766
3767/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3768static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3769					const struct workqueue_attrs *attrs)
3770{
3771	struct worker_pool *pool;
3772	struct pool_workqueue *pwq;
3773
3774	lockdep_assert_held(&wq_pool_mutex);
3775
3776	pool = get_unbound_pool(attrs);
3777	if (!pool)
3778		return NULL;
3779
3780	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3781	if (!pwq) {
3782		put_unbound_pool(pool);
3783		return NULL;
3784	}
3785
3786	init_pwq(pwq, wq, pool);
3787	return pwq;
3788}
3789
3790/**
3791 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3792 * @attrs: the wq_attrs of the default pwq of the target workqueue
3793 * @node: the target NUMA node
3794 * @cpu_going_down: if >= 0, the CPU to consider as offline
3795 * @cpumask: outarg, the resulting cpumask
3796 *
3797 * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3798 * @cpu_going_down is >= 0, that cpu is considered offline during
3799 * calculation.  The result is stored in @cpumask.
3800 *
3801 * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3802 * enabled and @node has online CPUs requested by @attrs, the returned
3803 * cpumask is the intersection of the possible CPUs of @node and
3804 * @attrs->cpumask.
3805 *
3806 * The caller is responsible for ensuring that the cpumask of @node stays
3807 * stable.
3808 *
3809 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3810 * %false if equal.
3811 */
3812static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3813				 int cpu_going_down, cpumask_t *cpumask)
3814{
3815	if (!wq_numa_enabled || attrs->no_numa)
3816		goto use_dfl;
3817
3818	/* does @node have any online CPUs @attrs wants? */
3819	cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3820	if (cpu_going_down >= 0)
3821		cpumask_clear_cpu(cpu_going_down, cpumask);
3822
3823	if (cpumask_empty(cpumask))
3824		goto use_dfl;
3825
3826	/* yeap, return possible CPUs in @node that @attrs wants */
3827	cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3828
3829	if (cpumask_empty(cpumask)) {
3830		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3831				"possible intersect\n");
3832		return false;
3833	}
3834
3835	return !cpumask_equal(cpumask, attrs->cpumask);
3836
3837use_dfl:
3838	cpumask_copy(cpumask, attrs->cpumask);
3839	return false;
3840}
3841
3842/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3843static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3844						   int node,
3845						   struct pool_workqueue *pwq)
3846{
3847	struct pool_workqueue *old_pwq;
3848
3849	lockdep_assert_held(&wq_pool_mutex);
3850	lockdep_assert_held(&wq->mutex);
3851
3852	/* link_pwq() can handle duplicate calls */
3853	link_pwq(pwq);
3854
3855	old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3856	rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3857	return old_pwq;
3858}
3859
3860/* context to store the prepared attrs & pwqs before applying */
3861struct apply_wqattrs_ctx {
3862	struct workqueue_struct	*wq;		/* target workqueue */
3863	struct workqueue_attrs	*attrs;		/* attrs to apply */
3864	struct list_head	list;		/* queued for batching commit */
3865	struct pool_workqueue	*dfl_pwq;
3866	struct pool_workqueue	*pwq_tbl[];
3867};
3868
3869/* free the resources after success or abort */
3870static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3871{
3872	if (ctx) {
3873		int node;
3874
3875		for_each_node(node)
3876			put_pwq_unlocked(ctx->pwq_tbl[node]);
3877		put_pwq_unlocked(ctx->dfl_pwq);
3878
3879		free_workqueue_attrs(ctx->attrs);
3880
3881		kfree(ctx);
3882	}
3883}
3884
3885/* allocate the attrs and pwqs for later installation */
3886static struct apply_wqattrs_ctx *
3887apply_wqattrs_prepare(struct workqueue_struct *wq,
3888		      const struct workqueue_attrs *attrs)
3889{
3890	struct apply_wqattrs_ctx *ctx;
3891	struct workqueue_attrs *new_attrs, *tmp_attrs;
3892	int node;
3893
3894	lockdep_assert_held(&wq_pool_mutex);
3895
3896	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
3897
3898	new_attrs = alloc_workqueue_attrs();
3899	tmp_attrs = alloc_workqueue_attrs();
3900	if (!ctx || !new_attrs || !tmp_attrs)
3901		goto out_free;
3902
3903	/*
3904	 * Calculate the attrs of the default pwq.
3905	 * If the user configured cpumask doesn't overlap with the
3906	 * wq_unbound_cpumask, we fall back to the wq_unbound_cpumask.
3907	 */
3908	copy_workqueue_attrs(new_attrs, attrs);
3909	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3910	if (unlikely(cpumask_empty(new_attrs->cpumask)))
3911		cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3912
3913	/*
3914	 * We may create multiple pwqs with differing cpumasks.  Make a
3915	 * copy of @new_attrs which will be modified and used to obtain
3916	 * pools.
3917	 */
3918	copy_workqueue_attrs(tmp_attrs, new_attrs);
3919
3920	/*
3921	 * If something goes wrong during CPU up/down, we'll fall back to
3922	 * the default pwq covering whole @attrs->cpumask.  Always create
3923	 * it even if we don't use it immediately.
3924	 */
3925	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3926	if (!ctx->dfl_pwq)
3927		goto out_free;
3928
3929	for_each_node(node) {
3930		if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3931			ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3932			if (!ctx->pwq_tbl[node])
3933				goto out_free;
3934		} else {
3935			ctx->dfl_pwq->refcnt++;
3936			ctx->pwq_tbl[node] = ctx->dfl_pwq;
3937		}
3938	}
3939
3940	/* save the user configured attrs and sanitize it. */
3941	copy_workqueue_attrs(new_attrs, attrs);
3942	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3943	ctx->attrs = new_attrs;
3944
3945	ctx->wq = wq;
3946	free_workqueue_attrs(tmp_attrs);
3947	return ctx;
3948
3949out_free:
3950	free_workqueue_attrs(tmp_attrs);
3951	free_workqueue_attrs(new_attrs);
3952	apply_wqattrs_cleanup(ctx);
3953	return NULL;
3954}
3955
3956/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3957static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3958{
3959	int node;
3960
3961	/* all pwqs have been created successfully, let's install'em */
3962	mutex_lock(&ctx->wq->mutex);
3963
3964	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3965
3966	/* save the previous pwq and install the new one */
3967	for_each_node(node)
3968		ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3969							  ctx->pwq_tbl[node]);
3970
3971	/* @dfl_pwq might not have been used, ensure it's linked */
3972	link_pwq(ctx->dfl_pwq);
3973	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
3974
3975	mutex_unlock(&ctx->wq->mutex);
3976}
3977
3978static void apply_wqattrs_lock(void)
3979{
3980	/* CPUs should stay stable across pwq creations and installations */
3981	get_online_cpus();
3982	mutex_lock(&wq_pool_mutex);
3983}
3984
3985static void apply_wqattrs_unlock(void)
3986{
3987	mutex_unlock(&wq_pool_mutex);
3988	put_online_cpus();
3989}
3990
3991static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3992					const struct workqueue_attrs *attrs)
3993{
3994	struct apply_wqattrs_ctx *ctx;
3995
3996	/* only unbound workqueues can change attributes */
3997	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3998		return -EINVAL;
3999
4000	/* creating multiple pwqs breaks ordering guarantee */
4001	if (!list_empty(&wq->pwqs)) {
4002		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4003			return -EINVAL;
4004
4005		wq->flags &= ~__WQ_ORDERED;
4006	}
4007
4008	ctx = apply_wqattrs_prepare(wq, attrs);
4009	if (!ctx)
4010		return -ENOMEM;
4011
4012	/* the ctx has been prepared successfully, let's commit it */
4013	apply_wqattrs_commit(ctx);
4014	apply_wqattrs_cleanup(ctx);
4015
4016	return 0;
4017}
4018
4019/**
4020 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4021 * @wq: the target workqueue
4022 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4023 *
4024 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
4025 * machines, this function maps a separate pwq to each NUMA node with
4026 * possible CPUs in @attrs->cpumask so that work items are affine to the
4027 * NUMA node it was issued on.  Older pwqs are released as in-flight work
4028 * items finish.  Note that a work item which repeatedly requeues itself
4029 * back-to-back will stay on its current pwq.
4030 *
4031 * Performs GFP_KERNEL allocations.
4032 *
4033 * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
4034 *
4035 * Return: 0 on success and -errno on failure.
4036 */
4037int apply_workqueue_attrs(struct workqueue_struct *wq,
4038			  const struct workqueue_attrs *attrs)
4039{
4040	int ret;
4041
4042	lockdep_assert_cpus_held();
4043
4044	mutex_lock(&wq_pool_mutex);
4045	ret = apply_workqueue_attrs_locked(wq, attrs);
4046	mutex_unlock(&wq_pool_mutex);
4047
4048	return ret;
4049}
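
/*
 * Illustrative sketch, not part of the original file: applying custom
 * attributes to an unbound workqueue while holding CPU hotplug read
 * exclusion as required above.  my_wq_lower_prio() is hypothetical.
 */
static int my_wq_lower_prio(struct workqueue_struct *unbound_wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	attrs->nice = 19;
	get_online_cpus();
	ret = apply_workqueue_attrs(unbound_wq, attrs);
	put_online_cpus();
	free_workqueue_attrs(attrs);
	return ret;
}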
4050
4051/**
4052 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4053 * @wq: the target workqueue
4054 * @cpu: the CPU coming up or going down
4055 * @online: whether @cpu is coming up or going down
4056 *
4057 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4058 * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
4059 * @wq accordingly.
4060 *
4061 * If NUMA affinity can't be adjusted due to memory allocation failure, it
4062 * falls back to @wq->dfl_pwq which may not be optimal but is always
4063 * correct.
4064 *
4065 * Note that when the last allowed CPU of a NUMA node goes offline for a
4066 * workqueue with a cpumask spanning multiple nodes, the workers which were
4067 * already executing the work items for the workqueue will lose their CPU
4068 * affinity and may execute on any CPU.  This is similar to how per-cpu
4069 * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
4070 * affinity, it's the user's responsibility to flush the work item from
4071 * CPU_DOWN_PREPARE.
4072 */
4073static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4074				   bool online)
4075{
4076	int node = cpu_to_node(cpu);
4077	int cpu_off = online ? -1 : cpu;
4078	struct pool_workqueue *old_pwq = NULL, *pwq;
4079	struct workqueue_attrs *target_attrs;
4080	cpumask_t *cpumask;
4081
4082	lockdep_assert_held(&wq_pool_mutex);
4083
4084	if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4085	    wq->unbound_attrs->no_numa)
4086		return;
4087
4088	/*
4089	 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4090	 * Let's use a preallocated one.  The following buf is protected by
4091	 * CPU hotplug exclusion.
4092	 */
4093	target_attrs = wq_update_unbound_numa_attrs_buf;
4094	cpumask = target_attrs->cpumask;
4095
4096	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4097	pwq = unbound_pwq_by_node(wq, node);
4098
4099	/*
4100	 * Let's determine what needs to be done.  If the target cpumask is
4101	 * different from the default pwq's, we need to compare it to @pwq's
4102	 * and create a new one if they don't match.  If the target cpumask
4103	 * equals the default pwq's, the default pwq should be used.
4104	 */
4105	if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4106		if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4107			return;
4108	} else {
4109		goto use_dfl_pwq;
4110	}
4111
4112	/* create a new pwq */
4113	pwq = alloc_unbound_pwq(wq, target_attrs);
4114	if (!pwq) {
4115		pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4116			wq->name);
4117		goto use_dfl_pwq;
4118	}
4119
4120	/* Install the new pwq. */
4121	mutex_lock(&wq->mutex);
4122	old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4123	goto out_unlock;
4124
4125use_dfl_pwq:
4126	mutex_lock(&wq->mutex);
4127	spin_lock_irq(&wq->dfl_pwq->pool->lock);
4128	get_pwq(wq->dfl_pwq);
4129	spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4130	old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4131out_unlock:
4132	mutex_unlock(&wq->mutex);
4133	put_pwq_unlocked(old_pwq);
4134}
4135
4136static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4137{
4138	bool highpri = wq->flags & WQ_HIGHPRI;
4139	int cpu, ret;
4140
4141	if (!(wq->flags & WQ_UNBOUND)) {
4142		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4143		if (!wq->cpu_pwqs)
4144			return -ENOMEM;
4145
4146		for_each_possible_cpu(cpu) {
4147			struct pool_workqueue *pwq =
4148				per_cpu_ptr(wq->cpu_pwqs, cpu);
4149			struct worker_pool *cpu_pools =
4150				per_cpu(cpu_worker_pools, cpu);
4151
4152			init_pwq(pwq, wq, &cpu_pools[highpri]);
4153
4154			mutex_lock(&wq->mutex);
4155			link_pwq(pwq);
4156			mutex_unlock(&wq->mutex);
4157		}
4158		return 0;
4159	}
4160
4161	get_online_cpus();
4162	if (wq->flags & __WQ_ORDERED) {
4163		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4164		/* there should only be single pwq for ordering guarantee */
4165		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4166			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4167		     "ordering guarantee broken for workqueue %s\n", wq->name);
4168	} else {
4169		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4170	}
4171	put_online_cpus();
4172
4173	return ret;
4174}
4175
4176static int wq_clamp_max_active(int max_active, unsigned int flags,
4177			       const char *name)
4178{
4179	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4180
4181	if (max_active < 1 || max_active > lim)
4182		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4183			max_active, name, 1, lim);
4184
4185	return clamp_val(max_active, 1, lim);
4186}
4187
4188/*
4189 * Workqueues which may be used during memory reclaim should have a rescuer
4190 * to guarantee forward progress.
4191 */
4192static int init_rescuer(struct workqueue_struct *wq)
4193{
4194	struct worker *rescuer;
4195	int ret;
4196
4197	if (!(wq->flags & WQ_MEM_RECLAIM))
4198		return 0;
4199
4200	rescuer = alloc_worker(NUMA_NO_NODE);
4201	if (!rescuer)
4202		return -ENOMEM;
4203
4204	rescuer->rescue_wq = wq;
4205	rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4206	ret = PTR_ERR_OR_ZERO(rescuer->task);
4207	if (ret) {
4208		kfree(rescuer);
4209		return ret;
4210	}
4211
4212	wq->rescuer = rescuer;
4213	kthread_bind_mask(rescuer->task, cpu_possible_mask);
4214	wake_up_process(rescuer->task);
4215
4216	return 0;
4217}
4218
4219__printf(1, 4)
4220struct workqueue_struct *alloc_workqueue(const char *fmt,
4221					 unsigned int flags,
4222					 int max_active, ...)
4223{
4224	size_t tbl_size = 0;
4225	va_list args;
4226	struct workqueue_struct *wq;
4227	struct pool_workqueue *pwq;
4228
4229	/*
4230	 * Unbound && max_active == 1 used to imply ordered, which is no
4231	 * longer the case on NUMA machines due to per-node pools.  While
4232	 * alloc_ordered_workqueue() is the right way to create an ordered
4233	 * workqueue, keep the previous behavior to avoid subtle breakages
4234	 * on NUMA.
4235	 */
4236	if ((flags & WQ_UNBOUND) && max_active == 1)
4237		flags |= __WQ_ORDERED;
4238
4239	/* see the comment above the definition of WQ_POWER_EFFICIENT */
4240	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4241		flags |= WQ_UNBOUND;
4242
4243	/* allocate wq and format name */
4244	if (flags & WQ_UNBOUND)
4245		tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4246
4247	wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4248	if (!wq)
4249		return NULL;
4250
4251	if (flags & WQ_UNBOUND) {
4252		wq->unbound_attrs = alloc_workqueue_attrs();
4253		if (!wq->unbound_attrs)
4254			goto err_free_wq;
4255	}
4256
4257	va_start(args, max_active);
4258	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4259	va_end(args);
4260
4261	max_active = max_active ?: WQ_DFL_ACTIVE;
4262	max_active = wq_clamp_max_active(max_active, flags, wq->name);
4263
4264	/* init wq */
4265	wq->flags = flags;
4266	wq->saved_max_active = max_active;
4267	mutex_init(&wq->mutex);
4268	atomic_set(&wq->nr_pwqs_to_flush, 0);
4269	INIT_LIST_HEAD(&wq->pwqs);
4270	INIT_LIST_HEAD(&wq->flusher_queue);
4271	INIT_LIST_HEAD(&wq->flusher_overflow);
4272	INIT_LIST_HEAD(&wq->maydays);
4273
4274	wq_init_lockdep(wq);
4275	INIT_LIST_HEAD(&wq->list);
4276
4277	if (alloc_and_link_pwqs(wq) < 0)
4278		goto err_unreg_lockdep;
4279
4280	if (wq_online && init_rescuer(wq) < 0)
4281		goto err_destroy;
4282
4283	if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4284		goto err_destroy;
4285
4286	/*
4287	 * wq_pool_mutex protects global freeze state and workqueues list.
4288	 * Grab it, adjust max_active and add the new @wq to workqueues
4289	 * list.
4290	 */
4291	mutex_lock(&wq_pool_mutex);
4292
4293	mutex_lock(&wq->mutex);
4294	for_each_pwq(pwq, wq)
4295		pwq_adjust_max_active(pwq);
4296	mutex_unlock(&wq->mutex);
4297
4298	list_add_tail_rcu(&wq->list, &workqueues);
4299
4300	mutex_unlock(&wq_pool_mutex);
4301
4302	return wq;
4303
4304err_unreg_lockdep:
4305	wq_unregister_lockdep(wq);
4306	wq_free_lockdep(wq);
4307err_free_wq:
4308	free_workqueue_attrs(wq->unbound_attrs);
4309	kfree(wq);
4310	return NULL;
4311err_destroy:
4312	destroy_workqueue(wq);
4313	return NULL;
4314}
4315EXPORT_SYMBOL_GPL(alloc_workqueue);
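
/*
 * Illustrative sketch, not part of the original file: a driver-private
 * workqueue that can make forward progress during memory reclaim and is
 * frozen across suspend.  The my_* names are hypothetical.
 */
static struct workqueue_struct *my_drv_wq;

static int my_drv_create_wq(void)
{
	my_drv_wq = alloc_workqueue("my_drv", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
	return my_drv_wq ? 0 : -ENOMEM;
}

static void my_drv_destroy_wq(void)
{
	destroy_workqueue(my_drv_wq);	/* drains remaining work items first */
}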
4316
4317/**
4318 * destroy_workqueue - safely terminate a workqueue
4319 * @wq: target workqueue
4320 *
4321 * Safely destroy a workqueue. All work currently pending will be done first.
4322 */
4323void destroy_workqueue(struct workqueue_struct *wq)
4324{
4325	struct pool_workqueue *pwq;
4326	int node;
4327
4328	/* drain it before proceeding with destruction */
4329	drain_workqueue(wq);
4330
4331	/* sanity checks */
4332	mutex_lock(&wq->mutex);
4333	for_each_pwq(pwq, wq) {
4334		int i;
4335
4336		for (i = 0; i < WORK_NR_COLORS; i++) {
4337			if (WARN_ON(pwq->nr_in_flight[i])) {
4338				mutex_unlock(&wq->mutex);
4339				show_workqueue_state();
4340				return;
4341			}
4342		}
4343
4344		if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4345		    WARN_ON(pwq->nr_active) ||
4346		    WARN_ON(!list_empty(&pwq->delayed_works))) {
4347			mutex_unlock(&wq->mutex);
4348			show_workqueue_state();
4349			return;
4350		}
4351	}
4352	mutex_unlock(&wq->mutex);
4353
4354	/*
4355	 * wq list is used to freeze wq, remove from list after
4356	 * flushing is complete in case freeze races us.
4357	 */
4358	mutex_lock(&wq_pool_mutex);
4359	list_del_rcu(&wq->list);
4360	mutex_unlock(&wq_pool_mutex);
4361
4362	workqueue_sysfs_unregister(wq);
4363
4364	if (wq->rescuer)
4365		kthread_stop(wq->rescuer->task);
4366
4367	if (!(wq->flags & WQ_UNBOUND)) {
4368		wq_unregister_lockdep(wq);
4369		/*
4370		 * The base ref is never dropped on per-cpu pwqs.  Directly
4371		 * schedule RCU free.
4372		 */
4373		call_rcu(&wq->rcu, rcu_free_wq);
4374	} else {
4375		/*
4376		 * We're the sole accessor of @wq at this point.  Directly
4377		 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4378		 * @wq will be freed when the last pwq is released.
4379		 */
4380		for_each_node(node) {
4381			pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4382			RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4383			put_pwq_unlocked(pwq);
4384		}
4385
4386		/*
4387		 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4388		 * put.  Don't access it afterwards.
4389		 */
4390		pwq = wq->dfl_pwq;
4391		wq->dfl_pwq = NULL;
4392		put_pwq_unlocked(pwq);
4393	}
4394}
4395EXPORT_SYMBOL_GPL(destroy_workqueue);
4396
4397/**
4398 * workqueue_set_max_active - adjust max_active of a workqueue
4399 * @wq: target workqueue
4400 * @max_active: new max_active value.
4401 *
4402 * Set max_active of @wq to @max_active.
4403 *
4404 * CONTEXT:
4405 * Don't call from IRQ context.
4406 */
4407void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4408{
4409	struct pool_workqueue *pwq;
4410
4411	/* disallow meddling with max_active for ordered workqueues */
4412	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4413		return;
4414
4415	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4416
4417	mutex_lock(&wq->mutex);
4418
4419	wq->flags &= ~__WQ_ORDERED;
4420	wq->saved_max_active = max_active;
4421
4422	for_each_pwq(pwq, wq)
4423		pwq_adjust_max_active(pwq);
4424
4425	mutex_unlock(&wq->mutex);
4426}
4427EXPORT_SYMBOL_GPL(workqueue_set_max_active);
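
/*
 * Illustrative sketch, not part of the original file: throttling a
 * hypothetical I/O workqueue at runtime, e.g. from a sysfs knob.
 * Out-of-range values are clamped (with a warning) by
 * wq_clamp_max_active() above.
 */
static void my_io_wq_set_limit(struct workqueue_struct *io_wq, int limit)
{
	workqueue_set_max_active(io_wq, limit);
}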
4428
4429/**
4430 * current_work - retrieve %current task's work struct
4431 *
4432 * Determine if %current task is a workqueue worker and what it's working on.
4433 * Useful to find out the context that the %current task is running in.
4434 *
4435 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4436 */
4437struct work_struct *current_work(void)
4438{
4439	struct worker *worker = current_wq_worker();
4440
4441	return worker ? worker->current_work : NULL;
4442}
4443EXPORT_SYMBOL(current_work);
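
/*
 * Illustrative sketch, not part of the original file: detecting re-entry
 * from a specific work item, e.g. to avoid issuing a synchronous flush of
 * that work from inside its own callback.
 */
static bool my_running_from(struct work_struct *work)
{
	return current_work() == work;
}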
4444
4445/**
4446 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4447 *
4448 * Determine whether %current is a workqueue rescuer.  Can be used from
4449 * work functions to determine whether it's being run off the rescuer task.
4450 *
4451 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4452 */
4453bool current_is_workqueue_rescuer(void)
4454{
4455	struct worker *worker = current_wq_worker();
4456
4457	return worker && worker->rescue_wq;
4458}
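
/*
 * Illustrative sketch, not part of the original file: a work function that
 * shrinks its optional batching when it is run by the rescuer, i.e. under
 * memory pressure with only a single worker guaranteed.
 */
static void my_writeout_fn(struct work_struct *work)
{
	int batch = current_is_workqueue_rescuer() ? 1 : 16;

	pr_debug("writing out in batches of %d\n", batch);
}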
4459
4460/**
4461 * workqueue_congested - test whether a workqueue is congested
4462 * @cpu: CPU in question
4463 * @wq: target workqueue
4464 *
4465 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4466 * no synchronization around this function and the test result is
4467 * unreliable and only useful as advisory hints or for debugging.
4468 *
4469 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4470 * Note that both per-cpu and unbound workqueues may be associated with
4471 * multiple pool_workqueues which have separate congested states.  A
4472 * workqueue being congested on one CPU doesn't mean the workqueue is also
4473 * congested on other CPUs / NUMA nodes.
4474 *
4475 * Return:
4476 * %true if congested, %false otherwise.
4477 */
4478bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4479{
4480	struct pool_workqueue *pwq;
4481	bool ret;
4482
4483	rcu_read_lock();
4484	preempt_disable();
4485
4486	if (cpu == WORK_CPU_UNBOUND)
4487		cpu = smp_processor_id();
4488
4489	if (!(wq->flags & WQ_UNBOUND))
4490		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4491	else
4492		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4493
4494	ret = !list_empty(&pwq->delayed_works);
4495	preempt_enable();
4496	rcu_read_unlock();
4497
4498	return ret;
4499}
4500EXPORT_SYMBOL_GPL(workqueue_congested);
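
/*
 * Illustrative sketch, not part of the original file: using the advisory
 * congestion test to skip optional background work when the local CPU's
 * pool_workqueue already has delayed (inactive) items queued up.
 */
static bool my_should_skip_background(struct workqueue_struct *wq)
{
	/* advisory only - the result may be stale by the time it is used */
	return workqueue_congested(WORK_CPU_UNBOUND, wq);
}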
4501
4502/**
4503 * work_busy - test whether a work is currently pending or running
4504 * @work: the work to be tested
4505 *
4506 * Test whether @work is currently pending or running.  There is no
4507 * synchronization around this function and the test result is
4508 * unreliable and only useful as advisory hints or for debugging.
4509 *
4510 * Return:
4511 * OR'd bitmask of WORK_BUSY_* bits.
4512 */
4513unsigned int work_busy(struct work_struct *work)
4514{
4515	struct worker_pool *pool;
4516	unsigned long flags;
4517	unsigned int ret = 0;
4518
4519	if (work_pending(work))
4520		ret |= WORK_BUSY_PENDING;
4521
4522	rcu_read_lock();
4523	pool = get_work_pool(work);
4524	if (pool) {
4525		spin_lock_irqsave(&pool->lock, flags);
4526		if (find_worker_executing_work(pool, work))
4527			ret |= WORK_BUSY_RUNNING;
4528		spin_unlock_irqrestore(&pool->lock, flags);
4529	}
4530	rcu_read_unlock();
4531
4532	return ret;
4533}
4534EXPORT_SYMBOL_GPL(work_busy);
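
/*
 * Illustrative sketch, not part of the original file: work_busy() is racy,
 * so it is normally only good for debug output like this.
 */
static void my_dump_work_state(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_info("work %ps:%s%s\n", work->func,
		busy & WORK_BUSY_PENDING ? " pending" : "",
		busy & WORK_BUSY_RUNNING ? " running" : "");
}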
4535
4536/**
4537 * set_worker_desc - set description for the current work item
4538 * @fmt: printf-style format string
4539 * @...: arguments for the format string
4540 *
4541 * This function can be called by a running work function to describe what
4542 * the work item is about.  If the worker task gets dumped, this
4543 * information will be printed out together to help debugging.  The
4544 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4545 */
4546void set_worker_desc(const char *fmt, ...)
4547{
4548	struct worker *worker = current_wq_worker();
4549	va_list args;
4550
4551	if (worker) {
4552		va_start(args, fmt);
4553		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4554		va_end(args);
4555	}
4556}
4557EXPORT_SYMBOL_GPL(set_worker_desc);
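
/*
 * Illustrative sketch, not part of the original file: tagging the worker
 * with the request being processed so that a hung-task or sysrq dump shows
 * it.  struct my_req is hypothetical.
 */
struct my_req {
	struct work_struct	work;
	unsigned int		id;
};

static void my_req_work_fn(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	set_worker_desc("my_drv req %u", req->id);
	pr_debug("processing request %u\n", req->id);
}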
4558
4559/**
4560 * print_worker_info - print out worker information and description
4561 * @log_lvl: the log level to use when printing
4562 * @task: target task
4563 *
4564 * If @task is a worker and currently executing a work item, print out the
4565 * name of the workqueue being serviced and worker description set with
4566 * set_worker_desc() by the currently executing work item.
4567 *
4568 * This function can be safely called on any task as long as the
4569 * task_struct itself is accessible.  While safe, this function isn't
4570 * synchronized and may print out mixed-up or garbage data of limited length.
4571 */
4572void print_worker_info(const char *log_lvl, struct task_struct *task)
4573{
4574	work_func_t *fn = NULL;
4575	char name[WQ_NAME_LEN] = { };
4576	char desc[WORKER_DESC_LEN] = { };
4577	struct pool_workqueue *pwq = NULL;
4578	struct workqueue_struct *wq = NULL;
4579	struct worker *worker;
4580
4581	if (!(task->flags & PF_WQ_WORKER))
4582		return;
4583
4584	/*
4585	 * This function is called without any synchronization and @task
4586	 * could be in any state.  Be careful with dereferences.
4587	 */
4588	worker = kthread_probe_data(task);
4589
4590	/*
4591	 * Carefully copy the associated workqueue's workfn, name and desc.
4592	 * Keep the original last '\0' in case the original is garbage.
4593	 */
4594	probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4595	probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4596	probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4597	probe_kernel_read(name, wq->name, sizeof(name) - 1);
4598	probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4599
4600	if (fn || name[0] || desc[0]) {
4601		printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4602		if (strcmp(name, desc))
4603			pr_cont(" (%s)", desc);
4604		pr_cont("\n");
4605	}
4606}
4607
4608static void pr_cont_pool_info(struct worker_pool *pool)
4609{
4610	pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4611	if (pool->node != NUMA_NO_NODE)
4612		pr_cont(" node=%d", pool->node);
4613	pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4614}
4615
4616static void pr_cont_work(bool comma, struct work_struct *work)
4617{
4618	if (work->func == wq_barrier_func) {
4619		struct wq_barrier *barr;
4620
4621		barr = container_of(work, struct wq_barrier, work);
4622
4623		pr_cont("%s BAR(%d)", comma ? "," : "",
4624			task_pid_nr(barr->task));
4625	} else {
4626		pr_cont("%s %ps", comma ? "," : "", work->func);
4627	}
4628}
4629
4630static void show_pwq(struct pool_workqueue *pwq)
4631{
4632	struct worker_pool *pool = pwq->pool;
4633	struct work_struct *work;
4634	struct worker *worker;
4635	bool has_in_flight = false, has_pending = false;
4636	int bkt;
4637
4638	pr_info("  pwq %d:", pool->id);
4639	pr_cont_pool_info(pool);
4640
4641	pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
4642		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4643
4644	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4645		if (worker->current_pwq == pwq) {
4646			has_in_flight = true;
4647			break;
4648		}
4649	}
4650	if (has_in_flight) {
4651		bool comma = false;
4652
4653		pr_info("    in-flight:");
4654		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4655			if (worker->current_pwq != pwq)
4656				continue;
4657
4658			pr_cont("%s %d%s:%ps", comma ? "," : "",
4659				task_pid_nr(worker->task),
4660				worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4661				worker->current_func);
4662			list_for_each_entry(work, &worker->scheduled, entry)
4663				pr_cont_work(false, work);
4664			comma = true;
4665		}
4666		pr_cont("\n");
4667	}
4668
4669	list_for_each_entry(work, &pool->worklist, entry) {
4670		if (get_work_pwq(work) == pwq) {
4671			has_pending = true;
4672			break;
4673		}
4674	}
4675	if (has_pending) {
4676		bool comma = false;
4677
4678		pr_info("    pending:");
4679		list_for_each_entry(work, &pool->worklist, entry) {
4680			if (get_work_pwq(work) != pwq)
4681				continue;
4682
4683			pr_cont_work(comma, work);
4684			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4685		}
4686		pr_cont("\n");
4687	}
4688
4689	if (!list_empty(&pwq->delayed_works)) {
4690		bool comma = false;
4691
4692		pr_info("    delayed:");
4693		list_for_each_entry(work, &pwq->delayed_works, entry) {
4694			pr_cont_work(comma, work);
4695			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4696		}
4697		pr_cont("\n");
4698	}
4699}
4700
4701/**
4702 * show_workqueue_state - dump workqueue state
4703 *
4704 * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4705 * all busy workqueues and pools.
4706 */
4707void show_workqueue_state(void)
4708{
4709	struct workqueue_struct *wq;
4710	struct worker_pool *pool;
4711	unsigned long flags;
4712	int pi;
4713
4714	rcu_read_lock();
4715
4716	pr_info("Showing busy workqueues and worker pools:\n");
4717
4718	list_for_each_entry_rcu(wq, &workqueues, list) {
4719		struct pool_workqueue *pwq;
4720		bool idle = true;
4721
4722		for_each_pwq(pwq, wq) {
4723			if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4724				idle = false;
4725				break;
4726			}
4727		}
4728		if (idle)
4729			continue;
4730
4731		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4732
4733		for_each_pwq(pwq, wq) {
4734			spin_lock_irqsave(&pwq->pool->lock, flags);
4735			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4736				show_pwq(pwq);
4737			spin_unlock_irqrestore(&pwq->pool->lock, flags);
4738			/*
4739			 * We could be printing a lot from atomic context, e.g.
4740			 * sysrq-t -> show_workqueue_state(). Avoid triggering
4741			 * hard lockup.
4742			 */
4743			touch_nmi_watchdog();
4744		}
4745	}
4746
4747	for_each_pool(pool, pi) {
4748		struct worker *worker;
4749		bool first = true;
4750
4751		spin_lock_irqsave(&pool->lock, flags);
4752		if (pool->nr_workers == pool->nr_idle)
4753			goto next_pool;
4754
4755		pr_info("pool %d:", pool->id);
4756		pr_cont_pool_info(pool);
4757		pr_cont(" hung=%us workers=%d",
4758			jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4759			pool->nr_workers);
4760		if (pool->manager)
4761			pr_cont(" manager: %d",
4762				task_pid_nr(pool->manager->task));
4763		list_for_each_entry(worker, &pool->idle_list, entry) {
4764			pr_cont(" %s%d", first ? "idle: " : "",
4765				task_pid_nr(worker->task));
4766			first = false;
4767		}
4768		pr_cont("\n");
4769	next_pool:
4770		spin_unlock_irqrestore(&pool->lock, flags);
4771		/*
4772		 * We could be printing a lot from atomic context, e.g.
4773		 * sysrq-t -> show_workqueue_state(). Avoid triggering
4774		 * hard lockup.
4775		 */
4776		touch_nmi_watchdog();
4777	}
4778
4779	rcu_read_unlock();
4780}
4781
4782/* used to show worker information through /proc/PID/{comm,stat,status} */
4783void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
4784{
4785	int off;
4786
4787	/* always show the actual comm */
4788	off = strscpy(buf, task->comm, size);
4789	if (off < 0)
4790		return;
4791
4792	/* stabilize PF_WQ_WORKER and worker pool association */
4793	mutex_lock(&wq_pool_attach_mutex);
4794
4795	if (task->flags & PF_WQ_WORKER) {
4796		struct worker *worker = kthread_data(task);
4797		struct worker_pool *pool = worker->pool;
4798
4799		if (pool) {
4800			spin_lock_irq(&pool->lock);
4801			/*
4802			 * ->desc tracks information (wq name or
4803			 * set_worker_desc()) for the latest execution.  If
4804			 * current, prepend '+', otherwise '-'.
4805			 */
4806			if (worker->desc[0] != '\0') {
4807				if (worker->current_work)
4808					scnprintf(buf + off, size - off, "+%s",
4809						  worker->desc);
4810				else
4811					scnprintf(buf + off, size - off, "-%s",
4812						  worker->desc);
4813			}
4814			spin_unlock_irq(&pool->lock);
4815		}
4816	}
4817
4818	mutex_unlock(&wq_pool_attach_mutex);
4819}
4820
4821#ifdef CONFIG_SMP
4822
4823/*
4824 * CPU hotplug.
4825 *
4826 * There are two challenges in supporting CPU hotplug.  Firstly, there
4827 * are a lot of assumptions on strong associations among work, pwq and
4828 * pool which make migrating pending and scheduled works very
4829 * difficult to implement without impacting hot paths.  Secondly,
4830 * worker pools serve a mix of short, long and very long running works, making
4831 * blocked draining impractical.
4832 *
4833 * This is solved by allowing the pools to be disassociated from the CPU
4834 * running as an unbound one and allowing it to be reattached later if the
4835 * cpu comes back online.
4836 */
4837
4838static void unbind_workers(int cpu)
4839{
4840	struct worker_pool *pool;
4841	struct worker *worker;
4842
4843	for_each_cpu_worker_pool(pool, cpu) {
4844		mutex_lock(&wq_pool_attach_mutex);
4845		spin_lock_irq(&pool->lock);
4846
4847		/*
4848		 * We've blocked all attach/detach operations. Make all workers
4849		 * unbound and set DISASSOCIATED.  Before this, all workers
4850		 * except for the ones which are still executing works from
4851		 * before the last CPU down must be on the cpu.  After
4852		 * this, they may become diasporas.
4853		 */
4854		for_each_pool_worker(worker, pool)
4855			worker->flags |= WORKER_UNBOUND;
4856
4857		pool->flags |= POOL_DISASSOCIATED;
4858
4859		spin_unlock_irq(&pool->lock);
4860		mutex_unlock(&wq_pool_attach_mutex);
4861
4862		/*
4863		 * Call schedule() so that we cross rq->lock and thus can
4864		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4865		 * This is necessary as scheduler callbacks may be invoked
4866		 * from other cpus.
4867		 */
4868		schedule();
4869
4870		/*
4871		 * Sched callbacks are disabled now.  Zap nr_running.
4872		 * After this, nr_running stays zero and need_more_worker()
4873		 * and keep_working() are always true as long as the
4874		 * worklist is not empty.  This pool now behaves as an
4875		 * unbound (in terms of concurrency management) pool which
4876		 * are served by workers tied to the pool.
4877		 */
4878		atomic_set(&pool->nr_running, 0);
4879
4880		/*
4881		 * With concurrency management just turned off, a busy
4882		 * worker blocking could lead to lengthy stalls.  Kick off
4883		 * unbound chain execution of currently pending work items.
4884		 */
4885		spin_lock_irq(&pool->lock);
4886		wake_up_worker(pool);
4887		spin_unlock_irq(&pool->lock);
4888	}
4889}
4890
4891/**
4892 * rebind_workers - rebind all workers of a pool to the associated CPU
4893 * @pool: pool of interest
4894 *
4895 * @pool->cpu is coming online.  Rebind all workers to the CPU.
4896 */
4897static void rebind_workers(struct worker_pool *pool)
4898{
4899	struct worker *worker;
4900
4901	lockdep_assert_held(&wq_pool_attach_mutex);
4902
4903	/*
4904	 * Restore CPU affinity of all workers.  As all idle workers should
4905	 * be on the run-queue of the associated CPU before any local
4906	 * wake-ups for concurrency management happen, restore CPU affinity
4907	 * of all workers first and then clear UNBOUND.  As we're called
4908	 * from CPU_ONLINE, the following shouldn't fail.
4909	 */
4910	for_each_pool_worker(worker, pool)
4911		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4912						  pool->attrs->cpumask) < 0);
4913
4914	spin_lock_irq(&pool->lock);
4915
4916	pool->flags &= ~POOL_DISASSOCIATED;
4917
4918	for_each_pool_worker(worker, pool) {
4919		unsigned int worker_flags = worker->flags;
4920
4921		/*
4922		 * A bound idle worker should actually be on the runqueue
4923		 * of the associated CPU for local wake-ups targeting it to
4924		 * work.  Kick all idle workers so that they migrate to the
4925		 * associated CPU.  Doing this in the same loop as
4926		 * replacing UNBOUND with REBOUND is safe as no worker will
4927		 * be bound before @pool->lock is released.
4928		 */
4929		if (worker_flags & WORKER_IDLE)
4930			wake_up_process(worker->task);
4931
4932		/*
4933		 * We want to clear UNBOUND but can't directly call
4934		 * worker_clr_flags() or adjust nr_running.  Atomically
4935		 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4936		 * @worker will clear REBOUND using worker_clr_flags() when
4937		 * it initiates the next execution cycle thus restoring
4938		 * concurrency management.  Note that when or whether
4939		 * @worker clears REBOUND doesn't affect correctness.
4940		 *
4941		 * WRITE_ONCE() is necessary because @worker->flags may be
4942		 * tested without holding any lock in
4943		 * wq_worker_running().  Without it, NOT_RUNNING test may
4944		 * fail incorrectly leading to premature concurrency
4945		 * management operations.
4946		 */
4947		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4948		worker_flags |= WORKER_REBOUND;
4949		worker_flags &= ~WORKER_UNBOUND;
4950		WRITE_ONCE(worker->flags, worker_flags);
4951	}
4952
4953	spin_unlock_irq(&pool->lock);
4954}
4955
4956/**
4957 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4958 * @pool: unbound pool of interest
4959 * @cpu: the CPU which is coming up
4960 *
4961 * An unbound pool may end up with a cpumask which doesn't have any online
4962 * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
4963 * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
4964 * online CPU before, cpus_allowed of all its workers should be restored.
4965 */
4966static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4967{
4968	static cpumask_t cpumask;
4969	struct worker *worker;
4970
4971	lockdep_assert_held(&wq_pool_attach_mutex);
4972
4973	/* is @cpu allowed for @pool? */
4974	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4975		return;
4976
4977	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4978
4979	/* as we're called from CPU_ONLINE, the following shouldn't fail */
4980	for_each_pool_worker(worker, pool)
4981		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
4982}
4983
4984int workqueue_prepare_cpu(unsigned int cpu)
4985{
4986	struct worker_pool *pool;
4987
4988	for_each_cpu_worker_pool(pool, cpu) {
4989		if (pool->nr_workers)
4990			continue;
4991		if (!create_worker(pool))
4992			return -ENOMEM;
4993	}
4994	return 0;
4995}
4996
4997int workqueue_online_cpu(unsigned int cpu)
4998{
4999	struct worker_pool *pool;
5000	struct workqueue_struct *wq;
5001	int pi;
5002
5003	mutex_lock(&wq_pool_mutex);
5004
5005	for_each_pool(pool, pi) {
5006		mutex_lock(&wq_pool_attach_mutex);
5007
5008		if (pool->cpu == cpu)
5009			rebind_workers(pool);
5010		else if (pool->cpu < 0)
5011			restore_unbound_workers_cpumask(pool, cpu);
5012
5013		mutex_unlock(&wq_pool_attach_mutex);
5014	}
5015
5016	/* update NUMA affinity of unbound workqueues */
5017	list_for_each_entry(wq, &workqueues, list)
5018		wq_update_unbound_numa(wq, cpu, true);
5019
5020	mutex_unlock(&wq_pool_mutex);
5021	return 0;
5022}
5023
5024int workqueue_offline_cpu(unsigned int cpu)
5025{
5026	struct workqueue_struct *wq;
5027
5028	/* unbinding per-cpu workers should happen on the local CPU */
5029	if (WARN_ON(cpu != smp_processor_id()))
5030		return -1;
5031
5032	unbind_workers(cpu);
5033
5034	/* update NUMA affinity of unbound workqueues */
5035	mutex_lock(&wq_pool_mutex);
5036	list_for_each_entry(wq, &workqueues, list)
5037		wq_update_unbound_numa(wq, cpu, false);
5038	mutex_unlock(&wq_pool_mutex);
5039
5040	return 0;
5041}
5042
5043struct work_for_cpu {
5044	struct work_struct work;
5045	long (*fn)(void *);
5046	void *arg;
5047	long ret;
5048};
5049
5050static void work_for_cpu_fn(struct work_struct *work)
5051{
5052	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5053
5054	wfc->ret = wfc->fn(wfc->arg);
5055}
5056
5057/**
5058 * work_on_cpu - run a function in thread context on a particular cpu
5059 * @cpu: the cpu to run on
5060 * @fn: the function to run
5061 * @arg: the function arg
5062 *
5063 * It is up to the caller to ensure that the cpu doesn't go offline.
5064 * The caller must not hold any locks which would prevent @fn from completing.
5065 *
5066 * Return: The value @fn returns.
5067 */
5068long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5069{
5070	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5071
5072	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5073	schedule_work_on(cpu, &wfc.work);
5074	flush_work(&wfc.work);
5075	destroy_work_on_stack(&wfc.work);
5076	return wfc.ret;
5077}
5078EXPORT_SYMBOL_GPL(work_on_cpu);
5079
5080/**
5081 * work_on_cpu_safe - run a function in thread context on a particular cpu
5082 * @cpu: the cpu to run on
5083 * @fn:  the function to run
5084 * @arg: the function argument
5085 *
5086 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5087 * any locks which would prevent @fn from completing.
5088 *
5089 * Return: The value @fn returns.
5090 */
5091long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5092{
5093	long ret = -ENODEV;
5094
5095	get_online_cpus();
5096	if (cpu_online(cpu))
5097		ret = work_on_cpu(cpu, fn, arg);
5098	put_online_cpus();
5099	return ret;
5100}
5101EXPORT_SYMBOL_GPL(work_on_cpu_safe);
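
/*
 * Illustrative sketch, not part of the original file: running a CPU-local
 * read on the CPU that owns it.  work_on_cpu_safe() pins hotplug and
 * returns -ENODEV if @cpu is offline.  my_read_on_cpu() is hypothetical.
 */
static long my_cpu_local_read(void *arg)
{
	unsigned long *out = arg;

	*out = raw_smp_processor_id();	/* stand-in for a CPU-local register */
	return 0;
}

static long my_read_on_cpu(int cpu, unsigned long *out)
{
	return work_on_cpu_safe(cpu, my_cpu_local_read, out);
}
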
5102#endif /* CONFIG_SMP */
5103
5104#ifdef CONFIG_FREEZER
5105
5106/**
5107 * freeze_workqueues_begin - begin freezing workqueues
5108 *
5109 * Start freezing workqueues.  After this function returns, all freezable
5110 * workqueues will queue new works to their delayed_works list instead of
5111 * pool->worklist.
5112 *
5113 * CONTEXT:
5114 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5115 */
5116void freeze_workqueues_begin(void)
5117{
5118	struct workqueue_struct *wq;
5119	struct pool_workqueue *pwq;
5120
5121	mutex_lock(&wq_pool_mutex);
5122
5123	WARN_ON_ONCE(workqueue_freezing);
5124	workqueue_freezing = true;
5125
5126	list_for_each_entry(wq, &workqueues, list) {
5127		mutex_lock(&wq->mutex);
5128		for_each_pwq(pwq, wq)
5129			pwq_adjust_max_active(pwq);
5130		mutex_unlock(&wq->mutex);
5131	}
5132
5133	mutex_unlock(&wq_pool_mutex);
5134}
5135
5136/**
5137 * freeze_workqueues_busy - are freezable workqueues still busy?
5138 *
5139 * Check whether freezing is complete.  This function must be called
5140 * between freeze_workqueues_begin() and thaw_workqueues().
5141 *
5142 * CONTEXT:
5143 * Grabs and releases wq_pool_mutex.
5144 *
5145 * Return:
5146 * %true if some freezable workqueues are still busy.  %false if freezing
5147 * is complete.
5148 */
5149bool freeze_workqueues_busy(void)
5150{
5151	bool busy = false;
5152	struct workqueue_struct *wq;
5153	struct pool_workqueue *pwq;
5154
5155	mutex_lock(&wq_pool_mutex);
5156
5157	WARN_ON_ONCE(!workqueue_freezing);
5158
5159	list_for_each_entry(wq, &workqueues, list) {
5160		if (!(wq->flags & WQ_FREEZABLE))
5161			continue;
5162		/*
5163		 * nr_active is monotonically decreasing.  It's safe
5164		 * to peek without lock.
5165		 */
5166		rcu_read_lock();
5167		for_each_pwq(pwq, wq) {
5168			WARN_ON_ONCE(pwq->nr_active < 0);
5169			if (pwq->nr_active) {
5170				busy = true;
5171				rcu_read_unlock();
5172				goto out_unlock;
5173			}
5174		}
5175		rcu_read_unlock();
5176	}
5177out_unlock:
5178	mutex_unlock(&wq_pool_mutex);
5179	return busy;
5180}
5181
5182/**
5183 * thaw_workqueues - thaw workqueues
5184 *
5185 * Thaw workqueues.  Normal queueing is restored and all collected
5186 * frozen works are transferred to their respective pool worklists.
5187 *
5188 * CONTEXT:
5189 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5190 */
5191void thaw_workqueues(void)
5192{
5193	struct workqueue_struct *wq;
5194	struct pool_workqueue *pwq;
5195
5196	mutex_lock(&wq_pool_mutex);
5197
5198	if (!workqueue_freezing)
5199		goto out_unlock;
5200
5201	workqueue_freezing = false;
5202
5203	/* restore max_active and repopulate worklist */
5204	list_for_each_entry(wq, &workqueues, list) {
5205		mutex_lock(&wq->mutex);
5206		for_each_pwq(pwq, wq)
5207			pwq_adjust_max_active(pwq);
5208		mutex_unlock(&wq->mutex);
5209	}
5210
5211out_unlock:
5212	mutex_unlock(&wq_pool_mutex);
5213}
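
/*
 * Illustrative sketch, not part of the original file: the freezer drives the
 * three functions above roughly as begin -> poll busy -> thaw on failure.
 * This is a simplified stand-in, not the PM core's actual code.
 */
static int my_try_freeze_workqueues(unsigned int max_tries)
{
	unsigned int i;

	freeze_workqueues_begin();

	for (i = 0; i < max_tries; i++) {
		if (!freeze_workqueues_busy())
			return 0;	/* all freezable workqueues quiesced */
		cond_resched();
	}

	thaw_workqueues();		/* give up and restore normal queueing */
	return -EBUSY;
}
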
5214#endif /* CONFIG_FREEZER */
5215
5216static int workqueue_apply_unbound_cpumask(void)
5217{
5218	LIST_HEAD(ctxs);
5219	int ret = 0;
5220	struct workqueue_struct *wq;
5221	struct apply_wqattrs_ctx *ctx, *n;
5222
5223	lockdep_assert_held(&wq_pool_mutex);
 
5224
5225	list_for_each_entry(wq, &workqueues, list) {
5226		if (!(wq->flags & WQ_UNBOUND))
5227			continue;
5228		/* creating multiple pwqs breaks ordering guarantee */
5229		if (wq->flags & __WQ_ORDERED)
5230			continue;
5231
5232		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5233		if (!ctx) {
5234			ret = -ENOMEM;
5235			break;
5236		}
5237
5238		list_add_tail(&ctx->list, &ctxs);
5239	}
5240
5241	list_for_each_entry_safe(ctx, n, &ctxs, list) {
5242		if (!ret)
5243			apply_wqattrs_commit(ctx);
5244		apply_wqattrs_cleanup(ctx);
5245	}
5246
5247	return ret;
5248}
5249
5250/**
5251 *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5252 *  @cpumask: the cpumask to set
5253 *
5254 *  The low-level workqueues cpumask is a global cpumask that limits
5255 *  the affinity of all unbound workqueues.  This function checks the @cpumask,
5256 *  applies it to all unbound workqueues and updates all their pwqs.
5257 *
5258 *  Return:	0	- Success
5259 *  		-EINVAL	- Invalid @cpumask
5260 *  		-ENOMEM	- Failed to allocate memory for attrs or pwqs.
5261 */
5262int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5263{
5264	int ret = -EINVAL;
5265	cpumask_var_t saved_cpumask;
5266
5267	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
5268		return -ENOMEM;
5269
5270	/*
5271	 * Not excluding isolated cpus on purpose.
5272	 * If the user wishes to include them, we allow that.
5273	 */
5274	cpumask_and(cpumask, cpumask, cpu_possible_mask);
5275	if (!cpumask_empty(cpumask)) {
5276		apply_wqattrs_lock();
5277
5278		/* save the old wq_unbound_cpumask. */
5279		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5280
5281		/* update wq_unbound_cpumask at first and apply it to wqs. */
5282		cpumask_copy(wq_unbound_cpumask, cpumask);
5283		ret = workqueue_apply_unbound_cpumask();
5284
5285		/* restore the wq_unbound_cpumask when failed. */
5286		if (ret < 0)
5287			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5288
5289		apply_wqattrs_unlock();
5290	}
5291
5292	free_cpumask_var(saved_cpumask);
5293	return ret;
5294}
5295
5296#ifdef CONFIG_SYSFS
5297/*
5298 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5299 * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
5300 * following attributes.
5301 *
5302 *  per_cpu	RO bool	: whether the workqueue is per-cpu or unbound
5303 *  max_active	RW int	: maximum number of in-flight work items
5304 *
5305 * Unbound workqueues have the following extra attributes.
5306 *
5307 *  pool_ids	RO int	: the associated pool IDs for each node
5308 *  nice	RW int	: nice value of the workers
5309 *  cpumask	RW mask	: bitmask of allowed CPUs for the workers
5310 *  numa	RW bool	: whether to enable NUMA affinity
5311 */
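/*
 * Example (illustrative only): a driver that wants the attributes above to
 * be visible and tunable simply passes WQ_SYSFS at allocation time.  The
 * workqueue name "example_wq" is hypothetical.
 */
static struct workqueue_struct *example_create_tunable_wq(void)
{
	/* unbound + sysfs-visible: nice, cpumask and numa become writable */
	return alloc_workqueue("example_wq", WQ_UNBOUND | WQ_SYSFS, 0);
}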
5312struct wq_device {
5313	struct workqueue_struct		*wq;
5314	struct device			dev;
5315};
5316
5317static struct workqueue_struct *dev_to_wq(struct device *dev)
5318{
5319	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5320
5321	return wq_dev->wq;
5322}
5323
5324static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5325			    char *buf)
5326{
5327	struct workqueue_struct *wq = dev_to_wq(dev);
5328
5329	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5330}
5331static DEVICE_ATTR_RO(per_cpu);
5332
5333static ssize_t max_active_show(struct device *dev,
5334			       struct device_attribute *attr, char *buf)
5335{
5336	struct workqueue_struct *wq = dev_to_wq(dev);
5337
5338	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5339}
5340
5341static ssize_t max_active_store(struct device *dev,
5342				struct device_attribute *attr, const char *buf,
5343				size_t count)
5344{
5345	struct workqueue_struct *wq = dev_to_wq(dev);
5346	int val;
5347
5348	if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5349		return -EINVAL;
5350
5351	workqueue_set_max_active(wq, val);
5352	return count;
5353}
5354static DEVICE_ATTR_RW(max_active);
5355
5356static struct attribute *wq_sysfs_attrs[] = {
5357	&dev_attr_per_cpu.attr,
5358	&dev_attr_max_active.attr,
5359	NULL,
5360};
5361ATTRIBUTE_GROUPS(wq_sysfs);
5362
5363static ssize_t wq_pool_ids_show(struct device *dev,
5364				struct device_attribute *attr, char *buf)
5365{
5366	struct workqueue_struct *wq = dev_to_wq(dev);
5367	const char *delim = "";
5368	int node, written = 0;
5369
5370	get_online_cpus();
5371	rcu_read_lock();
5372	for_each_node(node) {
5373		written += scnprintf(buf + written, PAGE_SIZE - written,
5374				     "%s%d:%d", delim, node,
5375				     unbound_pwq_by_node(wq, node)->pool->id);
5376		delim = " ";
5377	}
5378	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5379	rcu_read_unlock();
5380	put_online_cpus();
5381
5382	return written;
5383}
5384
5385static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5386			    char *buf)
5387{
5388	struct workqueue_struct *wq = dev_to_wq(dev);
5389	int written;
5390
5391	mutex_lock(&wq->mutex);
5392	written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5393	mutex_unlock(&wq->mutex);
5394
5395	return written;
5396}
5397
5398/* prepare workqueue_attrs for sysfs store operations */
5399static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5400{
5401	struct workqueue_attrs *attrs;
5402
5403	lockdep_assert_held(&wq_pool_mutex);
5404
5405	attrs = alloc_workqueue_attrs();
5406	if (!attrs)
5407		return NULL;
5408
5409	copy_workqueue_attrs(attrs, wq->unbound_attrs);
5410	return attrs;
5411}
5412
5413static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5414			     const char *buf, size_t count)
5415{
5416	struct workqueue_struct *wq = dev_to_wq(dev);
5417	struct workqueue_attrs *attrs;
5418	int ret = -ENOMEM;
5419
5420	apply_wqattrs_lock();
5421
5422	attrs = wq_sysfs_prep_attrs(wq);
5423	if (!attrs)
5424		goto out_unlock;
5425
5426	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5427	    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5428		ret = apply_workqueue_attrs_locked(wq, attrs);
5429	else
5430		ret = -EINVAL;
5431
5432out_unlock:
5433	apply_wqattrs_unlock();
5434	free_workqueue_attrs(attrs);
5435	return ret ?: count;
5436}
5437
5438static ssize_t wq_cpumask_show(struct device *dev,
5439			       struct device_attribute *attr, char *buf)
5440{
5441	struct workqueue_struct *wq = dev_to_wq(dev);
5442	int written;
5443
5444	mutex_lock(&wq->mutex);
5445	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5446			    cpumask_pr_args(wq->unbound_attrs->cpumask));
5447	mutex_unlock(&wq->mutex);
5448	return written;
5449}
5450
5451static ssize_t wq_cpumask_store(struct device *dev,
5452				struct device_attribute *attr,
5453				const char *buf, size_t count)
5454{
5455	struct workqueue_struct *wq = dev_to_wq(dev);
5456	struct workqueue_attrs *attrs;
5457	int ret = -ENOMEM;
5458
5459	apply_wqattrs_lock();
5460
5461	attrs = wq_sysfs_prep_attrs(wq);
5462	if (!attrs)
5463		goto out_unlock;
5464
5465	ret = cpumask_parse(buf, attrs->cpumask);
5466	if (!ret)
5467		ret = apply_workqueue_attrs_locked(wq, attrs);
5468
5469out_unlock:
5470	apply_wqattrs_unlock();
5471	free_workqueue_attrs(attrs);
5472	return ret ?: count;
5473}
5474
5475static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5476			    char *buf)
5477{
5478	struct workqueue_struct *wq = dev_to_wq(dev);
5479	int written;
5480
5481	mutex_lock(&wq->mutex);
5482	written = scnprintf(buf, PAGE_SIZE, "%d\n",
5483			    !wq->unbound_attrs->no_numa);
5484	mutex_unlock(&wq->mutex);
5485
5486	return written;
5487}
5488
5489static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5490			     const char *buf, size_t count)
5491{
5492	struct workqueue_struct *wq = dev_to_wq(dev);
5493	struct workqueue_attrs *attrs;
5494	int v, ret = -ENOMEM;
5495
5496	apply_wqattrs_lock();
5497
5498	attrs = wq_sysfs_prep_attrs(wq);
5499	if (!attrs)
5500		goto out_unlock;
5501
5502	ret = -EINVAL;
5503	if (sscanf(buf, "%d", &v) == 1) {
5504		attrs->no_numa = !v;
5505		ret = apply_workqueue_attrs_locked(wq, attrs);
5506	}
5507
 
5508out_unlock:
5509	apply_wqattrs_unlock();
5510	free_workqueue_attrs(attrs);
5511	return ret ?: count;
5512}
5513
5514static struct device_attribute wq_sysfs_unbound_attrs[] = {
5515	__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5516	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5517	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5518	__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5519	__ATTR_NULL,
5520};
5521
5522static struct bus_type wq_subsys = {
5523	.name				= "workqueue",
5524	.dev_groups			= wq_sysfs_groups,
5525};
5526
5527static ssize_t wq_unbound_cpumask_show(struct device *dev,
5528		struct device_attribute *attr, char *buf)
5529{
5530	int written;
5531
5532	mutex_lock(&wq_pool_mutex);
5533	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5534			    cpumask_pr_args(wq_unbound_cpumask));
5535	mutex_unlock(&wq_pool_mutex);
5536
5537	return written;
5538}
5539
5540static ssize_t wq_unbound_cpumask_store(struct device *dev,
5541		struct device_attribute *attr, const char *buf, size_t count)
5542{
5543	cpumask_var_t cpumask;
5544	int ret;
5545
5546	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5547		return -ENOMEM;
5548
5549	ret = cpumask_parse(buf, cpumask);
5550	if (!ret)
5551		ret = workqueue_set_unbound_cpumask(cpumask);
5552
5553	free_cpumask_var(cpumask);
5554	return ret ? ret : count;
5555}
5556
5557static struct device_attribute wq_sysfs_cpumask_attr =
5558	__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5559	       wq_unbound_cpumask_store);
5560
5561static int __init wq_sysfs_init(void)
5562{
5563	int err;
5564
5565	err = subsys_virtual_register(&wq_subsys, NULL);
5566	if (err)
5567		return err;
5568
5569	return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5570}
5571core_initcall(wq_sysfs_init);
5572
5573static void wq_device_release(struct device *dev)
5574{
5575	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5576
5577	kfree(wq_dev);
5578}
5579
5580/**
5581 * workqueue_sysfs_register - make a workqueue visible in sysfs
5582 * @wq: the workqueue to register
5583 *
5584 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5585 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5586 * which is the preferred method.
5587 *
5588 * A workqueue user should use this function directly iff it wants to apply
5589 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5590 * apply_workqueue_attrs() may race against userland updating the
5591 * attributes.
5592 *
5593 * Return: 0 on success, -errno on failure.
5594 */
5595int workqueue_sysfs_register(struct workqueue_struct *wq)
5596{
5597	struct wq_device *wq_dev;
5598	int ret;
5599
5600	/*
5601	 * Adjusting max_active or creating new pwqs by applying
5602	 * attributes breaks ordering guarantee.  Disallow exposing ordered
5603	 * workqueues.
5604	 */
5605	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5606		return -EINVAL;
5607
5608	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5609	if (!wq_dev)
5610		return -ENOMEM;
5611
5612	wq_dev->wq = wq;
5613	wq_dev->dev.bus = &wq_subsys;
5614	wq_dev->dev.release = wq_device_release;
5615	dev_set_name(&wq_dev->dev, "%s", wq->name);
5616
5617	/*
5618	 * unbound_attrs are created separately.  Suppress uevent until
5619	 * everything is ready.
5620	 */
5621	dev_set_uevent_suppress(&wq_dev->dev, true);
5622
5623	ret = device_register(&wq_dev->dev);
5624	if (ret) {
5625		put_device(&wq_dev->dev);
5626		wq->wq_dev = NULL;
5627		return ret;
5628	}
5629
5630	if (wq->flags & WQ_UNBOUND) {
5631		struct device_attribute *attr;
5632
5633		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5634			ret = device_create_file(&wq_dev->dev, attr);
5635			if (ret) {
5636				device_unregister(&wq_dev->dev);
5637				wq->wq_dev = NULL;
5638				return ret;
5639			}
5640		}
5641	}
5642
5643	dev_set_uevent_suppress(&wq_dev->dev, false);
5644	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5645	return 0;
5646}
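/*
 * Illustrative-only sketch of the ordering recommended above: apply
 * workqueue_attrs first, register in sysfs afterwards, so userland can
 * never race against the initial attribute setup.  All names are
 * hypothetical, and this assumes the caller can reach
 * apply_workqueue_attrs() and friends (they are available within this file).
 */
static struct workqueue_struct *example_wq_with_preset_attrs(void)
{
	struct workqueue_struct *wq;
	struct workqueue_attrs *attrs;

	/* note: WQ_SYSFS is *not* passed - registration is done by hand */
	wq = alloc_workqueue("example_attr_wq", WQ_UNBOUND, 0);
	if (!wq)
		return NULL;

	attrs = alloc_workqueue_attrs();
	if (attrs) {
		attrs->nice = -5;
		apply_workqueue_attrs(wq, attrs);
		free_workqueue_attrs(attrs);
	}

	if (workqueue_sysfs_register(wq))
		pr_warn("example: sysfs registration failed\n");
	return wq;
}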
5647
5648/**
5649 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5650 * @wq: the workqueue to unregister
5651 *
5652 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5653 */
5654static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5655{
5656	struct wq_device *wq_dev = wq->wq_dev;
5657
5658	if (!wq->wq_dev)
5659		return;
5660
5661	wq->wq_dev = NULL;
5662	device_unregister(&wq_dev->dev);
5663}
5664#else	/* CONFIG_SYSFS */
5665static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
5666#endif	/* CONFIG_SYSFS */
5667
5668/*
5669 * Workqueue watchdog.
5670 *
5671 * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an illegal
5672 * flush dependency, or a concurrency managed work item which stays RUNNING
5673 * indefinitely.  Workqueue stalls can be very difficult to debug as the
5674 * usual warning mechanisms don't trigger and internal workqueue state is
5675 * largely opaque.
5676 *
5677 * Workqueue watchdog monitors all worker pools periodically and dumps
5678 * state if some pools fail to make forward progress for a while, where
5679 * forward progress is defined as the first item on ->worklist changing.
5680 *
5681 * This mechanism is controlled through the kernel parameter
5682 * "workqueue.watchdog_thresh" which can be updated at runtime through the
5683 * corresponding sysfs parameter file.
5684 */
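/*
 * Usage sketch (illustrative only): a code path which legitimately keeps a
 * CPU busy without scheduling can pet the workqueue watchdog so that no
 * false lockup report is emitted; the threshold itself comes from the
 * "workqueue.watchdog_thresh" parameter described above.  The polling loop
 * below is hypothetical.
 */
static void example_long_polling_loop(unsigned long timeout_jiffies)
{
	unsigned long end = jiffies + timeout_jiffies;

	while (time_before(jiffies, end)) {
		cpu_relax();
		/* pet the soft/hard lockup detectors ... */
		touch_nmi_watchdog();
		/* ... and the workqueue watchdog implemented below */
		wq_watchdog_touch(raw_smp_processor_id());
	}
}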
5685#ifdef CONFIG_WQ_WATCHDOG
5686
5687static unsigned long wq_watchdog_thresh = 30;
5688static struct timer_list wq_watchdog_timer;
5689
5690static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5691static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5692
5693static void wq_watchdog_reset_touched(void)
5694{
5695	int cpu;
5696
5697	wq_watchdog_touched = jiffies;
5698	for_each_possible_cpu(cpu)
5699		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5700}
5701
5702static void wq_watchdog_timer_fn(struct timer_list *unused)
5703{
5704	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5705	bool lockup_detected = false;
5706	struct worker_pool *pool;
5707	int pi;
5708
5709	if (!thresh)
5710		return;
5711
5712	rcu_read_lock();
5713
5714	for_each_pool(pool, pi) {
5715		unsigned long pool_ts, touched, ts;
5716
5717		if (list_empty(&pool->worklist))
5718			continue;
5719
5720		/* get the latest of pool and touched timestamps */
5721		pool_ts = READ_ONCE(pool->watchdog_ts);
5722		touched = READ_ONCE(wq_watchdog_touched);
5723
5724		if (time_after(pool_ts, touched))
5725			ts = pool_ts;
5726		else
5727			ts = touched;
5728
5729		if (pool->cpu >= 0) {
5730			unsigned long cpu_touched =
5731				READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5732						  pool->cpu));
5733			if (time_after(cpu_touched, ts))
5734				ts = cpu_touched;
5735		}
5736
5737		/* did we stall? */
5738		if (time_after(jiffies, ts + thresh)) {
5739			lockup_detected = true;
5740			pr_emerg("BUG: workqueue lockup - pool");
5741			pr_cont_pool_info(pool);
5742			pr_cont(" stuck for %us!\n",
5743				jiffies_to_msecs(jiffies - pool_ts) / 1000);
5744		}
5745	}
5746
5747	rcu_read_unlock();
5748
5749	if (lockup_detected)
5750		show_workqueue_state();
5751
5752	wq_watchdog_reset_touched();
5753	mod_timer(&wq_watchdog_timer, jiffies + thresh);
5754}
5755
5756notrace void wq_watchdog_touch(int cpu)
5757{
5758	if (cpu >= 0)
5759		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5760	else
5761		wq_watchdog_touched = jiffies;
5762}
5763
5764static void wq_watchdog_set_thresh(unsigned long thresh)
5765{
5766	wq_watchdog_thresh = 0;
5767	del_timer_sync(&wq_watchdog_timer);
5768
5769	if (thresh) {
5770		wq_watchdog_thresh = thresh;
5771		wq_watchdog_reset_touched();
5772		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5773	}
5774}
5775
5776static int wq_watchdog_param_set_thresh(const char *val,
5777					const struct kernel_param *kp)
5778{
5779	unsigned long thresh;
5780	int ret;
5781
5782	ret = kstrtoul(val, 0, &thresh);
5783	if (ret)
5784		return ret;
5785
5786	if (system_wq)
5787		wq_watchdog_set_thresh(thresh);
5788	else
5789		wq_watchdog_thresh = thresh;
5790
5791	return 0;
5792}
5793
5794static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5795	.set	= wq_watchdog_param_set_thresh,
5796	.get	= param_get_ulong,
5797};
5798
5799module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5800		0644);
5801
5802static void wq_watchdog_init(void)
5803{
5804	timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5805	wq_watchdog_set_thresh(wq_watchdog_thresh);
5806}
5807
5808#else	/* CONFIG_WQ_WATCHDOG */
5809
5810static inline void wq_watchdog_init(void) { }
5811
5812#endif	/* CONFIG_WQ_WATCHDOG */
5813
5814static void __init wq_numa_init(void)
5815{
5816	cpumask_var_t *tbl;
5817	int node, cpu;
5818
5819	if (num_possible_nodes() <= 1)
5820		return;
5821
5822	if (wq_disable_numa) {
5823		pr_info("workqueue: NUMA affinity support disabled\n");
5824		return;
5825	}
5826
5827	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
5828	BUG_ON(!wq_update_unbound_numa_attrs_buf);
5829
5830	/*
5831	 * We want masks of possible CPUs of each node, which aren't readily
5832	 * available.  Build one from cpu_to_node() which should have been
5833	 * fully initialized by now.
5834	 */
5835	tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
5836	BUG_ON(!tbl);
5837
5838	for_each_node(node)
5839		BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5840				node_online(node) ? node : NUMA_NO_NODE));
5841
5842	for_each_possible_cpu(cpu) {
5843		node = cpu_to_node(cpu);
5844		if (WARN_ON(node == NUMA_NO_NODE)) {
5845			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5846			/* happens iff arch is bonkers, let's just proceed */
5847			return;
5848		}
5849		cpumask_set_cpu(cpu, tbl[node]);
5850	}
5851
5852	wq_numa_possible_cpumask = tbl;
5853	wq_numa_enabled = true;
5854}
5855
5856/**
5857 * workqueue_init_early - early init for workqueue subsystem
5858 *
5859 * This is the first half of two-staged workqueue subsystem initialization
5860 * and invoked as soon as the bare basics - memory allocation, cpumasks and
5861 * idr are up.  It sets up all the data structures and system workqueues
5862 * and allows early boot code to create workqueues and queue/cancel work
5863 * items.  Actual work item execution starts only after kthreads can be
5864 * created and scheduled right before early initcalls.
5865 */
5866int __init workqueue_init_early(void)
5867{
5868	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5869	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
5870	int i, cpu;
5871
5872	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 
 
5873
5874	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5875	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
 
5876
5877	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
 
5878
5879	/* initialize CPU pools */
5880	for_each_possible_cpu(cpu) {
5881		struct worker_pool *pool;
5882
5883		i = 0;
5884		for_each_cpu_worker_pool(pool, cpu) {
5885			BUG_ON(init_worker_pool(pool));
5886			pool->cpu = cpu;
5887			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5888			pool->attrs->nice = std_nice[i++];
5889			pool->node = cpu_to_node(cpu);
5890
5891			/* alloc pool ID */
5892			mutex_lock(&wq_pool_mutex);
5893			BUG_ON(worker_pool_assign_id(pool));
5894			mutex_unlock(&wq_pool_mutex);
5895		}
5896	}
5897
5898	/* create default unbound and ordered wq attrs */
5899	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5900		struct workqueue_attrs *attrs;
5901
5902		BUG_ON(!(attrs = alloc_workqueue_attrs()));
5903		attrs->nice = std_nice[i];
5904		unbound_std_wq_attrs[i] = attrs;
5905
5906		/*
5907		 * An ordered wq should have only one pwq as ordering is
5908		 * guaranteed by max_active which is enforced by pwqs.
5909		 * Turn off NUMA so that dfl_pwq is used for all nodes.
5910		 */
5911		BUG_ON(!(attrs = alloc_workqueue_attrs()));
5912		attrs->nice = std_nice[i];
5913		attrs->no_numa = true;
5914		ordered_wq_attrs[i] = attrs;
5915	}
5916
5917	system_wq = alloc_workqueue("events", 0, 0);
5918	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5919	system_long_wq = alloc_workqueue("events_long", 0, 0);
 
5920	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5921					    WQ_UNBOUND_MAX_ACTIVE);
5922	system_freezable_wq = alloc_workqueue("events_freezable",
5923					      WQ_FREEZABLE, 0);
5924	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5925					      WQ_POWER_EFFICIENT, 0);
5926	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5927					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5928					      0);
5929	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5930	       !system_unbound_wq || !system_freezable_wq ||
5931	       !system_power_efficient_wq ||
5932	       !system_freezable_power_efficient_wq);
5933
5934	return 0;
5935}
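/*
 * Illustrative-only sketch of what the comment above permits: once
 * workqueue_init_early() has run, early boot code may already queue work
 * items; they simply sit on the worklists until workqueue_init() creates
 * the first kworkers.  The work item and handler below are hypothetical.
 */
static void example_early_boot_fn(struct work_struct *work)
{
	pr_info("executed once kworkers exist\n");
}

static DECLARE_WORK(example_early_boot_work, example_early_boot_fn);

static void example_early_boot_queue(void)
{
	/* legal anywhere between workqueue_init_early() and workqueue_init() */
	schedule_work(&example_early_boot_work);
}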
5936
5937/**
5938 * workqueue_init - bring workqueue subsystem fully online
5939 *
5940 * This is the latter half of two-staged workqueue subsystem initialization
5941 * and invoked as soon as kthreads can be created and scheduled.
5942 * Workqueues have been created and work items queued on them, but there
5943 * are no kworkers executing the work items yet.  Populate the worker pools
5944 * with the initial workers and enable future kworker creations.
5945 */
5946int __init workqueue_init(void)
5947{
5948	struct workqueue_struct *wq;
5949	struct worker_pool *pool;
5950	int cpu, bkt;
5951
5952	/*
5953	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
5954	 * CPU to node mapping may not be available that early on some
5955	 * archs such as power and arm64.  As per-cpu pools created
5956 * previously could be missing the node hint and unbound pools the NUMA
5957 * affinity, fix them up.
5958	 *
5959	 * Also, while iterating workqueues, create rescuers if requested.
5960	 */
5961	wq_numa_init();
5962
5963	mutex_lock(&wq_pool_mutex);
5964
5965	for_each_possible_cpu(cpu) {
5966		for_each_cpu_worker_pool(pool, cpu) {
5967			pool->node = cpu_to_node(cpu);
5968		}
5969	}
5970
5971	list_for_each_entry(wq, &workqueues, list) {
5972		wq_update_unbound_numa(wq, smp_processor_id(), true);
5973		WARN(init_rescuer(wq),
5974		     "workqueue: failed to create early rescuer for %s",
5975		     wq->name);
5976	}
5977
5978	mutex_unlock(&wq_pool_mutex);
5979
5980	/* create the initial workers */
5981	for_each_online_cpu(cpu) {
5982		for_each_cpu_worker_pool(pool, cpu) {
5983			pool->flags &= ~POOL_DISASSOCIATED;
5984			BUG_ON(!create_worker(pool));
5985		}
5986	}
5987
5988	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
5989		BUG_ON(!create_worker(pool));
5990
5991	wq_online = true;
5992	wq_watchdog_init();
5993
5994	return 0;
5995}
v3.1
 
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002		Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010		SUSE Linux Products GmbH
  15 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
  16 *
  17 * This is the generic async execution mechanism.  Work items are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There is one worker pool for each CPU and
  20 * one extra for works which are better served by workers which are
  21 * not bound to any specific CPU.
 
  22 *
  23 * Please read Documentation/workqueue.txt for details.
  24 */
  25
  26#include <linux/module.h>
  27#include <linux/kernel.h>
  28#include <linux/sched.h>
  29#include <linux/init.h>
  30#include <linux/signal.h>
  31#include <linux/completion.h>
  32#include <linux/workqueue.h>
  33#include <linux/slab.h>
  34#include <linux/cpu.h>
  35#include <linux/notifier.h>
  36#include <linux/kthread.h>
  37#include <linux/hardirq.h>
  38#include <linux/mempolicy.h>
  39#include <linux/freezer.h>
  40#include <linux/kallsyms.h>
  41#include <linux/debug_locks.h>
  42#include <linux/lockdep.h>
  43#include <linux/idr.h>
 
 
 
 
 
 
 
 
  44
  45#include "workqueue_sched.h"
  46
  47enum {
  48	/* global_cwq flags */
  49	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
  50	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
  51	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
  52	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
  53	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */
 
 
 
 
 
 
 
 
 
 
 
 
  54
  55	/* worker flags */
  56	WORKER_STARTED		= 1 << 0,	/* started */
  57	WORKER_DIE		= 1 << 1,	/* die die die */
  58	WORKER_IDLE		= 1 << 2,	/* is idle */
  59	WORKER_PREP		= 1 << 3,	/* preparing to run works */
  60	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
  61	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
  62	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
  63	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
 
  64
  65	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
  66				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
  67
  68	/* gcwq->trustee_state */
  69	TRUSTEE_START		= 0,		/* start */
  70	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
  71	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
  72	TRUSTEE_RELEASE		= 3,		/* release workers */
  73	TRUSTEE_DONE		= 4,		/* trustee is done */
  74
 
  75	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
  76	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
  77	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
  78
  79	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
  80	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
  81
  82	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  83						/* call for help after 10ms
  84						   (min two ticks) */
  85	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
  86	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
  87	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
  88
  89	/*
   90	 * Rescue workers are used only in emergencies and shared by
  91	 * all cpus.  Give -20.
  92	 */
  93	RESCUER_NICE_LEVEL	= -20,
 
 
 
  94};
  95
  96/*
  97 * Structure fields follow one of the following exclusion rules.
  98 *
  99 * I: Modifiable by initialization/destruction paths and read-only for
 100 *    everyone else.
 101 *
 102 * P: Preemption protected.  Disabling preemption is enough and should
 103 *    only be modified and accessed from the local cpu.
 104 *
 105 * L: gcwq->lock protected.  Access with gcwq->lock held.
 
 
 
 
 
 
 
 106 *
 107 * X: During normal operation, modification requires gcwq->lock and
 108 *    should be done only from local cpu.  Either disabling preemption
 109 *    on local cpu or grabbing gcwq->lock is enough for read access.
 110 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 111 *
 112 * F: wq->flush_mutex protected.
 113 *
 114 * W: workqueue_lock protected.
 
 
 
 
 
 
 
 
 
 115 */
 116
 117struct global_cwq;
 118
 119/*
 120 * The poor guys doing the actual heavy lifting.  All on-duty workers
 121 * are either serving the manager role, on idle list or on busy hash.
 122 */
 123struct worker {
 124	/* on idle list while idle, on busy hash table while busy */
 125	union {
 126		struct list_head	entry;	/* L: while idle */
 127		struct hlist_node	hentry;	/* L: while busy */
 128	};
 129
 130	struct work_struct	*current_work;	/* L: work being processed */
 131	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 132	struct list_head	scheduled;	/* L: scheduled works */
 133	struct task_struct	*task;		/* I: worker task */
 134	struct global_cwq	*gcwq;		/* I: the associated gcwq */
 135	/* 64 bytes boundary on 64bit, 32 on 32bit */
 136	unsigned long		last_active;	/* L: last active timestamp */
 137	unsigned int		flags;		/* X: flags */
 138	int			id;		/* I: worker id */
 139	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
 140};
 141
 142/*
 143 * Global per-cpu workqueue.  There's one and only one for each cpu
 144 * and all works are queued and processed here regardless of their
 145 * target workqueues.
 146 */
 147struct global_cwq {
 148	spinlock_t		lock;		/* the gcwq lock */
 149	struct list_head	worklist;	/* L: list of pending works */
 150	unsigned int		cpu;		/* I: the associated cpu */
 151	unsigned int		flags;		/* L: GCWQ_* flags */
 152
 153	int			nr_workers;	/* L: total number of workers */
 154	int			nr_idle;	/* L: currently idle ones */
 155
 156	/* workers are chained either in the idle_list or busy_hash */
 157	struct list_head	idle_list;	/* X: list of idle workers */
 158	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
 
 
 
 
 159						/* L: hash of busy workers */
 160
 161	struct timer_list	idle_timer;	/* L: worker idle timeout */
 162	struct timer_list	mayday_timer;	/* L: SOS timer for dworkers */
 
 
 
 
 
 
 
 163
 164	struct ida		worker_ida;	/* L: for worker IDs */
 
 
 
 
 
 165
 166	struct task_struct	*trustee;	/* L: for gcwq shutdown */
 167	unsigned int		trustee_state;	/* L: trustee state */
 168	wait_queue_head_t	trustee_wait;	/* trustee wait */
 169	struct worker		*first_idle;	/* L: first idle worker */
 
 170} ____cacheline_aligned_in_smp;
 171
 172/*
 173 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 174 * work_struct->data are used for flags and thus cwqs need to be
 175 * aligned at two's power of the number of flag bits.
 
 176 */
 177struct cpu_workqueue_struct {
 178	struct global_cwq	*gcwq;		/* I: the associated gcwq */
 179	struct workqueue_struct *wq;		/* I: the owning workqueue */
 180	int			work_color;	/* L: current color */
 181	int			flush_color;	/* L: flushing color */
 
 182	int			nr_in_flight[WORK_NR_COLORS];
 183						/* L: nr of in_flight works */
 184	int			nr_active;	/* L: nr of active works */
 185	int			max_active;	/* L: max active works */
 186	struct list_head	delayed_works;	/* L: delayed works */
 187};
 
 
 
 
 
 
 
 
 
 
 
 188
 189/*
 190 * Structure used to wait for workqueue flush.
 191 */
 192struct wq_flusher {
 193	struct list_head	list;		/* F: list of flushers */
 194	int			flush_color;	/* F: flush color waiting for */
 195	struct completion	done;		/* flush completion */
 196};
 197
 198/*
 199 * All cpumasks are assumed to be always set on UP and thus can't be
 200 * used to determine whether there's something to be done.
 201 */
 202#ifdef CONFIG_SMP
 203typedef cpumask_var_t mayday_mask_t;
 204#define mayday_test_and_set_cpu(cpu, mask)	\
 205	cpumask_test_and_set_cpu((cpu), (mask))
 206#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
 207#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
 208#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
 209#define free_mayday_mask(mask)			free_cpumask_var((mask))
 210#else
 211typedef unsigned long mayday_mask_t;
 212#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
 213#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
 214#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
 215#define alloc_mayday_mask(maskp, gfp)		true
 216#define free_mayday_mask(mask)			do { } while (0)
 217#endif
 218
 219/*
 220 * The externally visible workqueue abstraction is an array of
 221 * per-CPU workqueues:
 222 */
 223struct workqueue_struct {
 224	unsigned int		flags;		/* W: WQ_* flags */
 225	union {
 226		struct cpu_workqueue_struct __percpu	*pcpu;
 227		struct cpu_workqueue_struct		*single;
 228		unsigned long				v;
 229	} cpu_wq;				/* I: cwq's */
 230	struct list_head	list;		/* W: list of all workqueues */
 231
 232	struct mutex		flush_mutex;	/* protects wq flushing */
 233	int			work_color;	/* F: current work color */
 234	int			flush_color;	/* F: current flush color */
 235	atomic_t		nr_cwqs_to_flush; /* flush in progress */
 236	struct wq_flusher	*first_flusher;	/* F: first flusher */
 237	struct list_head	flusher_queue;	/* F: flush waiters */
 238	struct list_head	flusher_overflow; /* F: flush overflow list */
 239
 240	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
 
 
 
 
 
 
 
 
 241	struct worker		*rescuer;	/* I: rescue worker */
 242
 243	int			nr_drainers;	/* W: drain in progress */
 244	int			saved_max_active; /* W: saved cwq max_active */
 245	const char		*name;		/* I: workqueue name */
 
 
 
 
 
 
 246#ifdef CONFIG_LOCKDEP
 
 
 247	struct lockdep_map	lockdep_map;
 248#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 249};
 250
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 251struct workqueue_struct *system_wq __read_mostly;
 
 
 
 252struct workqueue_struct *system_long_wq __read_mostly;
 253struct workqueue_struct *system_nrt_wq __read_mostly;
 254struct workqueue_struct *system_unbound_wq __read_mostly;
 
 255struct workqueue_struct *system_freezable_wq __read_mostly;
 256EXPORT_SYMBOL_GPL(system_wq);
 257EXPORT_SYMBOL_GPL(system_long_wq);
 258EXPORT_SYMBOL_GPL(system_nrt_wq);
 259EXPORT_SYMBOL_GPL(system_unbound_wq);
 260EXPORT_SYMBOL_GPL(system_freezable_wq);
 
 
 
 
 
 
 
 261
 262#define CREATE_TRACE_POINTS
 263#include <trace/events/workqueue.h>
 264
 265#define for_each_busy_worker(worker, i, pos, gcwq)			\
 266	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
 267		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 268
 269static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 270				  unsigned int sw)
 271{
 272	if (cpu < nr_cpu_ids) {
 273		if (sw & 1) {
 274			cpu = cpumask_next(cpu, mask);
 275			if (cpu < nr_cpu_ids)
 276				return cpu;
 277		}
 278		if (sw & 2)
 279			return WORK_CPU_UNBOUND;
 280	}
 281	return WORK_CPU_NONE;
 282}
 283
 284static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 285				struct workqueue_struct *wq)
 286{
 287	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
 288}
 289
 290/*
 291 * CPU iterators
 292 *
 293 * An extra gcwq is defined for an invalid cpu number
 294 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 295 * specific CPU.  The following iterators are similar to
 296 * for_each_*_cpu() iterators but also considers the unbound gcwq.
 297 *
 298 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 299 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 300 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 301 *				  WORK_CPU_UNBOUND for unbound workqueues
 302 */
 303#define for_each_gcwq_cpu(cpu)						\
 304	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
 305	     (cpu) < WORK_CPU_NONE;					\
 306	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
 307
 308#define for_each_online_gcwq_cpu(cpu)					\
 309	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
 310	     (cpu) < WORK_CPU_NONE;					\
 311	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
 312
 313#define for_each_cwq_cpu(cpu, wq)					\
 314	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
 315	     (cpu) < WORK_CPU_NONE;					\
 316	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
 317
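/*
 * Illustrative-only example of the iterators described above: walk every
 * gcwq CPU slot, including the pseudo WORK_CPU_UNBOUND one.  The helper
 * name is hypothetical.
 */
static unsigned int example_count_gcwq_slots(void)
{
	unsigned int cpu, n = 0;

	for_each_gcwq_cpu(cpu)
		n++;		/* possible CPUs + WORK_CPU_UNBOUND */
	return n;
}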
 318#ifdef CONFIG_DEBUG_OBJECTS_WORK
 319
 320static struct debug_obj_descr work_debug_descr;
 321
 322static void *work_debug_hint(void *addr)
 323{
 324	return ((struct work_struct *) addr)->func;
 325}
 326
 327/*
 328 * fixup_init is called when:
 329 * - an active object is initialized
 330 */
 331static int work_fixup_init(void *addr, enum debug_obj_state state)
 332{
 333	struct work_struct *work = addr;
 334
 335	switch (state) {
 336	case ODEBUG_STATE_ACTIVE:
 337		cancel_work_sync(work);
 338		debug_object_init(work, &work_debug_descr);
 339		return 1;
 340	default:
 341		return 0;
 342	}
 343}
 344
 345/*
 346 * fixup_activate is called when:
 347 * - an active object is activated
 348 * - an unknown object is activated (might be a statically initialized object)
 349 */
 350static int work_fixup_activate(void *addr, enum debug_obj_state state)
 351{
 352	struct work_struct *work = addr;
 353
 354	switch (state) {
 355
 356	case ODEBUG_STATE_NOTAVAILABLE:
 357		/*
 358		 * This is not really a fixup. The work struct was
 359		 * statically initialized. We just make sure that it
 360		 * is tracked in the object tracker.
 361		 */
 362		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
 363			debug_object_init(work, &work_debug_descr);
 364			debug_object_activate(work, &work_debug_descr);
 365			return 0;
 366		}
 367		WARN_ON_ONCE(1);
 368		return 0;
 369
 370	case ODEBUG_STATE_ACTIVE:
 371		WARN_ON(1);
 372
 
 373	default:
 374		return 0;
 375	}
 376}
 377
 378/*
 379 * fixup_free is called when:
 380 * - an active object is freed
 381 */
 382static int work_fixup_free(void *addr, enum debug_obj_state state)
 383{
 384	struct work_struct *work = addr;
 385
 386	switch (state) {
 387	case ODEBUG_STATE_ACTIVE:
 388		cancel_work_sync(work);
 389		debug_object_free(work, &work_debug_descr);
 390		return 1;
 391	default:
 392		return 0;
 393	}
 394}
 395
 396static struct debug_obj_descr work_debug_descr = {
 397	.name		= "work_struct",
 398	.debug_hint	= work_debug_hint,
 
 399	.fixup_init	= work_fixup_init,
 400	.fixup_activate	= work_fixup_activate,
 401	.fixup_free	= work_fixup_free,
 402};
 403
 404static inline void debug_work_activate(struct work_struct *work)
 405{
 406	debug_object_activate(work, &work_debug_descr);
 407}
 408
 409static inline void debug_work_deactivate(struct work_struct *work)
 410{
 411	debug_object_deactivate(work, &work_debug_descr);
 412}
 413
 414void __init_work(struct work_struct *work, int onstack)
 415{
 416	if (onstack)
 417		debug_object_init_on_stack(work, &work_debug_descr);
 418	else
 419		debug_object_init(work, &work_debug_descr);
 420}
 421EXPORT_SYMBOL_GPL(__init_work);
 422
 423void destroy_work_on_stack(struct work_struct *work)
 424{
 425	debug_object_free(work, &work_debug_descr);
 426}
 427EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 428
 
 
 
 
 
 
 
 429#else
 430static inline void debug_work_activate(struct work_struct *work) { }
 431static inline void debug_work_deactivate(struct work_struct *work) { }
 432#endif
 433
 434/* Serializes the accesses to the list of workqueues. */
 435static DEFINE_SPINLOCK(workqueue_lock);
 436static LIST_HEAD(workqueues);
 437static bool workqueue_freezing;		/* W: have wqs started freezing? */
 438
 439/*
 440 * The almighty global cpu workqueues.  nr_running is the only field
 441 * which is expected to be used frequently by other cpus via
 442 * try_to_wake_up().  Put it in a separate cacheline.
 443 */
 444static DEFINE_PER_CPU(struct global_cwq, global_cwq);
 445static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
 
 446
 447/*
 448 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 449 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 450 * workers have WORKER_UNBOUND set.
 451 */
 452static struct global_cwq unbound_global_cwq;
 453static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */
 454
 455static int worker_thread(void *__worker);
 
 
 
 
 
 
 
 456
 457static struct global_cwq *get_gcwq(unsigned int cpu)
 
 
 
 
 
 
 
 
 
 
 
 
 
 458{
 459	if (cpu != WORK_CPU_UNBOUND)
 460		return &per_cpu(global_cwq, cpu);
 461	else
 462		return &unbound_global_cwq;
 463}
 464
 465static atomic_t *get_gcwq_nr_running(unsigned int cpu)
 466{
 467	if (cpu != WORK_CPU_UNBOUND)
 468		return &per_cpu(gcwq_nr_running, cpu);
 469	else
 470		return &unbound_gcwq_nr_running;
 471}
 
 472
 473static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
 474					    struct workqueue_struct *wq)
 475{
 476	if (!(wq->flags & WQ_UNBOUND)) {
 477		if (likely(cpu < nr_cpu_ids)) {
 478#ifdef CONFIG_SMP
 479			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
 480#else
 481			return wq->cpu_wq.single;
 482#endif
 483		}
 484	} else if (likely(cpu == WORK_CPU_UNBOUND))
 485		return wq->cpu_wq.single;
 486	return NULL;
 487}
 488
 489static unsigned int work_color_to_flags(int color)
 490{
 491	return color << WORK_STRUCT_COLOR_SHIFT;
 492}
 493
 494static int get_work_color(struct work_struct *work)
 495{
 496	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 497		((1 << WORK_STRUCT_COLOR_BITS) - 1);
 498}
 499
 500static int work_next_color(int color)
 501{
 502	return (color + 1) % WORK_NR_COLORS;
 503}
 504
 505/*
 506 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 507 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 508 * cleared and the work data contains the cpu number it was last on.
 509 *
 510 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 511 * cwq, cpu or clear work->data.  These functions should only be
 512 * called while the work is owned - ie. while the PENDING bit is set.
 513 *
 514 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 515 * corresponding to a work.  gcwq is available once the work has been
 516 * queued anywhere after initialization.  cwq is available only from
 517 * queueing until execution starts.
 
 
 
 
 
 
 518 */
 519static inline void set_work_data(struct work_struct *work, unsigned long data,
 520				 unsigned long flags)
 521{
 522	BUG_ON(!work_pending(work));
 523	atomic_long_set(&work->data, data | flags | work_static(work));
 524}
 525
 526static void set_work_cwq(struct work_struct *work,
 527			 struct cpu_workqueue_struct *cwq,
 528			 unsigned long extra_flags)
 529{
 530	set_work_data(work, (unsigned long)cwq,
 531		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 
 
 
 
 
 
 
 532}
 533
 534static void set_work_cpu(struct work_struct *work, unsigned int cpu)
 
 535{
 536	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 537}
 538
 539static void clear_work_data(struct work_struct *work)
 540{
 541	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
 
 542}
 543
 544static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
 545{
 546	unsigned long data = atomic_long_read(&work->data);
 547
 548	if (data & WORK_STRUCT_CWQ)
 549		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 550	else
 551		return NULL;
 552}
 553
 554static struct global_cwq *get_work_gcwq(struct work_struct *work)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 555{
 556	unsigned long data = atomic_long_read(&work->data);
 557	unsigned int cpu;
 
 
 558
 559	if (data & WORK_STRUCT_CWQ)
 560		return ((struct cpu_workqueue_struct *)
 561			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
 562
 563	cpu = data >> WORK_STRUCT_FLAG_BITS;
 564	if (cpu == WORK_CPU_NONE)
 565		return NULL;
 566
 567	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
 568	return get_gcwq(cpu);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 569}
 570
 571/*
 572 * Policy functions.  These define the policies on how the global
 573 * worker pool is managed.  Unless noted otherwise, these functions
 574 * assume that they're being called with gcwq->lock held.
 575 */
 576
 577static bool __need_more_worker(struct global_cwq *gcwq)
 578{
 579	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
 580		gcwq->flags & GCWQ_HIGHPRI_PENDING;
 581}
 582
 583/*
 584 * Need to wake up a worker?  Called from anything but currently
 585 * running workers.
 
 
 
 
 586 */
 587static bool need_more_worker(struct global_cwq *gcwq)
 588{
 589	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
 590}
 591
 592/* Can I start working?  Called from busy but !running workers. */
 593static bool may_start_working(struct global_cwq *gcwq)
 594{
 595	return gcwq->nr_idle;
 596}
 597
 598/* Do I need to keep working?  Called from currently running workers. */
 599static bool keep_working(struct global_cwq *gcwq)
 600{
 601	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 602
 603	return !list_empty(&gcwq->worklist) &&
 604		(atomic_read(nr_running) <= 1 ||
 605		 gcwq->flags & GCWQ_HIGHPRI_PENDING);
 606}
 607
 608/* Do we need a new worker?  Called from manager. */
 609static bool need_to_create_worker(struct global_cwq *gcwq)
 610{
 611	return need_more_worker(gcwq) && !may_start_working(gcwq);
 612}
 613
 614/* Do I need to be the manager? */
 615static bool need_to_manage_workers(struct global_cwq *gcwq)
 616{
 617	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
 618}
 619
 620/* Do we have too many workers and should some go away? */
 621static bool too_many_workers(struct global_cwq *gcwq)
 622{
 623	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
 624	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
 625	int nr_busy = gcwq->nr_workers - nr_idle;
 626
 627	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 628}
 629
 630/*
 631 * Wake up functions.
 632 */
 633
 634/* Return the first worker.  Safe with preemption disabled */
 635static struct worker *first_worker(struct global_cwq *gcwq)
 636{
 637	if (unlikely(list_empty(&gcwq->idle_list)))
 638		return NULL;
 639
 640	return list_first_entry(&gcwq->idle_list, struct worker, entry);
 641}
 642
 643/**
 644 * wake_up_worker - wake up an idle worker
 645 * @gcwq: gcwq to wake worker for
 646 *
 647 * Wake up the first idle worker of @gcwq.
 648 *
 649 * CONTEXT:
 650 * spin_lock_irq(gcwq->lock).
 651 */
 652static void wake_up_worker(struct global_cwq *gcwq)
 653{
 654	struct worker *worker = first_worker(gcwq);
 655
 656	if (likely(worker))
 657		wake_up_process(worker->task);
 658}
 659
 660/**
 661 * wq_worker_waking_up - a worker is waking up
 662 * @task: task waking up
 663 * @cpu: CPU @task is waking up to
 664 *
 665 * This function is called during try_to_wake_up() when a worker is
 666 * being awoken.
 667 *
 668 * CONTEXT:
 669 * spin_lock_irq(rq->lock)
 670 */
 671void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 672{
 673	struct worker *worker = kthread_data(task);
 674
 
 
 675	if (!(worker->flags & WORKER_NOT_RUNNING))
 676		atomic_inc(get_gcwq_nr_running(cpu));
 
 677}
 678
 679/**
 680 * wq_worker_sleeping - a worker is going to sleep
 681 * @task: task going to sleep
 682 * @cpu: CPU in question, must be the current CPU number
 683 *
 684 * This function is called during schedule() when a busy worker is
 685 * going to sleep.  Worker on the same cpu can be woken up by
 686 * returning pointer to its task.
 687 *
 688 * CONTEXT:
 689 * spin_lock_irq(rq->lock)
 690 *
 691 * RETURNS:
 692 * Worker task on @cpu to wake up, %NULL if none.
 693 */
 694struct task_struct *wq_worker_sleeping(struct task_struct *task,
 695				       unsigned int cpu)
 696{
 697	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 698	struct global_cwq *gcwq = get_gcwq(cpu);
 699	atomic_t *nr_running = get_gcwq_nr_running(cpu);
 700
 
 
 
 
 
 701	if (worker->flags & WORKER_NOT_RUNNING)
 702		return NULL;
 
 
 
 
 
 703
 704	/* this can only happen on the local cpu */
 705	BUG_ON(cpu != raw_smp_processor_id());
 706
 707	/*
 708	 * The counterpart of the following dec_and_test, implied mb,
 709	 * worklist not empty test sequence is in insert_work().
 710	 * Please read comment there.
 711	 *
 712	 * NOT_RUNNING is clear.  This means that trustee is not in
 713	 * charge and we're running on the local cpu w/ rq lock held
  714 * and preemption disabled, which in turn means that no one else
 715	 * could be manipulating idle_list, so dereferencing idle_list
 716	 * without gcwq lock is safe.
 717	 */
 718	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
 719		to_wakeup = first_worker(gcwq);
 720	return to_wakeup ? to_wakeup->task : NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 721}
 722
 723/**
 724 * worker_set_flags - set worker flags and adjust nr_running accordingly
 725 * @worker: self
 726 * @flags: flags to set
 727 * @wakeup: wakeup an idle worker if necessary
 728 *
 729 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 730 * nr_running becomes zero and @wakeup is %true, an idle worker is
 731 * woken up.
 732 *
 733 * CONTEXT:
 734 * spin_lock_irq(gcwq->lock)
 735 */
 736static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 737				    bool wakeup)
 738{
 739	struct global_cwq *gcwq = worker->gcwq;
 740
 741	WARN_ON_ONCE(worker->task != current);
 742
 743	/*
 744	 * If transitioning into NOT_RUNNING, adjust nr_running and
 745	 * wake up an idle worker as necessary if requested by
 746	 * @wakeup.
 747	 */
 748	if ((flags & WORKER_NOT_RUNNING) &&
 749	    !(worker->flags & WORKER_NOT_RUNNING)) {
 750		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 751
 752		if (wakeup) {
 753			if (atomic_dec_and_test(nr_running) &&
 754			    !list_empty(&gcwq->worklist))
 755				wake_up_worker(gcwq);
 756		} else
 757			atomic_dec(nr_running);
 758	}
 759
 760	worker->flags |= flags;
 761}
 762
 763/**
 764 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 765 * @worker: self
 766 * @flags: flags to clear
 767 *
 768 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 769 *
 770 * CONTEXT:
 771 * spin_lock_irq(gcwq->lock)
 772 */
 773static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 774{
 775	struct global_cwq *gcwq = worker->gcwq;
 776	unsigned int oflags = worker->flags;
 777
 778	WARN_ON_ONCE(worker->task != current);
 779
 780	worker->flags &= ~flags;
 781
 782	/*
 783	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 784	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 785	 * of multiple flags, not a single flag.
 786	 */
 787	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 788		if (!(worker->flags & WORKER_NOT_RUNNING))
 789			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
 790}
 791
 792/**
 793 * busy_worker_head - return the busy hash head for a work
 794 * @gcwq: gcwq of interest
 795 * @work: work to be hashed
 796 *
 797 * Return hash head of @gcwq for @work.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 798 *
 799 * CONTEXT:
 800 * spin_lock_irq(gcwq->lock).
 801 *
 802 * RETURNS:
 803 * Pointer to the hash head.
 
 804 */
 805static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
 806					   struct work_struct *work)
 807{
 808	const int base_shift = ilog2(sizeof(struct work_struct));
 809	unsigned long v = (unsigned long)work;
 810
 811	/* simple shift and fold hash, do we need something better? */
 812	v >>= base_shift;
 813	v += v >> BUSY_WORKER_HASH_ORDER;
 814	v &= BUSY_WORKER_HASH_MASK;
 
 815
 816	return &gcwq->busy_hash[v];
 817}
 818
 819/**
 820 * __find_worker_executing_work - find worker which is executing a work
 821 * @gcwq: gcwq of interest
 822 * @bwh: hash head as returned by busy_worker_head()
 823 * @work: work to find worker for
 
 
 
 
 824 *
 825 * Find a worker which is executing @work on @gcwq.  @bwh should be
 826 * the hash head obtained by calling busy_worker_head() with the same
 827 * work.
 828 *
 829 * CONTEXT:
 830 * spin_lock_irq(gcwq->lock).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 831 *
 832 * RETURNS:
 833 * Pointer to worker which is executing @work if found, NULL
 834 * otherwise.
 835 */
 836static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 837						   struct hlist_head *bwh,
 838						   struct work_struct *work)
 839{
 840	struct worker *worker;
 841	struct hlist_node *tmp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 842
 843	hlist_for_each_entry(worker, tmp, bwh, hentry)
 844		if (worker->current_work == work)
 845			return worker;
 846	return NULL;
 847}
 848
 849/**
 850 * find_worker_executing_work - find worker which is executing a work
 851 * @gcwq: gcwq of interest
 852 * @work: work to find worker for
 853 *
 854 * Find a worker which is executing @work on @gcwq.  This function is
 855 * identical to __find_worker_executing_work() except that this
 856 * function calculates @bwh itself.
 857 *
 858 * CONTEXT:
 859 * spin_lock_irq(gcwq->lock).
 860 *
 861 * RETURNS:
 862 * Pointer to worker which is executing @work if found, NULL
 863 * otherwise.
 864 */
 865static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 866						 struct work_struct *work)
 867{
 868	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
 869					    work);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 870}
 871
 872/**
 873 * gcwq_determine_ins_pos - find insertion position
 874 * @gcwq: gcwq of interest
 875 * @cwq: cwq a work is being queued for
 
 
 
 
 876 *
 877 * A work for @cwq is about to be queued on @gcwq, determine insertion
 878 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 879 * queued at the head of the queue but in FIFO order with respect to
 880 * other HIGHPRI works; otherwise, at the end of the queue.  This
 881 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 882 * there are HIGHPRI works pending.
 883 *
 884 * CONTEXT:
 885 * spin_lock_irq(gcwq->lock).
 
 
 
 
 
 
 886 *
 887 * RETURNS:
  888 * Pointer to insertion position.
 889 */
 890static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
 891					       struct cpu_workqueue_struct *cwq)
 892{
 893	struct work_struct *twork;
 
 894
 895	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
 896		return &gcwq->worklist;
 897
 898	list_for_each_entry(twork, &gcwq->worklist, entry) {
 899		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
 
 900
 901		if (!(tcwq->wq->flags & WQ_HIGHPRI))
 902			break;
 
 
 
 
 
 903	}
 904
 905	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
 906	return &twork->entry;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 907}
 908
 909/**
 910 * insert_work - insert a work into gcwq
 911 * @cwq: cwq @work belongs to
 912 * @work: work to insert
 913 * @head: insertion point
 914 * @extra_flags: extra WORK_STRUCT_* flags to set
 915 *
 916 * Insert @work which belongs to @cwq into @gcwq after @head.
 917 * @extra_flags is or'd to work_struct flags.
 918 *
 919 * CONTEXT:
 920 * spin_lock_irq(gcwq->lock).
 921 */
 922static void insert_work(struct cpu_workqueue_struct *cwq,
 923			struct work_struct *work, struct list_head *head,
 924			unsigned int extra_flags)
 925{
 926	struct global_cwq *gcwq = cwq->gcwq;
 927
 928	/* we own @work, set data and link */
 929	set_work_cwq(work, cwq, extra_flags);
 930
 931	/*
 932	 * Ensure that we get the right work->data if we see the
 933	 * result of list_add() below, see try_to_grab_pending().
 934	 */
 935	smp_wmb();
 936
 937	list_add_tail(&work->entry, head);
 
 938
 939	/*
 940	 * Ensure either worker_sched_deactivated() sees the above
 941	 * list_add_tail() or we see zero nr_running to avoid workers
 942	 * lying around lazily while there are works to be processed.
 943	 */
 944	smp_mb();
 945
 946	if (__need_more_worker(gcwq))
 947		wake_up_worker(gcwq);
 948}
 949
 950/*
 951 * Test whether @work is being queued from another work executing on the
 952 * same workqueue.  This is rather expensive and should only be used from
 953 * cold paths.
 954 */
 955static bool is_chained_work(struct workqueue_struct *wq)
 956{
 957	unsigned long flags;
 958	unsigned int cpu;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 959
 960	for_each_gcwq_cpu(cpu) {
 961		struct global_cwq *gcwq = get_gcwq(cpu);
 962		struct worker *worker;
 963		struct hlist_node *pos;
 964		int i;
 965
 966		spin_lock_irqsave(&gcwq->lock, flags);
 967		for_each_busy_worker(worker, i, pos, gcwq) {
 968			if (worker->task != current)
 969				continue;
 970			spin_unlock_irqrestore(&gcwq->lock, flags);
 971			/*
 972			 * I'm @worker, no locking necessary.  See if @work
 973			 * is headed to the same workqueue.
 974			 */
 975			return worker->current_cwq->wq == wq;
 976		}
 977		spin_unlock_irqrestore(&gcwq->lock, flags);
 978	}
 979	return false;
 
 
 980}
 981
 982static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 983			 struct work_struct *work)
 984{
 985	struct global_cwq *gcwq;
 986	struct cpu_workqueue_struct *cwq;
 987	struct list_head *worklist;
 988	unsigned int work_flags;
 989	unsigned long flags;
 990
 991	debug_work_activate(work);
 992
 993	/* if dying, only works from the same workqueue are allowed */
 994	if (unlikely(wq->flags & WQ_DRAINING) &&
 995	    WARN_ON_ONCE(!is_chained_work(wq)))
 996		return;
 997
 998	/* determine gcwq to use */
 999	if (!(wq->flags & WQ_UNBOUND)) {
1000		struct global_cwq *last_gcwq;
1001
1002		if (unlikely(cpu == WORK_CPU_UNBOUND))
1003			cpu = raw_smp_processor_id();
1004
1005		/*
1006		 * It's multi cpu.  If @wq is non-reentrant and @work
1007		 * was previously on a different cpu, it might still
1008		 * be running there, in which case the work needs to
1009		 * be queued on that cpu to guarantee non-reentrance.
1010		 */
1011		gcwq = get_gcwq(cpu);
1012		if (wq->flags & WQ_NON_REENTRANT &&
1013		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1014			struct worker *worker;
1015
1016			spin_lock_irqsave(&last_gcwq->lock, flags);
1017
1018			worker = find_worker_executing_work(last_gcwq, work);
1019
1020			if (worker && worker->current_cwq->wq == wq)
1021				gcwq = last_gcwq;
1022			else {
1023				/* meh... not running there, queue here */
1024				spin_unlock_irqrestore(&last_gcwq->lock, flags);
1025				spin_lock_irqsave(&gcwq->lock, flags);
1026			}
1027		} else
1028			spin_lock_irqsave(&gcwq->lock, flags);
1029	} else {
1030		gcwq = get_gcwq(WORK_CPU_UNBOUND);
1031		spin_lock_irqsave(&gcwq->lock, flags);
1032	}
1033
1034	/* gcwq determined, get cwq and queue */
1035	cwq = get_cwq(gcwq->cpu, wq);
1036	trace_workqueue_queue_work(cpu, cwq, work);
1037
1038	BUG_ON(!list_empty(&work->entry));
 
1039
1040	cwq->nr_in_flight[cwq->work_color]++;
1041	work_flags = work_color_to_flags(cwq->work_color);
1042
1043	if (likely(cwq->nr_active < cwq->max_active)) {
1044		trace_workqueue_activate_work(work);
1045		cwq->nr_active++;
1046		worklist = gcwq_determine_ins_pos(gcwq, cwq);
 
 
1047	} else {
1048		work_flags |= WORK_STRUCT_DELAYED;
1049		worklist = &cwq->delayed_works;
1050	}
1051
1052	insert_work(cwq, work, worklist, work_flags);
1053
1054	spin_unlock_irqrestore(&gcwq->lock, flags);
 
 
1055}
1056
1057/**
1058 * queue_work - queue work on a workqueue
 
1059 * @wq: workqueue to use
1060 * @work: work to queue
1061 *
1062 * Returns 0 if @work was already on a queue, non-zero otherwise.
 
1063 *
1064 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1065 * it can be processed by another CPU.
1066 */
1067int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
1068{
1069	int ret;
1070
1071	ret = queue_work_on(get_cpu(), wq, work);
1072	put_cpu();
 
 
1073
 
1074	return ret;
1075}
1076EXPORT_SYMBOL_GPL(queue_work);
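/*
 * Example usage (illustrative sketch, not part of the original file): a
 * driver typically embeds a work_struct in its own state and hands it to
 * a workqueue it allocated earlier.  "struct my_dev", "my_wq" and
 * "my_work_fn" are hypothetical names.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *
 *		... runs in process context, may sleep ...
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	if (!queue_work(my_wq, &dev->work))
 *		pr_debug("work was already pending\n");
 */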
1077
1078/**
1079 * queue_work_on - queue work on specific cpu
1080 * @cpu: CPU number to execute work on
1081 * @wq: workqueue to use
1082 * @work: work to queue
1083 *
1084 * Returns 0 if @work was already on a queue, non-zero otherwise.
1085 *
1086 * We queue the work to a specific CPU, the caller must ensure it
1087 * can't go away.
1088 */
1089int
1090queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1091{
1092	int ret = 0;
1093
1094	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 
 
1095		__queue_work(cpu, wq, work);
1096		ret = 1;
1097	}
 
 
1098	return ret;
1099}
1100EXPORT_SYMBOL_GPL(queue_work_on);
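/*
 * Example usage (illustrative sketch): pin per-CPU work to the CPU owning
 * the data.  "my_wq" and "per_cpu_works" are hypothetical; the caller must
 * keep the CPUs from going away, e.g. with get_online_cpus().
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		queue_work_on(cpu, my_wq, per_cpu_ptr(per_cpu_works, cpu));
 *	put_online_cpus();
 */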
1101
1102static void delayed_work_timer_fn(unsigned long __data)
 
1103{
1104	struct delayed_work *dwork = (struct delayed_work *)__data;
1105	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1106
1107	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1108}
1109
1110/**
1111 * queue_delayed_work - queue work on a workqueue after delay
 
1112 * @wq: workqueue to use
1113 * @dwork: delayable work to queue
1114 * @delay: number of jiffies to wait before queueing
1115 *
1116 * Returns 0 if @work was already on a queue, non-zero otherwise.
 
 
1117 */
1118int queue_delayed_work(struct workqueue_struct *wq,
1119			struct delayed_work *dwork, unsigned long delay)
1120{
1121	if (delay == 0)
1122		return queue_work(wq, &dwork->work);
1123
1124	return queue_delayed_work_on(-1, wq, dwork, delay);
1125}
1126EXPORT_SYMBOL_GPL(queue_delayed_work);
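/*
 * Example usage (illustrative sketch): arm a polling work item that fires
 * one second from now.  "my_wq" and "my_poll_fn" are hypothetical names.
 *
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);
 *
 *	queue_delayed_work(my_wq, &my_poll_work, HZ);
 *
 * A handler that wants to keep polling simply calls queue_delayed_work()
 * on itself again from within my_poll_fn().
 */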
1127
1128/**
1129 * queue_delayed_work_on - queue work on specific CPU after delay
1130 * @cpu: CPU number to execute work on
1131 * @wq: workqueue to use
1132 * @dwork: work to queue
1133 * @delay: number of jiffies to wait before queueing
1134 *
1135 * Returns 0 if @work was already on a queue, non-zero otherwise.
1136 */
1137int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1138			struct delayed_work *dwork, unsigned long delay)
1139{
1140	int ret = 0;
1141	struct timer_list *timer = &dwork->timer;
1142	struct work_struct *work = &dwork->work;
1143
1144	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1145		unsigned int lcpu;
 
1146
1147		BUG_ON(timer_pending(timer));
1148		BUG_ON(!list_empty(&work->entry));
 
 
1149
1150		timer_stats_timer_set_start_info(&dwork->timer);
1151
1152		/*
1153		 * This stores cwq for the moment, for the timer_fn.
1154		 * Note that the work's gcwq is preserved to allow
1155		 * reentrance detection for delayed works.
1156		 */
1157		if (!(wq->flags & WQ_UNBOUND)) {
1158			struct global_cwq *gcwq = get_work_gcwq(work);
1159
1160			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1161				lcpu = gcwq->cpu;
1162			else
1163				lcpu = raw_smp_processor_id();
1164		} else
1165			lcpu = WORK_CPU_UNBOUND;
1166
1167		set_work_cwq(work, get_cwq(lcpu, wq), 0);
1168
1169		timer->expires = jiffies + delay;
1170		timer->data = (unsigned long)dwork;
1171		timer->function = delayed_work_timer_fn;
 
 
1172
1173		if (unlikely(cpu >= 0))
1174			add_timer_on(timer, cpu);
1175		else
1176			add_timer(timer);
1177		ret = 1;
1178	}
1179	return ret;
1180}
1181EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1182
1183/**
1184 * worker_enter_idle - enter idle state
1185 * @worker: worker which is entering idle state
1186 *
1187 * @worker is entering idle state.  Update stats and idle timer if
1188 * necessary.
1189 *
1190 * LOCKING:
1191 * spin_lock_irq(gcwq->lock).
1192 */
1193static void worker_enter_idle(struct worker *worker)
1194{
1195	struct global_cwq *gcwq = worker->gcwq;
1196
1197	BUG_ON(worker->flags & WORKER_IDLE);
1198	BUG_ON(!list_empty(&worker->entry) &&
1199	       (worker->hentry.next || worker->hentry.pprev));
 
1200
1201	/* can't use worker_set_flags(), also called from start_worker() */
1202	worker->flags |= WORKER_IDLE;
1203	gcwq->nr_idle++;
1204	worker->last_active = jiffies;
1205
1206	/* idle_list is LIFO */
1207	list_add(&worker->entry, &gcwq->idle_list);
1208
1209	if (likely(!(worker->flags & WORKER_ROGUE))) {
1210		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1211			mod_timer(&gcwq->idle_timer,
1212				  jiffies + IDLE_WORKER_TIMEOUT);
1213	} else
1214		wake_up_all(&gcwq->trustee_wait);
1215
1216	/* sanity check nr_running */
1217	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1218		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1219}
1220
1221/**
1222 * worker_leave_idle - leave idle state
1223 * @worker: worker which is leaving idle state
1224 *
1225 * @worker is leaving idle state.  Update stats.
1226 *
1227 * LOCKING:
1228 * spin_lock_irq(gcwq->lock).
1229 */
1230static void worker_leave_idle(struct worker *worker)
1231{
1232	struct global_cwq *gcwq = worker->gcwq;
1233
1234	BUG_ON(!(worker->flags & WORKER_IDLE));
 
1235	worker_clr_flags(worker, WORKER_IDLE);
1236	gcwq->nr_idle--;
1237	list_del_init(&worker->entry);
1238}
1239
1240/**
1241 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1242 * @worker: self
1243 *
1244 * Works which are scheduled while the cpu is online must at least be
1245 * scheduled to a worker which is bound to the cpu so that if they are
1246 * flushed from cpu callbacks while cpu is going down, they are
1247 * guaranteed to execute on the cpu.
1248 *
1249 * This function is to be used by rogue workers and rescuers to bind
1250 * themselves to the target cpu and may race with cpu going down or
1251 * coming online.  kthread_bind() can't be used because it may put the
1252 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1253 * verbatim as it's best effort and blocking and gcwq may be
1254 * [dis]associated in the meantime.
1255 *
1256 * This function tries set_cpus_allowed() and locks gcwq and verifies
1257 * the binding against GCWQ_DISASSOCIATED which is set during
1258 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1259 * idle state or fetches works without dropping lock, it can guarantee
1260 * the scheduling requirement described in the first paragraph.
1261 *
1262 * CONTEXT:
1263 * Might sleep.  Called without any lock but returns with gcwq->lock
1264 * held.
1265 *
1266 * RETURNS:
1267 * %true if the associated gcwq is online (@worker is successfully
1268 * bound), %false if offline.
1269 */
1270static bool worker_maybe_bind_and_lock(struct worker *worker)
1271__acquires(&gcwq->lock)
1272{
1273	struct global_cwq *gcwq = worker->gcwq;
1274	struct task_struct *task = worker->task;
1275
1276	while (true) {
1277		/*
1278		 * The following call may fail, succeed or succeed
1279		 * without actually migrating the task to the cpu if
1280		 * it races with cpu hotunplug operation.  Verify
1281		 * against GCWQ_DISASSOCIATED.
1282		 */
1283		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1284			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1285
1286		spin_lock_irq(&gcwq->lock);
1287		if (gcwq->flags & GCWQ_DISASSOCIATED)
1288			return false;
1289		if (task_cpu(task) == gcwq->cpu &&
1290		    cpumask_equal(&current->cpus_allowed,
1291				  get_cpu_mask(gcwq->cpu)))
1292			return true;
1293		spin_unlock_irq(&gcwq->lock);
1294
1295		/*
1296		 * We've raced with CPU hot[un]plug.  Give it a breather
1297		 * and retry migration.  cond_resched() is required here;
1298		 * otherwise, we might deadlock against cpu_stop trying to
1299		 * bring down the CPU on non-preemptive kernel.
1300		 */
1301		cpu_relax();
1302		cond_resched();
1303	}
 
1304}
1305
1306/*
1307 * Function for worker->rebind_work used to rebind rogue busy workers
1308 * to the associated cpu which is coming back online.  This is
1309 * scheduled by cpu up but can race with other cpu hotplug operations
1310 * and may be executed twice without intervening cpu down.
1311 */
1312static void worker_rebind_fn(struct work_struct *work)
 
1313{
1314	struct worker *worker = container_of(work, struct worker, rebind_work);
1315	struct global_cwq *gcwq = worker->gcwq;
1316
1317	if (worker_maybe_bind_and_lock(worker))
1318		worker_clr_flags(worker, WORKER_REBIND);
1319
1320	spin_unlock_irq(&gcwq->lock);
1321}
1322
1323static struct worker *alloc_worker(void)
1324{
1325	struct worker *worker;
1326
1327	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1328	if (worker) {
1329		INIT_LIST_HEAD(&worker->entry);
1330		INIT_LIST_HEAD(&worker->scheduled);
1331		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1332		/* on creation a worker is in !idle && prep state */
1333		worker->flags = WORKER_PREP;
1334	}
1335	return worker;
1336}
1337
1338/**
1339 * create_worker - create a new workqueue worker
1340 * @gcwq: gcwq the new worker will belong to
1341 * @bind: whether to set affinity to @cpu or not
1342 *
1343 * Create a new worker which is bound to @gcwq.  The returned worker
1344 * can be started by calling start_worker() or destroyed using
1345 * destroy_worker().
1346 *
1347 * CONTEXT:
1348 * Might sleep.  Does GFP_KERNEL allocations.
1349 *
1350 * RETURNS:
1351 * Pointer to the newly created worker.
1352 */
1353static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1354{
1355	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1356	struct worker *worker = NULL;
1357	int id = -1;
 
1358
1359	spin_lock_irq(&gcwq->lock);
1360	while (ida_get_new(&gcwq->worker_ida, &id)) {
1361		spin_unlock_irq(&gcwq->lock);
1362		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1363			goto fail;
1364		spin_lock_irq(&gcwq->lock);
1365	}
1366	spin_unlock_irq(&gcwq->lock);
1367
1368	worker = alloc_worker();
1369	if (!worker)
1370		goto fail;
1371
1372	worker->gcwq = gcwq;
1373	worker->id = id;
1374
1375	if (!on_unbound_cpu)
1376		worker->task = kthread_create_on_node(worker_thread,
1377						      worker,
1378						      cpu_to_node(gcwq->cpu),
1379						      "kworker/%u:%d", gcwq->cpu, id);
1380	else
1381		worker->task = kthread_create(worker_thread, worker,
1382					      "kworker/u:%d", id);
 
 
1383	if (IS_ERR(worker->task))
1384		goto fail;
1385
1386	/*
1387	 * A rogue worker will become a regular one if CPU comes
1388	 * online later on.  Make sure every worker has
1389	 * PF_THREAD_BOUND set.
1390	 */
1391	if (bind && !on_unbound_cpu)
1392		kthread_bind(worker->task, gcwq->cpu);
1393	else {
1394		worker->task->flags |= PF_THREAD_BOUND;
1395		if (on_unbound_cpu)
1396			worker->flags |= WORKER_UNBOUND;
1397	}
1398
1399	return worker;
 
1400fail:
1401	if (id >= 0) {
1402		spin_lock_irq(&gcwq->lock);
1403		ida_remove(&gcwq->worker_ida, id);
1404		spin_unlock_irq(&gcwq->lock);
1405	}
1406	kfree(worker);
1407	return NULL;
1408}
1409
1410/**
1411 * start_worker - start a newly created worker
1412 * @worker: worker to start
1413 *
1414 * Make the gcwq aware of @worker and start it.
1415 *
1416 * CONTEXT:
1417 * spin_lock_irq(gcwq->lock).
1418 */
1419static void start_worker(struct worker *worker)
1420{
1421	worker->flags |= WORKER_STARTED;
1422	worker->gcwq->nr_workers++;
1423	worker_enter_idle(worker);
1424	wake_up_process(worker->task);
1425}
1426
1427/**
1428 * destroy_worker - destroy a workqueue worker
1429 * @worker: worker to be destroyed
1430 *
1431 * Destroy @worker and adjust @gcwq stats accordingly.
 
1432 *
1433 * CONTEXT:
1434 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1435 */
1436static void destroy_worker(struct worker *worker)
1437{
1438	struct global_cwq *gcwq = worker->gcwq;
1439	int id = worker->id;
 
1440
1441	/* sanity check frenzy */
1442	BUG_ON(worker->current_work);
1443	BUG_ON(!list_empty(&worker->scheduled));
 
 
1444
1445	if (worker->flags & WORKER_STARTED)
1446		gcwq->nr_workers--;
1447	if (worker->flags & WORKER_IDLE)
1448		gcwq->nr_idle--;
1449
1450	list_del_init(&worker->entry);
1451	worker->flags |= WORKER_DIE;
1452
1453	spin_unlock_irq(&gcwq->lock);
1454
1455	kthread_stop(worker->task);
1456	kfree(worker);
1457
1458	spin_lock_irq(&gcwq->lock);
1459	ida_remove(&gcwq->worker_ida, id);
1460}
1461
1462static void idle_worker_timeout(unsigned long __gcwq)
1463{
1464	struct global_cwq *gcwq = (void *)__gcwq;
1465
1466	spin_lock_irq(&gcwq->lock);
1467
1468	if (too_many_workers(gcwq)) {
1469		struct worker *worker;
1470		unsigned long expires;
1471
1472		/* idle_list is kept in LIFO order, check the last one */
1473		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1474		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1475
1476		if (time_before(jiffies, expires))
1477			mod_timer(&gcwq->idle_timer, expires);
1478		else {
1479			/* it's been idle for too long, wake up manager */
1480			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1481			wake_up_worker(gcwq);
1482		}
 
 
1483	}
1484
1485	spin_unlock_irq(&gcwq->lock);
1486}
1487
1488static bool send_mayday(struct work_struct *work)
1489{
1490	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1491	struct workqueue_struct *wq = cwq->wq;
1492	unsigned int cpu;
 
1493
1494	if (!(wq->flags & WQ_RESCUER))
1495		return false;
1496
1497	/* mayday mayday mayday */
1498	cpu = cwq->gcwq->cpu;
1499	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1500	if (cpu == WORK_CPU_UNBOUND)
1501		cpu = 0;
1502	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1503		wake_up_process(wq->rescuer->task);
1504	return true;
1505}
1506
1507static void gcwq_mayday_timeout(unsigned long __gcwq)
1508{
1509	struct global_cwq *gcwq = (void *)__gcwq;
1510	struct work_struct *work;
1511
1512	spin_lock_irq(&gcwq->lock);
 
1513
1514	if (need_to_create_worker(gcwq)) {
1515		/*
1516		 * We've been trying to create a new worker but
1517		 * haven't been successful.  We might be hitting an
1518		 * allocation deadlock.  Send distress signals to
1519		 * rescuers.
1520		 */
1521		list_for_each_entry(work, &gcwq->worklist, entry)
1522			send_mayday(work);
1523	}
1524
1525	spin_unlock_irq(&gcwq->lock);
 
1526
1527	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1528}
1529
1530/**
1531 * maybe_create_worker - create a new worker if necessary
1532 * @gcwq: gcwq to create a new worker for
1533 *
1534 * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1535 * have at least one idle worker on return from this function.  If
1536 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1537 * sent to all rescuers with works scheduled on @gcwq to resolve
1538 * possible allocation deadlock.
1539 *
1540 * On return, need_to_create_worker() is guaranteed to be false and
1541 * may_start_working() true.
1542 *
1543 * LOCKING:
1544 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1545 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1546 * manager.
1547 *
1548 * RETURNS:
1549 * false if no action was taken and gcwq->lock stayed locked, true
1550 * otherwise.
1551 */
1552static bool maybe_create_worker(struct global_cwq *gcwq)
1553__releases(&gcwq->lock)
1554__acquires(&gcwq->lock)
1555{
1556	if (!need_to_create_worker(gcwq))
1557		return false;
1558restart:
1559	spin_unlock_irq(&gcwq->lock);
1560
1561	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1562	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1563
1564	while (true) {
1565		struct worker *worker;
1566
1567		worker = create_worker(gcwq, true);
1568		if (worker) {
1569			del_timer_sync(&gcwq->mayday_timer);
1570			spin_lock_irq(&gcwq->lock);
1571			start_worker(worker);
1572			BUG_ON(need_to_create_worker(gcwq));
1573			return true;
1574		}
1575
1576		if (!need_to_create_worker(gcwq))
1577			break;
1578
1579		__set_current_state(TASK_INTERRUPTIBLE);
1580		schedule_timeout(CREATE_COOLDOWN);
1581
1582		if (!need_to_create_worker(gcwq))
1583			break;
1584	}
1585
1586	del_timer_sync(&gcwq->mayday_timer);
1587	spin_lock_irq(&gcwq->lock);
1588	if (need_to_create_worker(gcwq))
1589		goto restart;
1590	return true;
1591}
1592
1593/**
1594 * maybe_destroy_worker - destroy workers which have been idle for a while
1595 * @gcwq: gcwq to destroy workers for
1596 *
1597 * Destroy @gcwq workers which have been idle for longer than
1598 * IDLE_WORKER_TIMEOUT.
1599 *
1600 * LOCKING:
1601 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1602 * multiple times.  Called only from manager.
1603 *
1604 * RETURNS:
1605 * false if no action was taken and gcwq->lock stayed locked, true
1606 * otherwise.
1607 */
1608static bool maybe_destroy_workers(struct global_cwq *gcwq)
1609{
1610	bool ret = false;
1611
1612	while (too_many_workers(gcwq)) {
1613		struct worker *worker;
1614		unsigned long expires;
1615
1616		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1617		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1618
1619		if (time_before(jiffies, expires)) {
1620			mod_timer(&gcwq->idle_timer, expires);
1621			break;
1622		}
1623
1624		destroy_worker(worker);
1625		ret = true;
1626	}
1627
1628	return ret;
1629}
1630
1631/**
1632 * manage_workers - manage worker pool
1633 * @worker: self
1634 *
1635 * Assume the manager role and manage gcwq worker pool @worker belongs
1636 * to.  At any given time, there can be only zero or one manager per
1637 * gcwq.  The exclusion is handled automatically by this function.
1638 *
1639 * The caller can safely start processing works on false return.  On
1640 * true return, it's guaranteed that need_to_create_worker() is false
1641 * and may_start_working() is true.
1642 *
1643 * CONTEXT:
1644 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1645 * multiple times.  Does GFP_KERNEL allocations.
1646 *
1647 * RETURNS:
1648 * false if no action was taken and gcwq->lock stayed locked, true if
1649 * some action was taken.
 
 
1650 */
1651static bool manage_workers(struct worker *worker)
1652{
1653	struct global_cwq *gcwq = worker->gcwq;
1654	bool ret = false;
1655
1656	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1657		return ret;
1658
1659	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1660	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1661
1662	/*
1663	 * Destroy and then create so that may_start_working() is true
1664	 * on return.
1665	 */
1666	ret |= maybe_destroy_workers(gcwq);
1667	ret |= maybe_create_worker(gcwq);
1668
1669	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1670
1671	/*
1672	 * The trustee might be waiting to take over the manager
1673	 * position, tell it we're done.
1674	 */
1675	if (unlikely(gcwq->trustee))
1676		wake_up_all(&gcwq->trustee_wait);
1677
1678	return ret;
1679}
1680
1681/**
1682 * move_linked_works - move linked works to a list
1683 * @work: start of series of works to be scheduled
1684 * @head: target list to append @work to
1685 * @nextp: out parameter for nested worklist walking
1686 *
1687 * Schedule linked works starting from @work to @head.  Work series to
1688 * be scheduled starts at @work and includes any consecutive work with
1689 * WORK_STRUCT_LINKED set in its predecessor.
1690 *
1691 * If @nextp is not NULL, it's updated to point to the next work of
1692 * the last scheduled work.  This allows move_linked_works() to be
1693 * nested inside outer list_for_each_entry_safe().
1694 *
1695 * CONTEXT:
1696 * spin_lock_irq(gcwq->lock).
1697 */
1698static void move_linked_works(struct work_struct *work, struct list_head *head,
1699			      struct work_struct **nextp)
1700{
1701	struct work_struct *n;
1702
1703	/*
1704	 * Linked worklist will always end before the end of the list,
1705	 * use NULL for list head.
1706	 */
1707	list_for_each_entry_safe_from(work, n, NULL, entry) {
1708		list_move_tail(&work->entry, head);
1709		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1710			break;
1711	}
1712
1713	/*
1714	 * If we're already inside safe list traversal and have moved
1715	 * multiple works to the scheduled queue, the next position
1716	 * needs to be updated.
1717	 */
1718	if (nextp)
1719		*nextp = n;
1720}
1721
1722static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1723{
1724	struct work_struct *work = list_first_entry(&cwq->delayed_works,
1725						    struct work_struct, entry);
1726	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1727
1728	trace_workqueue_activate_work(work);
1729	move_linked_works(work, pos, NULL);
1730	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1731	cwq->nr_active++;
1732}
1733
1734/**
1735 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1736 * @cwq: cwq of interest
1737 * @color: color of work which left the queue
1738 * @delayed: for a delayed work
1739 *
1740 * A work either has completed or is removed from pending queue,
1741 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1742 *
1743 * CONTEXT:
1744 * spin_lock_irq(gcwq->lock).
1745 */
1746static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1747				 bool delayed)
1748{
1749	/* ignore uncolored works */
1750	if (color == WORK_NO_COLOR)
1751		return;
1752
1753	cwq->nr_in_flight[color]--;
1754
1755	if (!delayed) {
1756		cwq->nr_active--;
1757		if (!list_empty(&cwq->delayed_works)) {
1758			/* one down, submit a delayed one */
1759			if (cwq->nr_active < cwq->max_active)
1760				cwq_activate_first_delayed(cwq);
1761		}
1762	}
1763
1764	/* is flush in progress and are we at the flushing tip? */
1765	if (likely(cwq->flush_color != color))
1766		return;
1767
1768	/* are there still in-flight works? */
1769	if (cwq->nr_in_flight[color])
1770		return;
1771
1772	/* this cwq is done, clear flush_color */
1773	cwq->flush_color = -1;
1774
1775	/*
1776	 * If this was the last cwq, wake up the first flusher.  It
1777	 * will handle the rest.
1778	 */
1779	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1780		complete(&cwq->wq->first_flusher->done);
1781}
1782
1783/**
1784 * process_one_work - process single work
1785 * @worker: self
1786 * @work: work to process
1787 *
1788 * Process @work.  This function contains all the logic necessary to
1789 * process a single work including synchronization against and
1790 * interaction with other workers on the same cpu, queueing and
1791 * flushing.  As long as context requirement is met, any worker can
1792 * call this function to process a work.
1793 *
1794 * CONTEXT:
1795 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1796 */
1797static void process_one_work(struct worker *worker, struct work_struct *work)
1798__releases(&gcwq->lock)
1799__acquires(&gcwq->lock)
1800{
1801	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1802	struct global_cwq *gcwq = cwq->gcwq;
1803	struct hlist_head *bwh = busy_worker_head(gcwq, work);
1804	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1805	work_func_t f = work->func;
1806	int work_color;
1807	struct worker *collision;
1808#ifdef CONFIG_LOCKDEP
1809	/*
1810	 * It is permissible to free the struct work_struct from
1811	 * inside the function that is called from it, this we need to
1812	 * take into account for lockdep too.  To avoid bogus "held
1813	 * lock freed" warnings as well as problems when looking into
1814	 * work->lockdep_map, make a copy and use that here.
1815	 */
1816	struct lockdep_map lockdep_map = work->lockdep_map;
 
 
1817#endif
1818	/*
1819	 * A single work shouldn't be executed concurrently by
1820	 * multiple workers on a single cpu.  Check whether anyone is
1821	 * already processing the work.  If so, defer the work to the
1822	 * currently executing one.
1823	 */
1824	collision = __find_worker_executing_work(gcwq, bwh, work);
1825	if (unlikely(collision)) {
1826		move_linked_works(work, &collision->scheduled, NULL);
1827		return;
1828	}
1829
1830	/* claim and process */
1831	debug_work_deactivate(work);
1832	hlist_add_head(&worker->hentry, bwh);
1833	worker->current_work = work;
1834	worker->current_cwq = cwq;
 
1835	work_color = get_work_color(work);
1836
1837	/* record the current cpu number in the work data and dequeue */
1838	set_work_cpu(work, gcwq->cpu);
1839	list_del_init(&work->entry);
1840
1841	/*
1842	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1843	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
 
 
1844	 */
1845	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1846		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1847						struct work_struct, entry);
1848
1849		if (!list_empty(&gcwq->worklist) &&
1850		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1851			wake_up_worker(gcwq);
1852		else
1853			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1854	}
1855
1856	/*
1857	 * CPU intensive works don't participate in concurrency
1858	 * management.  They're the scheduler's responsibility.
 
 
1859	 */
1860	if (unlikely(cpu_intensive))
1861		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1862
1863	spin_unlock_irq(&gcwq->lock);
1864
1865	work_clear_pending(work);
1866	lock_map_acquire_read(&cwq->wq->lockdep_map);
1867	lock_map_acquire(&lockdep_map);
1868	trace_workqueue_execute_start(work);
1869	f(work);
1870	/*
1871	 * While we must be careful to not use "work" after this, the trace
1872	 * point will only record its address.
1873	 */
1874	trace_workqueue_execute_end(work);
1875	lock_map_release(&lockdep_map);
1876	lock_map_release(&cwq->wq->lockdep_map);
1877
1878	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1879		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1880		       "%s/0x%08x/%d\n",
1881		       current->comm, preempt_count(), task_pid_nr(current));
1882		printk(KERN_ERR "    last function: ");
1883		print_symbol("%s\n", (unsigned long)f);
1884		debug_show_held_locks(current);
1885		dump_stack();
1886	}
1887
1888	spin_lock_irq(&gcwq->lock);
1889
1890	/* clear cpu intensive status */
1891	if (unlikely(cpu_intensive))
1892		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1893
1894	/* we're done with it, release */
1895	hlist_del_init(&worker->hentry);
1896	worker->current_work = NULL;
1897	worker->current_cwq = NULL;
1898	cwq_dec_nr_in_flight(cwq, work_color, false);
 
1899}
1900
1901/**
1902 * process_scheduled_works - process scheduled works
1903 * @worker: self
1904 *
1905 * Process all scheduled works.  Please note that the scheduled list
1906 * may change while processing a work, so this function repeatedly
1907 * fetches a work from the top and executes it.
1908 *
1909 * CONTEXT:
1910 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1911 * multiple times.
1912 */
1913static void process_scheduled_works(struct worker *worker)
1914{
1915	while (!list_empty(&worker->scheduled)) {
1916		struct work_struct *work = list_first_entry(&worker->scheduled,
1917						struct work_struct, entry);
1918		process_one_work(worker, work);
1919	}
1920}
1921
1922/**
1923 * worker_thread - the worker thread function
1924 * @__worker: self
1925 *
1926 * The gcwq worker thread function.  There's a single dynamic pool of
1927 * these per cpu.  These workers process all works regardless of
1928 * their specific target workqueue.  The only exception is works which
1929 * belong to workqueues with a rescuer which will be explained in
1930 * rescuer_thread().
 
 
1931 */
1932static int worker_thread(void *__worker)
1933{
1934	struct worker *worker = __worker;
1935	struct global_cwq *gcwq = worker->gcwq;
1936
1937	/* tell the scheduler that this is a workqueue worker */
1938	worker->task->flags |= PF_WQ_WORKER;
1939woke_up:
1940	spin_lock_irq(&gcwq->lock);
1941
1942	/* DIE can be set only while we're idle, checking here is enough */
1943	if (worker->flags & WORKER_DIE) {
1944		spin_unlock_irq(&gcwq->lock);
1945		worker->task->flags &= ~PF_WQ_WORKER;
1946		return 0;
1947	}
1948
1949	worker_leave_idle(worker);
1950recheck:
1951	/* no more worker necessary? */
1952	if (!need_more_worker(gcwq))
1953		goto sleep;
1954
1955	/* do we need to manage? */
1956	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1957		goto recheck;
1958
1959	/*
1960	 * ->scheduled list can only be filled while a worker is
1961	 * preparing to process a work or actually processing it.
1962	 * Make sure nobody diddled with it while I was sleeping.
1963	 */
1964	BUG_ON(!list_empty(&worker->scheduled));
1965
1966	/*
1967	 * When control reaches this point, we're guaranteed to have
1968	 * at least one idle worker or that someone else has already
1969	 * assumed the manager role.
 
 
1970	 */
1971	worker_clr_flags(worker, WORKER_PREP);
1972
1973	do {
1974		struct work_struct *work =
1975			list_first_entry(&gcwq->worklist,
1976					 struct work_struct, entry);
1977
 
 
1978		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1979			/* optimization path, not strictly necessary */
1980			process_one_work(worker, work);
1981			if (unlikely(!list_empty(&worker->scheduled)))
1982				process_scheduled_works(worker);
1983		} else {
1984			move_linked_works(work, &worker->scheduled, NULL);
1985			process_scheduled_works(worker);
1986		}
1987	} while (keep_working(gcwq));
1988
1989	worker_set_flags(worker, WORKER_PREP, false);
1990sleep:
1991	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1992		goto recheck;
1993
1994	/*
1995	 * gcwq->lock is held and there's no work to process and no
1996	 * need to manage, sleep.  Workers are woken up only while
1997	 * holding gcwq->lock or from local cpu, so setting the
1998	 * current state before releasing gcwq->lock is enough to
1999	 * prevent losing any event.
2000	 */
2001	worker_enter_idle(worker);
2002	__set_current_state(TASK_INTERRUPTIBLE);
2003	spin_unlock_irq(&gcwq->lock);
2004	schedule();
2005	goto woke_up;
2006}
2007
2008/**
2009 * rescuer_thread - the rescuer thread function
2010 * @__wq: the associated workqueue
2011 *
2012 * Workqueue rescuer thread function.  There's one rescuer for each
2013 * workqueue which has WQ_RESCUER set.
2014 *
2015 * Regular work processing on a gcwq may block trying to create a new
2016 * worker which uses GFP_KERNEL allocation which has slight chance of
2017 * developing into deadlock if some works currently on the same queue
2018 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
2019 * the problem rescuer solves.
2020 *
2021 * When such condition is possible, the gcwq summons rescuers of all
2022 * workqueues which have works queued on the gcwq and let them process
2023 * those works so that forward progress can be guaranteed.
2024 *
2025 * This should happen rarely.
 
 
2026 */
2027static int rescuer_thread(void *__wq)
2028{
2029	struct workqueue_struct *wq = __wq;
2030	struct worker *rescuer = wq->rescuer;
2031	struct list_head *scheduled = &rescuer->scheduled;
2032	bool is_unbound = wq->flags & WQ_UNBOUND;
2033	unsigned int cpu;
2034
2035	set_user_nice(current, RESCUER_NICE_LEVEL);
2036repeat:
2037	set_current_state(TASK_INTERRUPTIBLE);
2038
2039	if (kthread_should_stop())
2040		return 0;
2041
2042	/*
2043	 * See whether any cpu is asking for help.  Unbounded
2044	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
2045	 */
2046	for_each_mayday_cpu(cpu, wq->mayday_mask) {
2047		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2048		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2049		struct global_cwq *gcwq = cwq->gcwq;
2050		struct work_struct *work, *n;
 
2051
2052		__set_current_state(TASK_RUNNING);
2053		mayday_clear_cpu(cpu, wq->mayday_mask);
 
 
2054
2055		/* migrate to the target cpu if possible */
2056		rescuer->gcwq = gcwq;
2057		worker_maybe_bind_and_lock(rescuer);
2058
2059		/*
2060		 * Slurp in all works issued via this workqueue and
2061		 * process'em.
2062		 */
2063		BUG_ON(!list_empty(&rescuer->scheduled));
2064		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2065			if (get_work_cwq(work) == cwq)
 
 
2066				move_linked_works(work, scheduled, &n);
2067
2068		process_scheduled_works(rescuer);
2069
2070		/*
2071		 * Leave this gcwq.  If keep_working() is %true, notify a
2072		 * regular worker; otherwise, we end up with 0 concurrency
2073		 * and stalling the execution.
2074		 */
2075		if (keep_working(gcwq))
2076			wake_up_worker(gcwq);
 
 
2077
2078		spin_unlock_irq(&gcwq->lock);
 
 
2079	}
2080
2081	schedule();
2082	goto repeat;
2083}
2084
2085struct wq_barrier {
2086	struct work_struct	work;
2087	struct completion	done;
 
2088};
2089
2090static void wq_barrier_func(struct work_struct *work)
2091{
2092	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2093	complete(&barr->done);
2094}
2095
2096/**
2097 * insert_wq_barrier - insert a barrier work
2098 * @cwq: cwq to insert barrier into
2099 * @barr: wq_barrier to insert
2100 * @target: target work to attach @barr to
2101 * @worker: worker currently executing @target, NULL if @target is not executing
2102 *
2103 * @barr is linked to @target such that @barr is completed only after
2104 * @target finishes execution.  Please note that the ordering
2105 * guarantee is observed only with respect to @target and on the local
2106 * cpu.
2107 *
2108 * Currently, a queued barrier can't be canceled.  This is because
2109 * try_to_grab_pending() can't determine whether the work to be
2110 * grabbed is at the head of the queue and thus can't clear LINKED
2111 * flag of the previous work while there must be a valid next work
2112 * after a work with LINKED flag set.
2113 *
2114 * Note that when @worker is non-NULL, @target may be modified
2115 * underneath us, so we can't reliably determine cwq from @target.
2116 *
2117 * CONTEXT:
2118 * spin_lock_irq(gcwq->lock).
2119 */
2120static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2121			      struct wq_barrier *barr,
2122			      struct work_struct *target, struct worker *worker)
2123{
2124	struct list_head *head;
2125	unsigned int linked = 0;
2126
2127	/*
2128	 * debugobject calls are safe here even with gcwq->lock locked
2129	 * as we know for sure that this will not trigger any of the
2130	 * checks and call back into the fixup functions where we
2131	 * might deadlock.
2132	 */
2133	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2134	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2135	init_completion(&barr->done);
2136
2137	/*
2138	 * If @target is currently being executed, schedule the
2139	 * barrier to the worker; otherwise, put it after @target.
2140	 */
2141	if (worker)
2142		head = worker->scheduled.next;
2143	else {
2144		unsigned long *bits = work_data_bits(target);
2145
2146		head = target->entry.next;
2147		/* there can already be other linked works, inherit and set */
2148		linked = *bits & WORK_STRUCT_LINKED;
2149		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2150	}
2151
2152	debug_work_activate(&barr->work);
2153	insert_work(cwq, &barr->work, head,
2154		    work_color_to_flags(WORK_NO_COLOR) | linked);
2155}
2156
2157/**
2158 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2159 * @wq: workqueue being flushed
2160 * @flush_color: new flush color, < 0 for no-op
2161 * @work_color: new work color, < 0 for no-op
2162 *
2163 * Prepare cwqs for workqueue flushing.
2164 *
2165 * If @flush_color is non-negative, flush_color on all cwqs should be
2166 * -1.  If no cwq has in-flight commands at the specified color, all
2167 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
2168 * has in flight commands, its cwq->flush_color is set to
2169 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2170 * wakeup logic is armed and %true is returned.
2171 *
2172 * The caller should have initialized @wq->first_flusher prior to
2173 * calling this function with non-negative @flush_color.  If
2174 * @flush_color is negative, no flush color update is done and %false
2175 * is returned.
2176 *
2177 * If @work_color is non-negative, all cwqs should have the same
2178 * work_color which is previous to @work_color and all will be
2179 * advanced to @work_color.
2180 *
2181 * CONTEXT:
2182 * mutex_lock(wq->flush_mutex).
2183 *
2184 * RETURNS:
2185 * %true if @flush_color >= 0 and there's something to flush.  %false
2186 * otherwise.
2187 */
2188static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2189				      int flush_color, int work_color)
2190{
2191	bool wait = false;
2192	unsigned int cpu;
2193
2194	if (flush_color >= 0) {
2195		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2196		atomic_set(&wq->nr_cwqs_to_flush, 1);
2197	}
2198
2199	for_each_cwq_cpu(cpu, wq) {
2200		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2201		struct global_cwq *gcwq = cwq->gcwq;
2202
2203		spin_lock_irq(&gcwq->lock);
2204
2205		if (flush_color >= 0) {
2206			BUG_ON(cwq->flush_color != -1);
2207
2208			if (cwq->nr_in_flight[flush_color]) {
2209				cwq->flush_color = flush_color;
2210				atomic_inc(&wq->nr_cwqs_to_flush);
2211				wait = true;
2212			}
2213		}
2214
2215		if (work_color >= 0) {
2216			BUG_ON(work_color != work_next_color(cwq->work_color));
2217			cwq->work_color = work_color;
2218		}
2219
2220		spin_unlock_irq(&gcwq->lock);
2221	}
2222
2223	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2224		complete(&wq->first_flusher->done);
2225
2226	return wait;
2227}
2228
2229/**
2230 * flush_workqueue - ensure that any scheduled work has run to completion.
2231 * @wq: workqueue to flush
2232 *
2233 * Forces execution of the workqueue and blocks until its completion.
2234 * This is typically used in driver shutdown handlers.
2235 *
2236 * We sleep until all works which were queued on entry have been handled,
2237 * but we are not livelocked by new incoming ones.
2238 */
2239void flush_workqueue(struct workqueue_struct *wq)
2240{
2241	struct wq_flusher this_flusher = {
2242		.list = LIST_HEAD_INIT(this_flusher.list),
2243		.flush_color = -1,
2244		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2245	};
2246	int next_color;
2247
2248	lock_map_acquire(&wq->lockdep_map);
2249	lock_map_release(&wq->lockdep_map);
2250
2251	mutex_lock(&wq->flush_mutex);
2252
2253	/*
2254	 * Start-to-wait phase
2255	 */
2256	next_color = work_next_color(wq->work_color);
2257
2258	if (next_color != wq->flush_color) {
2259		/*
2260		 * Color space is not full.  The current work_color
2261		 * becomes our flush_color and work_color is advanced
2262		 * by one.
2263		 */
2264		BUG_ON(!list_empty(&wq->flusher_overflow));
2265		this_flusher.flush_color = wq->work_color;
2266		wq->work_color = next_color;
2267
2268		if (!wq->first_flusher) {
2269			/* no flush in progress, become the first flusher */
2270			BUG_ON(wq->flush_color != this_flusher.flush_color);
2271
2272			wq->first_flusher = &this_flusher;
2273
2274			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2275						       wq->work_color)) {
2276				/* nothing to flush, done */
2277				wq->flush_color = next_color;
2278				wq->first_flusher = NULL;
2279				goto out_unlock;
2280			}
2281		} else {
2282			/* wait in queue */
2283			BUG_ON(wq->flush_color == this_flusher.flush_color);
2284			list_add_tail(&this_flusher.list, &wq->flusher_queue);
2285			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2286		}
2287	} else {
2288		/*
2289		 * Oops, color space is full, wait on overflow queue.
2290		 * The next flush completion will assign us
2291		 * flush_color and transfer to flusher_queue.
2292		 */
2293		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2294	}
2295
2296	mutex_unlock(&wq->flush_mutex);
 
 
2297
2298	wait_for_completion(&this_flusher.done);
2299
2300	/*
2301	 * Wake-up-and-cascade phase
2302	 *
2303	 * First flushers are responsible for cascading flushes and
2304	 * handling overflow.  Non-first flushers can simply return.
2305	 */
2306	if (wq->first_flusher != &this_flusher)
2307		return;
2308
2309	mutex_lock(&wq->flush_mutex);
2310
2311	/* we might have raced, check again with mutex held */
2312	if (wq->first_flusher != &this_flusher)
2313		goto out_unlock;
2314
2315	wq->first_flusher = NULL;
2316
2317	BUG_ON(!list_empty(&this_flusher.list));
2318	BUG_ON(wq->flush_color != this_flusher.flush_color);
2319
2320	while (true) {
2321		struct wq_flusher *next, *tmp;
2322
2323		/* complete all the flushers sharing the current flush color */
2324		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2325			if (next->flush_color != wq->flush_color)
2326				break;
2327			list_del_init(&next->list);
2328			complete(&next->done);
2329		}
2330
2331		BUG_ON(!list_empty(&wq->flusher_overflow) &&
2332		       wq->flush_color != work_next_color(wq->work_color));
2333
2334		/* this flush_color is finished, advance by one */
2335		wq->flush_color = work_next_color(wq->flush_color);
2336
2337		/* one color has been freed, handle overflow queue */
2338		if (!list_empty(&wq->flusher_overflow)) {
2339			/*
2340			 * Assign the same color to all overflowed
2341			 * flushers, advance work_color and append to
2342			 * flusher_queue.  This is the start-to-wait
2343			 * phase for these overflowed flushers.
2344			 */
2345			list_for_each_entry(tmp, &wq->flusher_overflow, list)
2346				tmp->flush_color = wq->work_color;
2347
2348			wq->work_color = work_next_color(wq->work_color);
2349
2350			list_splice_tail_init(&wq->flusher_overflow,
2351					      &wq->flusher_queue);
2352			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2353		}
2354
2355		if (list_empty(&wq->flusher_queue)) {
2356			BUG_ON(wq->flush_color != wq->work_color);
2357			break;
2358		}
2359
2360		/*
2361		 * Need to flush more colors.  Make the next flusher
2362		 * the new first flusher and arm cwqs.
2363		 */
2364		BUG_ON(wq->flush_color == wq->work_color);
2365		BUG_ON(wq->flush_color != next->flush_color);
2366
2367		list_del_init(&next->list);
2368		wq->first_flusher = next;
2369
2370		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2371			break;
2372
2373		/*
2374		 * Meh... this color is already done, clear first
2375		 * flusher and repeat cascading.
2376		 */
2377		wq->first_flusher = NULL;
2378	}
2379
2380out_unlock:
2381	mutex_unlock(&wq->flush_mutex);
2382}
2383EXPORT_SYMBOL_GPL(flush_workqueue);
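/*
 * Example usage (illustrative sketch): a driver teardown path typically
 * flushes its private workqueue before releasing the resources its work
 * items touch.  "my_wq" is a hypothetical workqueue created elsewhere
 * with alloc_workqueue().
 *
 *	flush_workqueue(my_wq);		... everything queued so far is done ...
 *	destroy_workqueue(my_wq);
 */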
2384
2385/**
2386 * drain_workqueue - drain a workqueue
2387 * @wq: workqueue to drain
2388 *
2389 * Wait until the workqueue becomes empty.  While draining is in progress,
2390 * only chain queueing is allowed.  IOW, only currently pending or running
2391 * work items on @wq can queue further work items on it.  @wq is flushed
2392 * repeatedly until it becomes empty.  The number of flushes is determined
2393 * by the depth of chaining and should be relatively short.  Whine if it
2394 * takes too long.
2395 */
2396void drain_workqueue(struct workqueue_struct *wq)
2397{
2398	unsigned int flush_cnt = 0;
2399	unsigned int cpu;
2400
2401	/*
2402	 * __queue_work() needs to test whether there are drainers, is much
2403	 * hotter than drain_workqueue() and already looks at @wq->flags.
2404	 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
2405	 */
2406	spin_lock(&workqueue_lock);
2407	if (!wq->nr_drainers++)
2408		wq->flags |= WQ_DRAINING;
2409	spin_unlock(&workqueue_lock);
2410reflush:
2411	flush_workqueue(wq);
2412
2413	for_each_cwq_cpu(cpu, wq) {
2414		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
2415		bool drained;
2416
2417		spin_lock_irq(&cwq->gcwq->lock);
2418		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2419		spin_unlock_irq(&cwq->gcwq->lock);
2420
2421		if (drained)
2422			continue;
2423
2424		if (++flush_cnt == 10 ||
2425		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2426			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
2427				   wq->name, flush_cnt);
 
 
2428		goto reflush;
2429	}
2430
2431	spin_lock(&workqueue_lock);
2432	if (!--wq->nr_drainers)
2433		wq->flags &= ~WQ_DRAINING;
2434	spin_unlock(&workqueue_lock);
2435}
2436EXPORT_SYMBOL_GPL(drain_workqueue);
2437
2438static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2439			     bool wait_executing)
2440{
2441	struct worker *worker = NULL;
2442	struct global_cwq *gcwq;
2443	struct cpu_workqueue_struct *cwq;
2444
2445	might_sleep();
2446	gcwq = get_work_gcwq(work);
2447	if (!gcwq)
2448		return false;
 
2449
2450	spin_lock_irq(&gcwq->lock);
2451	if (!list_empty(&work->entry)) {
2452		/*
2453		 * See the comment near try_to_grab_pending()->smp_rmb().
2454		 * If it was re-queued to a different gcwq under us, we
2455		 * are not going to wait.
2456		 */
2457		smp_rmb();
2458		cwq = get_work_cwq(work);
2459		if (unlikely(!cwq || gcwq != cwq->gcwq))
2460			goto already_gone;
2461	} else if (wait_executing) {
2462		worker = find_worker_executing_work(gcwq, work);
2463		if (!worker)
2464			goto already_gone;
2465		cwq = worker->current_cwq;
2466	} else
2467		goto already_gone;
 
2468
2469	insert_wq_barrier(cwq, barr, work, worker);
2470	spin_unlock_irq(&gcwq->lock);
2471
2472	/*
2473	 * If @max_active is 1 or rescuer is in use, flushing another work
2474	 * item on the same workqueue may lead to deadlock.  Make sure the
2475	 * flusher is not running on the same workqueue by verifying write
2476	 * access.
2477	 */
2478	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2479		lock_map_acquire(&cwq->wq->lockdep_map);
2480	else
2481		lock_map_acquire_read(&cwq->wq->lockdep_map);
2482	lock_map_release(&cwq->wq->lockdep_map);
2483
2484	return true;
2485already_gone:
2486	spin_unlock_irq(&gcwq->lock);
 
2487	return false;
2488}
2489
2490/**
2491 * flush_work - wait for a work to finish executing the last queueing instance
2492 * @work: the work to flush
2493 *
2494 * Wait until @work has finished execution.  This function considers
2495 * only the last queueing instance of @work.  If @work has been
2496 * enqueued across different CPUs on a non-reentrant workqueue or on
2497 * multiple workqueues, @work might still be executing on return on
2498 * some of the CPUs from earlier queueing.
2499 *
2500 * If @work was queued only on a non-reentrant, ordered or unbound
2501 * workqueue, @work is guaranteed to be idle on return if it hasn't
2502 * been requeued since flush started.
2503 *
2504 * RETURNS:
2505 * %true if flush_work() waited for the work to finish execution,
2506 * %false if it was already idle.
2507 */
2508bool flush_work(struct work_struct *work)
2509{
2510	struct wq_barrier barr;
2511
2512	if (start_flush_work(work, &barr, true)) {
2513		wait_for_completion(&barr.done);
2514		destroy_work_on_stack(&barr.work);
2515		return true;
2516	} else
2517		return false;
2518}
2519EXPORT_SYMBOL_GPL(flush_work);
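/*
 * Example usage (illustrative sketch): wait for the last queueing of one
 * specific item before freeing the object embedding it.  "dev" is a
 * hypothetical structure containing a work_struct.
 *
 *	flush_work(&dev->work);
 *	kfree(dev);
 *
 * As documented above, this only waits for the last queueing instance;
 * if the work may be requeued concurrently, use cancel_work_sync().
 */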
2520
2521static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2522{
2523	struct wq_barrier barr;
2524	struct worker *worker;
2525
2526	spin_lock_irq(&gcwq->lock);
2527
2528	worker = find_worker_executing_work(gcwq, work);
2529	if (unlikely(worker))
2530		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2531
2532	spin_unlock_irq(&gcwq->lock);
2533
2534	if (unlikely(worker)) {
2535		wait_for_completion(&barr.done);
2536		destroy_work_on_stack(&barr.work);
2537		return true;
2538	} else
2539		return false;
2540}
2541
2542static bool wait_on_work(struct work_struct *work)
2543{
2544	bool ret = false;
2545	int cpu;
2546
2547	might_sleep();
2548
2549	lock_map_acquire(&work->lockdep_map);
2550	lock_map_release(&work->lockdep_map);
2551
2552	for_each_gcwq_cpu(cpu)
2553		ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2554	return ret;
2555}
2556
2557/**
2558 * flush_work_sync - wait until a work has finished execution
2559 * @work: the work to flush
2560 *
2561 * Wait until @work has finished execution.  On return, it's
2562 * guaranteed that all queueing instances of @work which happened
2563 * before this function is called are finished.  In other words, if
2564 * @work hasn't been requeued since this function was called, @work is
2565 * guaranteed to be idle on return.
2566 *
2567 * RETURNS:
2568 * %true if flush_work_sync() waited for the work to finish execution,
2569 * %false if it was already idle.
2570 */
2571bool flush_work_sync(struct work_struct *work)
2572{
2573	struct wq_barrier barr;
2574	bool pending, waited;
 
2575
2576	/* we'll wait for executions separately, queue barr only if pending */
2577	pending = start_flush_work(work, &barr, false);
2578
2579	/* wait for executions to finish */
2580	waited = wait_on_work(work);
2581
2582	/* wait for the pending one */
2583	if (pending) {
2584		wait_for_completion(&barr.done);
2585		destroy_work_on_stack(&barr.work);
2586	}
2587
2588	return pending || waited;
2589}
2590EXPORT_SYMBOL_GPL(flush_work_sync);
2591
2592/*
2593 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2594 * so this work can't be re-armed in any way.
2595 */
2596static int try_to_grab_pending(struct work_struct *work)
2597{
2598	struct global_cwq *gcwq;
2599	int ret = -1;
2600
2601	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2602		return 0;
2603
2604	/*
2605	 * The queueing is in progress, or it is already queued. Try to
2606	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2607	 */
2608	gcwq = get_work_gcwq(work);
2609	if (!gcwq)
2610		return ret;
2611
2612	spin_lock_irq(&gcwq->lock);
2613	if (!list_empty(&work->entry)) {
2614		/*
2615		 * This work is queued, but perhaps we locked the wrong gcwq.
2616		 * In that case we must see the new value after rmb(), see
2617		 * insert_work()->wmb().
2618		 */
2619		smp_rmb();
2620		if (gcwq == get_work_gcwq(work)) {
2621			debug_work_deactivate(work);
2622			list_del_init(&work->entry);
2623			cwq_dec_nr_in_flight(get_work_cwq(work),
2624				get_work_color(work),
2625				*work_data_bits(work) & WORK_STRUCT_DELAYED);
2626			ret = 1;
2627		}
2628	}
2629	spin_unlock_irq(&gcwq->lock);
2630
2631	return ret;
2632}
2633
2634static bool __cancel_work_timer(struct work_struct *work,
2635				struct timer_list* timer)
2636{
 
 
2637	int ret;
2638
2639	do {
2640		ret = (timer && likely(del_timer(timer)));
2641		if (!ret)
2642			ret = try_to_grab_pending(work);
2643		wait_on_work(work);
2644	} while (unlikely(ret < 0));
2645
2646	clear_work_data(work);
2647	return ret;
2648}
2649
2650/**
2651 * cancel_work_sync - cancel a work and wait for it to finish
2652 * @work: the work to cancel
2653 *
2654 * Cancel @work and wait for its execution to finish.  This function
2655 * can be used even if the work re-queues itself or migrates to
2656 * another workqueue.  On return from this function, @work is
2657 * guaranteed to be not pending or executing on any CPU.
2658 *
2659 * cancel_work_sync(&delayed_work->work) must not be used for
2660 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2661 *
2662 * The caller must ensure that the workqueue on which @work was last
2663 * queued can't be destroyed before this function returns.
2664 *
2665 * RETURNS:
2666 * %true if @work was pending, %false otherwise.
2667 */
2668bool cancel_work_sync(struct work_struct *work)
2669{
2670	return __cancel_work_timer(work, NULL);
2671}
2672EXPORT_SYMBOL_GPL(cancel_work_sync);
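/*
 * Example usage (illustrative sketch): remove/exit paths commonly cancel
 * outstanding work before tearing everything else down.  "my_remove" and
 * "struct my_dev" are hypothetical.
 *
 *	static void my_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->work);
 *		... dev->work is neither pending nor running from here on ...
 *	}
 */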
2673
2674/**
2675 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2676 * @dwork: the delayed work to flush
2677 *
2678 * Delayed timer is cancelled and the pending work is queued for
2679 * immediate execution.  Like flush_work(), this function only
2680 * considers the last queueing instance of @dwork.
2681 *
2682 * RETURNS:
2683 * %true if flush_work() waited for the work to finish execution,
2684 * %false if it was already idle.
2685 */
2686bool flush_delayed_work(struct delayed_work *dwork)
2687{
 
2688	if (del_timer_sync(&dwork->timer))
2689		__queue_work(raw_smp_processor_id(),
2690			     get_work_cwq(&dwork->work)->wq, &dwork->work);
2691	return flush_work(&dwork->work);
2692}
2693EXPORT_SYMBOL(flush_delayed_work);
2694
2695/**
2696 * flush_delayed_work_sync - wait for a dwork to finish
2697 * @dwork: the delayed work to flush
2698 *
2699 * Delayed timer is cancelled and the pending work is queued for
2700 * execution immediately.  Other than timer handling, its behavior
2701 * is identical to flush_work_sync().
2702 *
2703 * RETURNS:
2704 * %true if flush_work_sync() waited for the work to finish execution,
2705 * %false if it was already idle.
2706 */
2707bool flush_delayed_work_sync(struct delayed_work *dwork)
2708{
2709	if (del_timer_sync(&dwork->timer))
2710		__queue_work(raw_smp_processor_id(),
2711			     get_work_cwq(&dwork->work)->wq, &dwork->work);
2712	return flush_work_sync(&dwork->work);
2713}
2714EXPORT_SYMBOL(flush_delayed_work_sync);
2715
2716/**
2717 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2718 * @dwork: the delayed work to cancel
2719 *
2720 * This is cancel_work_sync() for delayed works.
2721 *
2722 * RETURNS:
2723 * %true if @dwork was pending, %false otherwise.
2724 */
2725bool cancel_delayed_work_sync(struct delayed_work *dwork)
2726{
2727	return __cancel_work_timer(&dwork->work, &dwork->timer);
2728}
2729EXPORT_SYMBOL(cancel_delayed_work_sync);
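/*
 * Example usage (illustrative sketch): stop a self-rearming poll cleanly.
 * "my_poll_work" is a hypothetical delayed_work whose handler requeues
 * itself with queue_delayed_work().
 *
 *	cancel_delayed_work_sync(&my_poll_work);
 *
 * On return neither the timer nor the work item can fire again, even if
 * the handler was in the middle of requeueing itself.
 */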
2730
2731/**
2732 * schedule_work - put work task in global workqueue
2733 * @work: job to be done
2734 *
2735 * Returns zero if @work was already on the kernel-global workqueue and
2736 * non-zero otherwise.
2737 *
2738 * This puts a job in the kernel-global workqueue if it was not already
2739 * queued and leaves it in the same position on the kernel-global
2740 * workqueue otherwise.
2741 */
2742int schedule_work(struct work_struct *work)
2743{
2744	return queue_work(system_wq, work);
2745}
2746EXPORT_SYMBOL(schedule_work);
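/*
 * Example usage (illustrative sketch): interrupt handlers often defer the
 * sleeping part of their job to the kernel-global workqueue.
 * "my_irq_handler", "my_ack_hw" and "struct my_dev" are hypothetical.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_ack_hw(dev);
 *		schedule_work(&dev->work);	... safe from atomic context ...
 *		return IRQ_HANDLED;
 *	}
 */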
2747
2748/*
2749 * schedule_work_on - put work task on a specific cpu
2750 * @cpu: cpu to put the work task on
2751 * @work: job to be done
2752 *
2753 * This puts a job on a specific cpu.
2754 */
2755int schedule_work_on(int cpu, struct work_struct *work)
2756{
2757	return queue_work_on(cpu, system_wq, work);
2758}
2759EXPORT_SYMBOL(schedule_work_on);
2760
2761/**
2762 * schedule_delayed_work - put work task in global workqueue after delay
2763 * @dwork: job to be done
2764 * @delay: number of jiffies to wait or 0 for immediate execution
2765 *
2766 * After waiting for a given time this puts a job in the kernel-global
2767 * workqueue.
2768 */
2769int schedule_delayed_work(struct delayed_work *dwork,
2770					unsigned long delay)
2771{
2772	return queue_delayed_work(system_wq, dwork, delay);
2773}
2774EXPORT_SYMBOL(schedule_delayed_work);
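/*
 * Example sketch (hypothetical names and condition): retry an operation
 * roughly once a second on the kernel-global workqueue until it is done.
 */
static void demo_retry_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_retry_dwork, demo_retry_fn);
static bool demo_retry_done;

static void demo_retry_fn(struct work_struct *work)
{
	if (!demo_retry_done)	/* re-arm for another second */
		schedule_delayed_work(&demo_retry_dwork, msecs_to_jiffies(1000));
}

static void demo_retry_start(void)
{
	schedule_delayed_work(&demo_retry_dwork, msecs_to_jiffies(1000));
}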
2775
2776/**
2777 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2778 * @cpu: cpu to use
2779 * @dwork: job to be done
2780 * @delay: number of jiffies to wait
2781 *
2782 * After waiting for a given time this puts a job in the kernel-global
2783 * workqueue on the specified CPU.
2784 */
2785int schedule_delayed_work_on(int cpu,
2786			struct delayed_work *dwork, unsigned long delay)
2787{
2788	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2789}
2790EXPORT_SYMBOL(schedule_delayed_work_on);
2791
2792/**
2793 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2794 * @func: the function to call
2795 *
2796 * schedule_on_each_cpu() executes @func on each online CPU using the
2797 * system workqueue and blocks until all CPUs have completed.
2798 * schedule_on_each_cpu() is very slow.
2799 *
2800 * RETURNS:
2801 * 0 on success, -errno on failure.
2802 */
2803int schedule_on_each_cpu(work_func_t func)
2804{
2805	int cpu;
2806	struct work_struct __percpu *works;
2807
2808	works = alloc_percpu(struct work_struct);
2809	if (!works)
2810		return -ENOMEM;
2811
2812	get_online_cpus();
2813
2814	for_each_online_cpu(cpu) {
2815		struct work_struct *work = per_cpu_ptr(works, cpu);
2816
2817		INIT_WORK(work, func);
2818		schedule_work_on(cpu, work);
2819	}
2820
2821	for_each_online_cpu(cpu)
2822		flush_work(per_cpu_ptr(works, cpu));
2823
2824	put_online_cpus();
2825	free_percpu(works);
2826	return 0;
2827}
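/*
 * Example sketch (hypothetical names): flush a per-CPU cache on every
 * online CPU and wait until all of them have done so.  As noted above,
 * this is slow and may sleep.
 */
static void demo_drain_local_cache(struct work_struct *work)
{
	/* runs once on each online CPU, in process context */
}

static int demo_drain_all_caches(void)
{
	return schedule_on_each_cpu(demo_drain_local_cache);
}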
2828
2829/**
2830 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2831 *
2832 * Forces execution of the kernel-global workqueue and blocks until its
2833 * completion.
2834 *
2835 * Think twice before calling this function!  It's very easy to get into
2836 * trouble if you don't take great care.  Either of the following situations
2837 * will lead to deadlock:
2838 *
2839 *	One of the work items currently on the workqueue needs to acquire
2840 *	a lock held by your code or its caller.
2841 *
2842 *	Your code is running in the context of a work routine.
2843 *
2844 * They will be detected by lockdep when they occur, but the first might not
2845 * occur very often.  It depends on what work items are on the workqueue and
2846 * what locks they need, which you have no control over.
2847 *
2848 * In most situations flushing the entire workqueue is overkill; you merely
2849 * need to know that a particular work item isn't queued and isn't running.
2850 * In such cases you should use cancel_delayed_work_sync() or
2851 * cancel_work_sync() instead.
2852 */
2853void flush_scheduled_work(void)
2854{
2855	flush_workqueue(system_wq);
2856}
2857EXPORT_SYMBOL(flush_scheduled_work);
2858
2859/**
2860 * execute_in_process_context - reliably execute the routine with user context
2861 * @fn:		the function to execute
2862 * @ew:		guaranteed storage for the execute work structure (must
2863 *		be available when the work executes)
2864 *
2865 * Executes the function immediately if process context is available,
2866 * otherwise schedules the function for delayed execution.
2867 *
2868 * Returns:	0 - function was executed
2869 *		1 - function was scheduled for execution
2870 */
2871int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2872{
2873	if (!in_interrupt()) {
2874		fn(&ew->work);
2875		return 0;
2876	}
2877
2878	INIT_WORK(&ew->work, fn);
2879	schedule_work(&ew->work);
2880
2881	return 1;
2882}
2883EXPORT_SYMBOL_GPL(execute_in_process_context);
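/*
 * Example sketch (hypothetical names): a release path that may be
 * reached from interrupt context.  The execute_work storage must stay
 * valid until the deferred call runs, so it is embedded in the object
 * being released.
 */
struct demo_obj {
	struct execute_work release_ew;
	/* ... payload ... */
};

static void demo_obj_release(struct work_struct *work)
{
	struct demo_obj *obj =
		container_of(work, struct demo_obj, release_ew.work);

	kfree(obj);
}

static void demo_obj_free(struct demo_obj *obj)
{
	/* runs now if in process context, otherwise via schedule_work() */
	execute_in_process_context(demo_obj_release, &obj->release_ew);
}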
2884
2885int keventd_up(void)
2886{
2887	return system_wq != NULL;
2888}
2889
2890static int alloc_cwqs(struct workqueue_struct *wq)
2891{
2892	/*
2893	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
2894	 * Make sure that the alignment isn't lower than that of
2895	 * unsigned long long.
2896	 */
2897	const size_t size = sizeof(struct cpu_workqueue_struct);
2898	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2899				   __alignof__(unsigned long long));
2900#ifdef CONFIG_SMP
2901	bool percpu = !(wq->flags & WQ_UNBOUND);
2902#else
2903	bool percpu = false;
2904#endif
2905
2906	if (percpu)
2907		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2908	else {
2909		void *ptr;
2910
2911		/*
2912		 * Allocate enough room to align cwq and put an extra
2913		 * pointer at the end pointing back to the originally
2914		 * allocated pointer which will be used when freeing.
2915		 */
2916		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2917		if (ptr) {
2918			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2919			*(void **)(wq->cpu_wq.single + 1) = ptr;
2920		}
2921	}
2922
2923	/* just in case, make sure it's actually aligned */
2924	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2925	return wq->cpu_wq.v ? 0 : -ENOMEM;
2926}
2927
2928static void free_cwqs(struct workqueue_struct *wq)
 
2929{
2930#ifdef CONFIG_SMP
2931	bool percpu = !(wq->flags & WQ_UNBOUND);
2932#else
2933	bool percpu = false;
2934#endif
2935
2936	if (percpu)
2937		free_percpu(wq->cpu_wq.pcpu);
2938	else if (wq->cpu_wq.single) {
2939		/* the pointer to free is stored right after the cwq */
2940		kfree(*(void **)(wq->cpu_wq.single + 1));
2941	}
2942}
2943
2944static int wq_clamp_max_active(int max_active, unsigned int flags,
2945			       const char *name)
2946{
2947	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2948
2949	if (max_active < 1 || max_active > lim)
2950		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2951		       "is out of range, clamping between %d and %d\n",
2952		       max_active, name, 1, lim);
2953
2954	return clamp_val(max_active, 1, lim);
2955}
2956
2957struct workqueue_struct *__alloc_workqueue_key(const char *name,
2958					       unsigned int flags,
2959					       int max_active,
2960					       struct lock_class_key *key,
2961					       const char *lock_name)
2962{
 
 
2963	struct workqueue_struct *wq;
2964	unsigned int cpu;
2965
2966	/*
2967	 * Workqueues which may be used during memory reclaim should
2968	 * have a rescuer to guarantee forward progress.
2969	 */
2970	if (flags & WQ_MEM_RECLAIM)
2971		flags |= WQ_RESCUER;
2972
2973	/*
2974	 * Unbound workqueues aren't concurrency managed and should be
2975	 * dispatched to workers immediately.
2976	 */
2977	if (flags & WQ_UNBOUND)
2978		flags |= WQ_HIGHPRI;
2979
2980	max_active = max_active ?: WQ_DFL_ACTIVE;
2981	max_active = wq_clamp_max_active(max_active, flags, name);
2982
2983	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
2984	if (!wq)
2985		goto err;
2986
 
2987	wq->flags = flags;
2988	wq->saved_max_active = max_active;
2989	mutex_init(&wq->flush_mutex);
2990	atomic_set(&wq->nr_cwqs_to_flush, 0);
 
2991	INIT_LIST_HEAD(&wq->flusher_queue);
2992	INIT_LIST_HEAD(&wq->flusher_overflow);
 
2993
2994	wq->name = name;
2995	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2996	INIT_LIST_HEAD(&wq->list);
2997
2998	if (alloc_cwqs(wq) < 0)
2999		goto err;
3000
3001	for_each_cwq_cpu(cpu, wq) {
3002		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3003		struct global_cwq *gcwq = get_gcwq(cpu);
3004
3005		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3006		cwq->gcwq = gcwq;
3007		cwq->wq = wq;
3008		cwq->flush_color = -1;
3009		cwq->max_active = max_active;
3010		INIT_LIST_HEAD(&cwq->delayed_works);
3011	}
3012
3013	if (flags & WQ_RESCUER) {
3014		struct worker *rescuer;
3015
3016		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
3017			goto err;
3018
3019		wq->rescuer = rescuer = alloc_worker();
3020		if (!rescuer)
3021			goto err;
3022
3023		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
3024		if (IS_ERR(rescuer->task))
3025			goto err;
3026
3027		rescuer->task->flags |= PF_THREAD_BOUND;
3028		wake_up_process(rescuer->task);
3029	}
3030
3031	/*
3032	 * workqueue_lock protects global freeze state and workqueues
3033	 * list.  Grab it, set max_active accordingly and add the new
3034	 * workqueue to workqueues list.
3035	 */
3036	spin_lock(&workqueue_lock);
3037
3038	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3039		for_each_cwq_cpu(cpu, wq)
3040			get_cwq(cpu, wq)->max_active = 0;
 
3041
3042	list_add(&wq->list, &workqueues);
3043
3044	spin_unlock(&workqueue_lock);
3045
3046	return wq;
3047err:
3048	if (wq) {
3049		free_cwqs(wq);
3050		free_mayday_mask(wq->mayday_mask);
3051		kfree(wq->rescuer);
3052		kfree(wq);
3053	}
3054	return NULL;
3055}
3056EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
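/*
 * Example sketch (hypothetical names): __alloc_workqueue_key() is
 * normally reached through the alloc_workqueue() wrapper.  A workqueue
 * used on the I/O or reclaim path asks for WQ_MEM_RECLAIM so that the
 * rescuer guarantees forward progress under memory pressure.
 */
static struct workqueue_struct *demo_io_wq;

static int demo_io_init(void)
{
	demo_io_wq = alloc_workqueue("demo_io", WQ_MEM_RECLAIM, 1);
	if (!demo_io_wq)
		return -ENOMEM;
	return 0;
}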
3057
3058/**
3059 * destroy_workqueue - safely terminate a workqueue
3060 * @wq: target workqueue
3061 *
3062 * Safely destroy a workqueue. All work currently pending will be done first.
3063 */
3064void destroy_workqueue(struct workqueue_struct *wq)
3065{
3066	unsigned int cpu;
 
3067
3068	/* drain it before proceeding with destruction */
3069	drain_workqueue(wq);
3070
3071	/*
3072	 * wq list is used to freeze wq, remove from list after
3073	 * flushing is complete in case freeze races us.
3074	 */
3075	spin_lock(&workqueue_lock);
3076	list_del(&wq->list);
3077	spin_unlock(&workqueue_lock);
3078
3079	/* sanity check */
3080	for_each_cwq_cpu(cpu, wq) {
3081		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3082		int i;
3083
3084		for (i = 0; i < WORK_NR_COLORS; i++)
3085			BUG_ON(cwq->nr_in_flight[i]);
3086		BUG_ON(cwq->nr_active);
3087		BUG_ON(!list_empty(&cwq->delayed_works));
3088	}
3089
3090	if (wq->flags & WQ_RESCUER) {
3091		kthread_stop(wq->rescuer->task);
3092		free_mayday_mask(wq->mayday_mask);
3093		kfree(wq->rescuer);
3094	}
3095
3096	free_cwqs(wq);
3097	kfree(wq);
3098}
3099EXPORT_SYMBOL_GPL(destroy_workqueue);
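/*
 * Example sketch, continuing the hypothetical demo_io_wq above:
 * destroy_workqueue() drains the queue first, so the caller only has to
 * make sure nothing re-queues work once teardown has started.
 */
static void demo_io_exit(void)
{
	destroy_workqueue(demo_io_wq);
	demo_io_wq = NULL;
}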
3100
3101/**
3102 * workqueue_set_max_active - adjust max_active of a workqueue
3103 * @wq: target workqueue
3104 * @max_active: new max_active value.
3105 *
3106 * Set max_active of @wq to @max_active.
3107 *
3108 * CONTEXT:
3109 * Don't call from IRQ context.
3110 */
3111void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3112{
3113	unsigned int cpu;
3114
3115	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3116
3117	spin_lock(&workqueue_lock);
3118
 
3119	wq->saved_max_active = max_active;
3120
3121	for_each_cwq_cpu(cpu, wq) {
3122		struct global_cwq *gcwq = get_gcwq(cpu);
3123
3124		spin_lock_irq(&gcwq->lock);
 
 
3125
3126		if (!(wq->flags & WQ_FREEZABLE) ||
3127		    !(gcwq->flags & GCWQ_FREEZING))
3128			get_cwq(gcwq->cpu, wq)->max_active = max_active;
3129
3130		spin_unlock_irq(&gcwq->lock);
3131	}
3132
3133	spin_unlock(&workqueue_lock);
3134}
3135EXPORT_SYMBOL_GPL(workqueue_set_max_active);
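/*
 * Example sketch (hypothetical helper): temporarily serialise a
 * workqueue, e.g. while a slow device is resuming, and later restore
 * the default concurrency.  Out-of-range values are clamped.
 */
static void demo_wq_throttle(struct workqueue_struct *wq, bool slow)
{
	workqueue_set_max_active(wq, slow ? 1 : WQ_DFL_ACTIVE);
}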
3136
3137/**
3138 * workqueue_congested - test whether a workqueue is congested
3139 * @cpu: CPU in question
3140 * @wq: target workqueue
3141 *
3142 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
3143 * no synchronization around this function and the test result is
3144 * unreliable and only useful as advisory hints or for debugging.
3145 *
3146 * RETURNS:
3147 * %true if congested, %false otherwise.
3148 */
3149bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3150{
3151	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3152
3153	return !list_empty(&cwq->delayed_works);
3154}
3155EXPORT_SYMBOL_GPL(workqueue_congested);
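/*
 * Example sketch (hypothetical helper): skip optional, best-effort work
 * when this CPU's queue already has inactive (delayed) items.  The test
 * is advisory only, as documented above.
 */
static bool demo_queue_optional(struct workqueue_struct *wq,
				struct work_struct *work)
{
	if (workqueue_congested(raw_smp_processor_id(), wq))
		return false;	/* wq is backed up on this CPU, drop it */
	return queue_work(wq, work);
}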
 
3156
3157/**
3158 * work_cpu - return the last known associated cpu for @work
3159 * @work: the work of interest
3160 *
3161 * RETURNS:
3162 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
3163 */
3164unsigned int work_cpu(struct work_struct *work)
3165{
3166	struct global_cwq *gcwq = get_work_gcwq(work);
3167
3168	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3169}
3170EXPORT_SYMBOL_GPL(work_cpu);
3171
3172/**
3173 * work_busy - test whether a work is currently pending or running
3174 * @work: the work to be tested
3175 *
3176 * Test whether @work is currently pending or running.  There is no
3177 * synchronization around this function and the test result is
3178 * unreliable and only useful as advisory hints or for debugging.
3179 * Especially for reentrant wqs, the pending state might hide the
3180 * running state.
3181 *
3182 * RETURNS:
3183 * OR'd bitmask of WORK_BUSY_* bits.
3184 */
3185unsigned int work_busy(struct work_struct *work)
3186{
3187	struct global_cwq *gcwq = get_work_gcwq(work);
3188	unsigned long flags;
3189	unsigned int ret = 0;
3190
3191	if (!gcwq)
3192		return 0;
3193
3194	spin_lock_irqsave(&gcwq->lock, flags);
3195
3196	if (work_pending(work))
3197		ret |= WORK_BUSY_PENDING;
3198	if (find_worker_executing_work(gcwq, work))
3199		ret |= WORK_BUSY_RUNNING;
3200
3201	spin_unlock_irqrestore(&gcwq->lock, flags);
3202
3203	return ret;
3204}
3205EXPORT_SYMBOL_GPL(work_busy);
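/*
 * Example sketch (hypothetical helper): work_busy() is only a hint, so
 * it is best used for diagnostics, e.g. warning about an unexpectedly
 * busy work item before teardown.
 */
static void demo_warn_if_busy(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	if (busy & WORK_BUSY_PENDING)
		printk(KERN_WARNING "work %p still pending\n", work);
	if (busy & WORK_BUSY_RUNNING)
		printk(KERN_WARNING "work %p still running\n", work);
}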
3206
3207/*
3208 * CPU hotplug.
3209 *
3210 * There are two challenges in supporting CPU hotplug.  Firstly, there
3211 * are a lot of assumptions on strong associations among work, cwq and
3212 * gcwq which make migrating pending and scheduled works very
3213 * difficult to implement without impacting hot paths.  Secondly,
3214 * gcwqs serve a mix of short, long and very long running works, making
3215 * blocked draining impractical.
3216 *
3217 * This is solved by allowing a gcwq to be detached from CPU, running
3218 * it with unbound (rogue) workers and allowing it to be reattached
3219 * later if the cpu comes back online.  A separate thread is created
3220 * to govern a gcwq in such state and is called the trustee of the
3221 * gcwq.
3222 *
3223 * Trustee states and their descriptions.
3224 *
3225 * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
3226 *		new trustee is started with this state.
3227 *
3228 * IN_CHARGE	Once started, trustee will enter this state after
3229 *		assuming the manager role and making all existing
3230 *		workers rogue.  DOWN_PREPARE waits for trustee to
3231 *		enter this state.  After reaching IN_CHARGE, trustee
3232 *		tries to execute the pending worklist until it's empty
3233 *		and the state is set to BUTCHER, or the state is set
3234 *		to RELEASE.
3235 *
3236 * BUTCHER	Command state which is set by the cpu callback after
3237 *		the cpu has gone down.  Once this state is set, the trustee
3238 *		knows that there will be no new works on the worklist
3239 *		and once the worklist is empty it can proceed to
3240 *		killing idle workers.
3241 *
3242 * RELEASE	Command state which is set by the cpu callback if the
3243 *		cpu down has been canceled or it has come online
3244 *		again.  After recognizing this state, trustee stops
3245 *		trying to drain or butcher and clears ROGUE, rebinds
3246 *		all remaining workers back to the cpu and releases
3247 *		manager role.
3248 *
3249 * DONE		Trustee will enter this state after BUTCHER or RELEASE
3250 *		is complete.
3251 *
3252 *          trustee                 CPU                draining
3253 *         took over                down               complete
3254 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3255 *                        |                     |                  ^
3256 *                        | CPU is back online  v   return workers |
3257 *                         ----------------> RELEASE --------------
3258 */
3259
3260/**
3261 * trustee_wait_event_timeout - timed event wait for trustee
3262 * @cond: condition to wait for
3263 * @timeout: timeout in jiffies
3264 *
3265 * wait_event_timeout() for trustee to use.  Handles locking and
3266 * checks for RELEASE request.
3267 *
3268 * CONTEXT:
3269 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3270 * multiple times.  To be used by trustee.
3271 *
3272 * RETURNS:
3273 * Positive indicating left time if @cond is satisfied, 0 if timed
3274 * out, -1 if canceled.
3275 */
3276#define trustee_wait_event_timeout(cond, timeout) ({			\
3277	long __ret = (timeout);						\
3278	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
3279	       __ret) {							\
3280		spin_unlock_irq(&gcwq->lock);				\
3281		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
3282			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
3283			__ret);						\
3284		spin_lock_irq(&gcwq->lock);				\
3285	}								\
3286	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
3287})
3288
3289/**
3290 * trustee_wait_event - event wait for trustee
3291 * @cond: condition to wait for
3292 *
3293 * wait_event() for trustee to use.  Automatically handles locking and
3294 * checks for RELEASE request.
3295 *
3296 * CONTEXT:
3297 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3298 * multiple times.  To be used by trustee.
3299 *
3300 * RETURNS:
3301 * 0 if @cond is satisfied, -1 if canceled.
 
3302 */
3303#define trustee_wait_event(cond) ({					\
3304	long __ret1;							\
3305	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3306	__ret1 < 0 ? -1 : 0;						\
3307})
3308
3309static int __cpuinit trustee_thread(void *__gcwq)
3310{
3311	struct global_cwq *gcwq = __gcwq;
3312	struct worker *worker;
3313	struct work_struct *work;
3314	struct hlist_node *pos;
3315	long rc;
3316	int i;
3317
3318	BUG_ON(gcwq->cpu != smp_processor_id());
3319
3320	spin_lock_irq(&gcwq->lock);
3321	/*
3322	 * Claim the manager position and make all workers rogue.
3323	 * Trustee must be bound to the target cpu and can't be
3324	 * cancelled.
3325	 */
3326	BUG_ON(gcwq->cpu != smp_processor_id());
3327	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3328	BUG_ON(rc < 0);
 
 
3329
3330	gcwq->flags |= GCWQ_MANAGING_WORKERS;
3331
3332	list_for_each_entry(worker, &gcwq->idle_list, entry)
3333		worker->flags |= WORKER_ROGUE;
3334
3335	for_each_busy_worker(worker, i, pos, gcwq)
3336		worker->flags |= WORKER_ROGUE;
 
 
3337
3338	/*
3339	 * Call schedule() so that we cross rq->lock and thus can
3340	 * guarantee sched callbacks see the rogue flag.  This is
3341	 * necessary as scheduler callbacks may be invoked from other
3342	 * cpus.
3343	 */
3344	spin_unlock_irq(&gcwq->lock);
3345	schedule();
3346	spin_lock_irq(&gcwq->lock);
 
3347
3348	/*
3349	 * Sched callbacks are disabled now.  Zap nr_running.  After
3350	 * this, nr_running stays zero and need_more_worker() and
3351	 * keep_working() are always true as long as the worklist is
3352	 * not empty.
3353	 */
3354	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
 
3355
3356	spin_unlock_irq(&gcwq->lock);
3357	del_timer_sync(&gcwq->idle_timer);
3358	spin_lock_irq(&gcwq->lock);
 
3359
3360	/*
3361	 * We're now in charge.  Notify and proceed to drain.  We need
3362	 * to keep the gcwq running during the whole CPU down
3363	 * procedure as other cpu hotunplug callbacks may need to
3364	 * flush currently running tasks.
3365	 */
3366	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3367	wake_up_all(&gcwq->trustee_wait);
3368
3369	/*
3370	 * The original cpu is in the process of dying and may go away
3371	 * anytime now.  When that happens, we and all workers would
3372	 * be migrated to other cpus.  Try draining any left work.  We
3373	 * want to get it over with ASAP - spam rescuers, wake up as
3374	 * many idlers as necessary and create new ones till the
3375	 * worklist is empty.  Note that if the gcwq is frozen, there
3376	 * may be frozen works in freezable cwqs.  Don't declare
3377	 * completion while frozen.
3378	 */
3379	while (gcwq->nr_workers != gcwq->nr_idle ||
3380	       gcwq->flags & GCWQ_FREEZING ||
3381	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3382		int nr_works = 0;
3383
3384		list_for_each_entry(work, &gcwq->worklist, entry) {
3385			send_mayday(work);
3386			nr_works++;
 
3387		}
3388
3389		list_for_each_entry(worker, &gcwq->idle_list, entry) {
3390			if (!nr_works--)
3391				break;
3392			wake_up_process(worker->task);
3393		}
3394
3395		if (need_to_create_worker(gcwq)) {
3396			spin_unlock_irq(&gcwq->lock);
3397			worker = create_worker(gcwq, false);
3398			spin_lock_irq(&gcwq->lock);
3399			if (worker) {
3400				worker->flags |= WORKER_ROGUE;
3401				start_worker(worker);
3402			}
 
3403		}
3404
3405		/* give a breather */
3406		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3407			break;
3408	}
3409
3410	/*
3411	 * Either all works have been scheduled and cpu is down, or
3412	 * cpu down has already been canceled.  Wait for and butcher
3413	 * all workers till we're canceled.
 
 
3414	 */
3415	do {
3416		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3417		while (!list_empty(&gcwq->idle_list))
3418			destroy_worker(list_first_entry(&gcwq->idle_list,
3419							struct worker, entry));
3420	} while (gcwq->nr_workers && rc >= 0);
3421
3422	/*
3423	 * At this point, either draining has completed and no worker
3424	 * is left, or cpu down has been canceled or the cpu is being
3425 * brought back up.  There shouldn't be any idle workers left.
3426 * Tell the remaining busy ones to rebind once they finish the
3427 * currently scheduled works by scheduling the rebind_work.
3428	 */
3429	WARN_ON(!list_empty(&gcwq->idle_list));
3430
3431	for_each_busy_worker(worker, i, pos, gcwq) {
3432		struct work_struct *rebind_work = &worker->rebind_work;
3433
3434		/*
3435		 * Rebind_work may race with future cpu hotplug
3436		 * operations.  Use a separate flag to mark that
3437		 * rebinding is scheduled.
3438		 */
3439		worker->flags |= WORKER_REBIND;
3440		worker->flags &= ~WORKER_ROGUE;
3441
3442		/* queue rebind_work, wq doesn't matter, use the default one */
3443		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3444				     work_data_bits(rebind_work)))
3445			continue;
3446
3447		debug_work_activate(rebind_work);
3448		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3449			    worker->scheduled.next,
3450			    work_color_to_flags(WORK_NO_COLOR));
3451	}
3452
3453	/* relinquish manager role */
3454	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3455
3456	/* notify completion */
3457	gcwq->trustee = NULL;
3458	gcwq->trustee_state = TRUSTEE_DONE;
3459	wake_up_all(&gcwq->trustee_wait);
3460	spin_unlock_irq(&gcwq->lock);
3461	return 0;
3462}
3463
3464/**
3465 * wait_trustee_state - wait for trustee to enter the specified state
3466 * @gcwq: gcwq the trustee of interest belongs to
3467 * @state: target state to wait for
3468 *
3469 * Wait for the trustee to reach @state.  DONE is already matched.
3470 *
3471 * CONTEXT:
3472 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3473 * multiple times.  To be used by cpu_callback.
 
3474 */
3475static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3476__releases(&gcwq->lock)
3477__acquires(&gcwq->lock)
3478{
3479	if (!(gcwq->trustee_state == state ||
3480	      gcwq->trustee_state == TRUSTEE_DONE)) {
3481		spin_unlock_irq(&gcwq->lock);
3482		__wait_event(gcwq->trustee_wait,
3483			     gcwq->trustee_state == state ||
3484			     gcwq->trustee_state == TRUSTEE_DONE);
3485		spin_lock_irq(&gcwq->lock);
3486	}
3487}
3488
3489static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3490						unsigned long action,
3491						void *hcpu)
3492{
3493	unsigned int cpu = (unsigned long)hcpu;
3494	struct global_cwq *gcwq = get_gcwq(cpu);
3495	struct task_struct *new_trustee = NULL;
3496	struct worker *uninitialized_var(new_worker);
3497	unsigned long flags;
3498
3499	action &= ~CPU_TASKS_FROZEN;
3500
3501	switch (action) {
3502	case CPU_DOWN_PREPARE:
3503		new_trustee = kthread_create(trustee_thread, gcwq,
3504					     "workqueue_trustee/%d", cpu);
3505		if (IS_ERR(new_trustee))
3506			return notifier_from_errno(PTR_ERR(new_trustee));
3507		kthread_bind(new_trustee, cpu);
3508		/* fall through */
3509	case CPU_UP_PREPARE:
3510		BUG_ON(gcwq->first_idle);
3511		new_worker = create_worker(gcwq, false);
3512		if (!new_worker) {
3513			if (new_trustee)
3514				kthread_stop(new_trustee);
3515			return NOTIFY_BAD;
3516		}
3517	}
 
 
3518
3519	/* some are called w/ irq disabled, don't disturb irq status */
3520	spin_lock_irqsave(&gcwq->lock, flags);
3521
3522	switch (action) {
3523	case CPU_DOWN_PREPARE:
3524		/* initialize trustee and tell it to acquire the gcwq */
3525		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3526		gcwq->trustee = new_trustee;
3527		gcwq->trustee_state = TRUSTEE_START;
3528		wake_up_process(gcwq->trustee);
3529		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3530		/* fall through */
3531	case CPU_UP_PREPARE:
3532		BUG_ON(gcwq->first_idle);
3533		gcwq->first_idle = new_worker;
3534		break;
3535
3536	case CPU_DYING:
3537		/*
3538		 * Before this, the trustee and all workers except for
3539		 * the ones which are still executing works from
3540		 * before the last CPU down must be on the cpu.  After
3541		 * this, they'll all be diasporas.
3542		 */
3543		gcwq->flags |= GCWQ_DISASSOCIATED;
3544		break;
3545
3546	case CPU_POST_DEAD:
3547		gcwq->trustee_state = TRUSTEE_BUTCHER;
3548		/* fall through */
3549	case CPU_UP_CANCELED:
3550		destroy_worker(gcwq->first_idle);
3551		gcwq->first_idle = NULL;
3552		break;
3553
3554	case CPU_DOWN_FAILED:
3555	case CPU_ONLINE:
3556		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3557		if (gcwq->trustee_state != TRUSTEE_DONE) {
3558			gcwq->trustee_state = TRUSTEE_RELEASE;
3559			wake_up_process(gcwq->trustee);
3560			wait_trustee_state(gcwq, TRUSTEE_DONE);
3561		}
3562
3563		/*
3564		 * Trustee is done and there might be no worker left.
3565		 * Put the first_idle in and request a real manager to
3566		 * take a look.
3567		 */
3568		spin_unlock_irq(&gcwq->lock);
3569		kthread_bind(gcwq->first_idle->task, cpu);
3570		spin_lock_irq(&gcwq->lock);
3571		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3572		start_worker(gcwq->first_idle);
3573		gcwq->first_idle = NULL;
3574		break;
3575	}
3576
3577	spin_unlock_irqrestore(&gcwq->lock, flags);
 
 
3578
3579	return notifier_from_errno(0);
 
3580}
3581
3582#ifdef CONFIG_SMP
3583
3584struct work_for_cpu {
3585	struct completion completion;
3586	long (*fn)(void *);
3587	void *arg;
3588	long ret;
3589};
3590
3591static int do_work_for_cpu(void *_wfc)
3592{
3593	struct work_for_cpu *wfc = _wfc;
 
3594	wfc->ret = wfc->fn(wfc->arg);
3595	complete(&wfc->completion);
3596	return 0;
3597}
3598
3599/**
3600 * work_on_cpu - run a function in user context on a particular cpu
3601 * @cpu: the cpu to run on
3602 * @fn: the function to run
3603 * @arg: the function arg
3604 *
3605 * This will return the value @fn returns.
3606 * It is up to the caller to ensure that the cpu doesn't go offline.
3607 * The caller must not hold any locks which would prevent @fn from completing.
 
 
3608 */
3609long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3610{
3611	struct task_struct *sub_thread;
3612	struct work_for_cpu wfc = {
3613		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3614		.fn = fn,
3615		.arg = arg,
3616	};
3617
3618	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3619	if (IS_ERR(sub_thread))
3620		return PTR_ERR(sub_thread);
3621	kthread_bind(sub_thread, cpu);
3622	wake_up_process(sub_thread);
3623	wait_for_completion(&wfc.completion);
3624	return wfc.ret;
3625}
3626EXPORT_SYMBOL_GPL(work_on_cpu);
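/*
 * Example sketch (hypothetical names): run a function on a particular
 * CPU, e.g. to sample a CPU-local resource, and collect its return
 * value.  Keeping the CPU online is the caller's responsibility.
 */
static long demo_sample_local(void *arg)
{
	/* executes on the CPU passed to work_on_cpu() */
	return smp_processor_id();
}

static long demo_sample_cpu(unsigned int cpu)
{
	return work_on_cpu(cpu, demo_sample_local, NULL);
}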
3627#endif /* CONFIG_SMP */
3628
3629#ifdef CONFIG_FREEZER
3630
3631/**
3632 * freeze_workqueues_begin - begin freezing workqueues
3633 *
3634 * Start freezing workqueues.  After this function returns, all freezable
3635 * workqueues will queue new works to their delayed_works list instead of
3636 * gcwq->worklist.
3637 *
3638 * CONTEXT:
3639 * Grabs and releases workqueue_lock and gcwq->lock's.
3640 */
3641void freeze_workqueues_begin(void)
3642{
3643	unsigned int cpu;
 
3644
3645	spin_lock(&workqueue_lock);
3646
3647	BUG_ON(workqueue_freezing);
3648	workqueue_freezing = true;
3649
3650	for_each_gcwq_cpu(cpu) {
3651		struct global_cwq *gcwq = get_gcwq(cpu);
3652		struct workqueue_struct *wq;
3653
3654		spin_lock_irq(&gcwq->lock);
3655
3656		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3657		gcwq->flags |= GCWQ_FREEZING;
3658
3659		list_for_each_entry(wq, &workqueues, list) {
3660			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3661
3662			if (cwq && wq->flags & WQ_FREEZABLE)
3663				cwq->max_active = 0;
3664		}
3665
3666		spin_unlock_irq(&gcwq->lock);
3667	}
3668
3669	spin_unlock(&workqueue_lock);
3670}
3671
3672/**
3673 * freeze_workqueues_busy - are freezable workqueues still busy?
3674 *
3675 * Check whether freezing is complete.  This function must be called
3676 * between freeze_workqueues_begin() and thaw_workqueues().
3677 *
3678 * CONTEXT:
3679 * Grabs and releases workqueue_lock.
3680 *
3681 * RETURNS:
3682 * %true if some freezable workqueues are still busy.  %false if freezing
3683 * is complete.
3684 */
3685bool freeze_workqueues_busy(void)
3686{
3687	unsigned int cpu;
3688	bool busy = false;
 
 
3689
3690	spin_lock(&workqueue_lock);
3691
3692	BUG_ON(!workqueue_freezing);
3693
3694	for_each_gcwq_cpu(cpu) {
3695		struct workqueue_struct *wq;
 
3696		/*
3697		 * nr_active is monotonically decreasing.  It's safe
3698		 * to peek without lock.
3699		 */
3700		list_for_each_entry(wq, &workqueues, list) {
3701			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3702
3703			if (!cwq || !(wq->flags & WQ_FREEZABLE))
3704				continue;
3705
3706			BUG_ON(cwq->nr_active < 0);
3707			if (cwq->nr_active) {
3708				busy = true;
 
3709				goto out_unlock;
3710			}
3711		}
 
3712	}
3713out_unlock:
3714	spin_unlock(&workqueue_lock);
3715	return busy;
3716}
3717
3718/**
3719 * thaw_workqueues - thaw workqueues
3720 *
3721 * Thaw workqueues.  Normal queueing is restored and all collected
3722 * frozen works are transferred to their respective gcwq worklists.
3723 *
3724 * CONTEXT:
3725 * Grabs and releases workqueue_lock and gcwq->lock's.
3726 */
3727void thaw_workqueues(void)
3728{
3729	unsigned int cpu;
 
3730
3731	spin_lock(&workqueue_lock);
3732
3733	if (!workqueue_freezing)
3734		goto out_unlock;
3735
3736	for_each_gcwq_cpu(cpu) {
3737		struct global_cwq *gcwq = get_gcwq(cpu);
3738		struct workqueue_struct *wq;
3739
3740		spin_lock_irq(&gcwq->lock);
3741
3742		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3743		gcwq->flags &= ~GCWQ_FREEZING;
 
 
3744
3745		list_for_each_entry(wq, &workqueues, list) {
3746			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3747
3748			if (!cwq || !(wq->flags & WQ_FREEZABLE))
3749				continue;
3750
3751			/* restore max_active and repopulate worklist */
3752			cwq->max_active = wq->saved_max_active;
3753
3754			while (!list_empty(&cwq->delayed_works) &&
3755			       cwq->nr_active < cwq->max_active)
3756				cwq_activate_first_delayed(cwq);
 
3757		}
3758
3759		wake_up_worker(gcwq);
3760
3761		spin_unlock_irq(&gcwq->lock);
3762	}
3763
3764	workqueue_freezing = false;
3765out_unlock:
3766	spin_unlock(&workqueue_lock);
3767}
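/*
 * Example sketch: the freezer core is the intended caller of the three
 * entry points above and uses them in this order.  The retry loop and
 * budget below are hypothetical simplifications of the suspend path.
 */
static int demo_freeze_workqueues(void)
{
	int tries = 10;

	freeze_workqueues_begin();
	while (freeze_workqueues_busy()) {
		if (!--tries) {
			thaw_workqueues();	/* give up, restore queueing */
			return -EBUSY;
		}
		schedule_timeout_uninterruptible(HZ / 100);
	}
	return 0;	/* thaw_workqueues() runs again on resume */
}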
3768#endif /* CONFIG_FREEZER */
3769
3770static int __init init_workqueues(void)
3771{
3772	unsigned int cpu;
3773	int i;
3774
3775	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
3776
3777	/* initialize gcwqs */
3778	for_each_gcwq_cpu(cpu) {
3779		struct global_cwq *gcwq = get_gcwq(cpu);
3780
3781		spin_lock_init(&gcwq->lock);
3782		INIT_LIST_HEAD(&gcwq->worklist);
3783		gcwq->cpu = cpu;
3784		gcwq->flags |= GCWQ_DISASSOCIATED;
3785
3786		INIT_LIST_HEAD(&gcwq->idle_list);
3787		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3788			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3789
3790		init_timer_deferrable(&gcwq->idle_timer);
3791		gcwq->idle_timer.function = idle_worker_timeout;
3792		gcwq->idle_timer.data = (unsigned long)gcwq;
3793
3794		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3795			    (unsigned long)gcwq);
 
3796
3797		ida_init(&gcwq->worker_ida);
3798
3799		gcwq->trustee_state = TRUSTEE_DONE;
3800		init_waitqueue_head(&gcwq->trustee_wait);
3801	}
3802
3803	/* create the initial worker */
3804	for_each_online_gcwq_cpu(cpu) {
3805		struct global_cwq *gcwq = get_gcwq(cpu);
3806		struct worker *worker;
3807
3808		if (cpu != WORK_CPU_UNBOUND)
3809			gcwq->flags &= ~GCWQ_DISASSOCIATED;
3810		worker = create_worker(gcwq, true);
3811		BUG_ON(!worker);
3812		spin_lock_irq(&gcwq->lock);
3813		start_worker(worker);
3814		spin_unlock_irq(&gcwq->lock);
 
 
3815	}
3816
3817	system_wq = alloc_workqueue("events", 0, 0);
 
3818	system_long_wq = alloc_workqueue("events_long", 0, 0);
3819	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3820	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3821					    WQ_UNBOUND_MAX_ACTIVE);
3822	system_freezable_wq = alloc_workqueue("events_freezable",
3823					      WQ_FREEZABLE, 0);
3824	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3825	       !system_unbound_wq || !system_freezable_wq);
3826	return 0;
3827}
3828early_initcall(init_workqueues);