v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Basic worker thread pool for io_uring
   4 *
   5 * Copyright (C) 2019 Jens Axboe
   6 *
   7 */
   8#include <linux/kernel.h>
   9#include <linux/init.h>
  10#include <linux/errno.h>
  11#include <linux/sched/signal.h>
  12#include <linux/percpu.h>
  13#include <linux/slab.h>
  14#include <linux/rculist_nulls.h>
  15#include <linux/cpu.h>
  16#include <linux/task_work.h>
  17#include <linux/audit.h>
  18#include <uapi/linux/io_uring.h>
  19
  20#include "io-wq.h"
  21#include "slist.h"
  22#include "io_uring.h"
  23
  24#define WORKER_IDLE_TIMEOUT	(5 * HZ)
  25
  26enum {
  27	IO_WORKER_F_UP		= 1,	/* up and active */
  28	IO_WORKER_F_RUNNING	= 2,	/* account as running */
  29	IO_WORKER_F_FREE	= 4,	/* worker on free list */
  30	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
  31};
  32
  33enum {
  34	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
  35};
  36
  37enum {
  38	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
  39};
  40
  41/*
  42 * One for each thread in a wqe pool
  43 */
  44struct io_worker {
  45	refcount_t ref;
  46	unsigned flags;
  47	struct hlist_nulls_node nulls_node;
  48	struct list_head all_list;
  49	struct task_struct *task;
  50	struct io_wqe *wqe;
  51
  52	struct io_wq_work *cur_work;
  53	struct io_wq_work *next_work;
  54	raw_spinlock_t lock;
  55
  56	struct completion ref_done;
  57
  58	unsigned long create_state;
  59	struct callback_head create_work;
  60	int create_index;
  61
  62	union {
  63		struct rcu_head rcu;
  64		struct work_struct work;
  65	};
  66};
  67
  68#if BITS_PER_LONG == 64
  69#define IO_WQ_HASH_ORDER	6
  70#else
  71#define IO_WQ_HASH_ORDER	5
  72#endif
  73
  74#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
  75
  76struct io_wqe_acct {
  77	unsigned nr_workers;
  78	unsigned max_workers;
  79	int index;
  80	atomic_t nr_running;
  81	raw_spinlock_t lock;
  82	struct io_wq_work_list work_list;
  83	unsigned long flags;
  84};
  85
  86enum {
  87	IO_WQ_ACCT_BOUND,
  88	IO_WQ_ACCT_UNBOUND,
  89	IO_WQ_ACCT_NR,
  90};
  91
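/*
 * Annotation (not part of the upstream file): each pool carries one
 * io_wqe_acct per enum value above. IO_WQ_ACCT_BOUND covers work whose
 * worker count is capped by the 'bounded' argument to io_wq_create();
 * IO_WQ_ACCT_UNBOUND covers work that may block for long periods and is
 * capped by RLIMIT_NPROC (see io_wq_create() below).
 */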
  92/*
  93 * Per-node worker thread pool
  94 */
  95struct io_wqe {
  96	raw_spinlock_t lock;
  97	struct io_wqe_acct acct[IO_WQ_ACCT_NR];
  98
  99	int node;
 100
 101	struct hlist_nulls_head free_list;
 102	struct list_head all_list;
 103
 104	struct wait_queue_entry wait;
 105
 106	struct io_wq *wq;
 107	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
 108
 109	cpumask_var_t cpu_mask;
 110};
 111
 112/*
 113 * Per io_wq state
  114 */
 115struct io_wq {
 116	unsigned long state;
 117
 118	free_work_fn *free_work;
 119	io_wq_work_fn *do_work;
 120
 121	struct io_wq_hash *hash;
 122
 123	atomic_t worker_refs;
 124	struct completion worker_done;
 125
 126	struct hlist_node cpuhp_node;
 127
 128	struct task_struct *task;
 129
 130	struct io_wqe *wqes[];
 131};
 132
 133static enum cpuhp_state io_wq_online;
 134
 135struct io_cb_cancel_data {
 136	work_cancel_fn *fn;
 137	void *data;
 138	int nr_running;
 139	int nr_pending;
 140	bool cancel_all;
 141};
 142
 143static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
 144static void io_wqe_dec_running(struct io_worker *worker);
 145static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
 146					struct io_wqe_acct *acct,
 147					struct io_cb_cancel_data *match);
 148static void create_worker_cb(struct callback_head *cb);
 149static void io_wq_cancel_tw_create(struct io_wq *wq);
 150
 151static bool io_worker_get(struct io_worker *worker)
 152{
 153	return refcount_inc_not_zero(&worker->ref);
 154}
 155
 156static void io_worker_release(struct io_worker *worker)
 157{
 158	if (refcount_dec_and_test(&worker->ref))
 159		complete(&worker->ref_done);
 160}
 161
 162static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
 163{
 164	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
 165}
 166
 167static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
 168						   struct io_wq_work *work)
 169{
 170	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
 171}
 172
 173static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
 174{
 175	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
 176}
 177
 178static void io_worker_ref_put(struct io_wq *wq)
 179{
 180	if (atomic_dec_and_test(&wq->worker_refs))
 181		complete(&wq->worker_done);
 182}
 183
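/*
 * Annotation (not part of the upstream file): two reference counts are in
 * play. worker->ref guards the io_worker struct itself and completes
 * ref_done once the final io_worker_release() drops it; wq->worker_refs
 * counts live workers plus pending creation requests, and completes
 * worker_done when it hits zero so io_wq_exit_workers() can wait for all
 * workers to go away.
 */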
 184static void io_worker_cancel_cb(struct io_worker *worker)
 185{
 186	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 187	struct io_wqe *wqe = worker->wqe;
 188	struct io_wq *wq = wqe->wq;
 189
 190	atomic_dec(&acct->nr_running);
 191	raw_spin_lock(&worker->wqe->lock);
 192	acct->nr_workers--;
 193	raw_spin_unlock(&worker->wqe->lock);
 194	io_worker_ref_put(wq);
 195	clear_bit_unlock(0, &worker->create_state);
 196	io_worker_release(worker);
 197}
 198
 199static bool io_task_worker_match(struct callback_head *cb, void *data)
 200{
 201	struct io_worker *worker;
 202
 203	if (cb->func != create_worker_cb)
 204		return false;
 205	worker = container_of(cb, struct io_worker, create_work);
 206	return worker == data;
 207}
 208
 209static void io_worker_exit(struct io_worker *worker)
 210{
 211	struct io_wqe *wqe = worker->wqe;
 212	struct io_wq *wq = wqe->wq;
 213
 214	while (1) {
 215		struct callback_head *cb = task_work_cancel_match(wq->task,
 216						io_task_worker_match, worker);
 217
 218		if (!cb)
 219			break;
 220		io_worker_cancel_cb(worker);
 221	}
 222
 223	io_worker_release(worker);
 224	wait_for_completion(&worker->ref_done);
 225
 226	raw_spin_lock(&wqe->lock);
 227	if (worker->flags & IO_WORKER_F_FREE)
 228		hlist_nulls_del_rcu(&worker->nulls_node);
 229	list_del_rcu(&worker->all_list);
 230	raw_spin_unlock(&wqe->lock);
 231	io_wqe_dec_running(worker);
 232	worker->flags = 0;
 233	preempt_disable();
 234	current->flags &= ~PF_IO_WORKER;
 235	preempt_enable();
 236
 237	kfree_rcu(worker, rcu);
 238	io_worker_ref_put(wqe->wq);
 239	do_exit(0);
 240}
 241
 242static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
 243{
 244	bool ret = false;
 245
 246	raw_spin_lock(&acct->lock);
 247	if (!wq_list_empty(&acct->work_list) &&
 248	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
 249		ret = true;
 250	raw_spin_unlock(&acct->lock);
 251
 252	return ret;
 253}
 254
 255/*
 256 * Check head of free list for an available worker. If one isn't available,
 257 * caller must create one.
 258 */
 259static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
 260					struct io_wqe_acct *acct)
 261	__must_hold(RCU)
 262{
 263	struct hlist_nulls_node *n;
 264	struct io_worker *worker;
 265
 266	/*
 267	 * Iterate free_list and see if we can find an idle worker to
 268	 * activate. If a given worker is on the free_list but in the process
 269	 * of exiting, keep trying.
 270	 */
 271	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
 272		if (!io_worker_get(worker))
 273			continue;
 274		if (io_wqe_get_acct(worker) != acct) {
 275			io_worker_release(worker);
 276			continue;
 277		}
 278		if (wake_up_process(worker->task)) {
 279			io_worker_release(worker);
 280			return true;
 281		}
 282		io_worker_release(worker);
 283	}
 284
 285	return false;
 286}
 287
 288/*
 289 * We need a worker. If we find a free one, we're good. If not, and we're
 290 * below the max number of workers, create one.
 291 */
 292static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
 293{
 294	/*
 295	 * Most likely an attempt to queue unbounded work on an io_wq that
  296	 * wasn't set up with any unbounded workers.
 297	 */
 298	if (unlikely(!acct->max_workers))
 299		pr_warn_once("io-wq is not configured for unbound workers");
 300
 301	raw_spin_lock(&wqe->lock);
 302	if (acct->nr_workers >= acct->max_workers) {
 303		raw_spin_unlock(&wqe->lock);
 304		return true;
 305	}
 306	acct->nr_workers++;
 307	raw_spin_unlock(&wqe->lock);
 308	atomic_inc(&acct->nr_running);
 309	atomic_inc(&wqe->wq->worker_refs);
 310	return create_io_worker(wqe->wq, wqe, acct->index);
 311}
 312
 313static void io_wqe_inc_running(struct io_worker *worker)
 314{
 315	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 316
 317	atomic_inc(&acct->nr_running);
 318}
 319
 320static void create_worker_cb(struct callback_head *cb)
 321{
 322	struct io_worker *worker;
 323	struct io_wq *wq;
 324	struct io_wqe *wqe;
 325	struct io_wqe_acct *acct;
 326	bool do_create = false;
 327
 328	worker = container_of(cb, struct io_worker, create_work);
 329	wqe = worker->wqe;
 330	wq = wqe->wq;
 331	acct = &wqe->acct[worker->create_index];
 332	raw_spin_lock(&wqe->lock);
 333	if (acct->nr_workers < acct->max_workers) {
 334		acct->nr_workers++;
 335		do_create = true;
 336	}
 337	raw_spin_unlock(&wqe->lock);
 338	if (do_create) {
 339		create_io_worker(wq, wqe, worker->create_index);
 340	} else {
 341		atomic_dec(&acct->nr_running);
 342		io_worker_ref_put(wq);
 343	}
 344	clear_bit_unlock(0, &worker->create_state);
 345	io_worker_release(worker);
 346}
 347
 348static bool io_queue_worker_create(struct io_worker *worker,
 349				   struct io_wqe_acct *acct,
 350				   task_work_func_t func)
 351{
 352	struct io_wqe *wqe = worker->wqe;
 353	struct io_wq *wq = wqe->wq;
 354
 355	/* raced with exit, just ignore create call */
 356	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
 357		goto fail;
 358	if (!io_worker_get(worker))
 359		goto fail;
 360	/*
 361	 * create_state manages ownership of create_work/index. We should
 362	 * only need one entry per worker, as the worker going to sleep
 363	 * will trigger the condition, and waking will clear it once it
 364	 * runs the task_work.
 365	 */
 366	if (test_bit(0, &worker->create_state) ||
 367	    test_and_set_bit_lock(0, &worker->create_state))
 368		goto fail_release;
 369
 370	atomic_inc(&wq->worker_refs);
 371	init_task_work(&worker->create_work, func);
 372	worker->create_index = acct->index;
 373	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
 374		/*
 375		 * EXIT may have been set after checking it above, check after
 376		 * adding the task_work and remove any creation item if it is
 377		 * now set. wq exit does that too, but we can have added this
 378		 * work item after we canceled in io_wq_exit_workers().
 379		 */
 380		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
 381			io_wq_cancel_tw_create(wq);
 382		io_worker_ref_put(wq);
 383		return true;
 384	}
 385	io_worker_ref_put(wq);
 386	clear_bit_unlock(0, &worker->create_state);
 387fail_release:
 388	io_worker_release(worker);
 389fail:
 390	atomic_dec(&acct->nr_running);
 391	io_worker_ref_put(wq);
 392	return false;
 393}
 394
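/*
 * Annotation (not part of the upstream file): a worker that needs another
 * worker (e.g. because it is about to sleep with work still queued) does
 * not create one inline. It queues a task_work item on the owning io_uring
 * task (wq->task) via TWA_SIGNAL, and create_worker_cb() or
 * create_worker_cont() then calls create_io_worker() from that task's
 * context. Bit 0 of worker->create_state ensures at most one creation
 * request is pending per worker.
 */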
 395static void io_wqe_dec_running(struct io_worker *worker)
 396{
 397	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 398	struct io_wqe *wqe = worker->wqe;
 399
 400	if (!(worker->flags & IO_WORKER_F_UP))
 401		return;
 402
 403	if (!atomic_dec_and_test(&acct->nr_running))
 404		return;
 405	if (!io_acct_run_queue(acct))
 406		return;
 407
 408	atomic_inc(&acct->nr_running);
 409	atomic_inc(&wqe->wq->worker_refs);
 410	io_queue_worker_create(worker, acct, create_worker_cb);
 411}
 412
 413/*
 414 * Worker will start processing some work. Move it to the busy list, if
 415 * it's currently on the freelist
 416 */
 417static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
 418{
 419	if (worker->flags & IO_WORKER_F_FREE) {
 420		worker->flags &= ~IO_WORKER_F_FREE;
 421		raw_spin_lock(&wqe->lock);
 422		hlist_nulls_del_init_rcu(&worker->nulls_node);
 423		raw_spin_unlock(&wqe->lock);
 424	}
 425}
 426
 427/*
 428 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 429 * have one attached. Dropping the mm may potentially sleep, so we drop
 430 * the lock in that case and return success. Since the caller has to
 431 * retry the loop in that case (we changed task state), we don't regrab
 432 * the lock if we return success.
 433 */
 434static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 435	__must_hold(wqe->lock)
 436{
 437	if (!(worker->flags & IO_WORKER_F_FREE)) {
 438		worker->flags |= IO_WORKER_F_FREE;
 439		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 440	}
 441}
 442
 443static inline unsigned int io_get_work_hash(struct io_wq_work *work)
 444{
 445	return work->flags >> IO_WQ_HASH_SHIFT;
 446}
 447
 448static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
 449{
 450	struct io_wq *wq = wqe->wq;
 451	bool ret = false;
 452
 453	spin_lock_irq(&wq->hash->wait.lock);
 454	if (list_empty(&wqe->wait.entry)) {
 455		__add_wait_queue(&wq->hash->wait, &wqe->wait);
 456		if (!test_bit(hash, &wq->hash->map)) {
 457			__set_current_state(TASK_RUNNING);
 458			list_del_init(&wqe->wait.entry);
 459			ret = true;
 460		}
 461	}
 462	spin_unlock_irq(&wq->hash->wait.lock);
 463	return ret;
 464}
 465
 466static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
 467					   struct io_worker *worker)
 468	__must_hold(acct->lock)
 469{
 470	struct io_wq_work_node *node, *prev;
 471	struct io_wq_work *work, *tail;
 472	unsigned int stall_hash = -1U;
 473	struct io_wqe *wqe = worker->wqe;
 474
 475	wq_list_for_each(node, prev, &acct->work_list) {
 476		unsigned int hash;
 477
 478		work = container_of(node, struct io_wq_work, list);
 479
 480		/* not hashed, can run anytime */
 481		if (!io_wq_is_hashed(work)) {
 482			wq_list_del(&acct->work_list, node, prev);
 483			return work;
 484		}
 485
 486		hash = io_get_work_hash(work);
 487		/* all items with this hash lie in [work, tail] */
 488		tail = wqe->hash_tail[hash];
 489
 490		/* hashed, can run if not already running */
 491		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
 492			wqe->hash_tail[hash] = NULL;
 493			wq_list_cut(&acct->work_list, &tail->list, prev);
 494			return work;
 495		}
 496		if (stall_hash == -1U)
 497			stall_hash = hash;
 498		/* fast forward to a next hash, for-each will fix up @prev */
 499		node = &tail->list;
 500	}
 501
 502	if (stall_hash != -1U) {
 503		bool unstalled;
 504
 505		/*
 506		 * Set this before dropping the lock to avoid racing with new
 507		 * work being added and clearing the stalled bit.
 508		 */
 509		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 510		raw_spin_unlock(&acct->lock);
 511		unstalled = io_wait_on_hash(wqe, stall_hash);
 512		raw_spin_lock(&acct->lock);
 513		if (unstalled) {
 514			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 515			if (wq_has_sleeper(&wqe->wq->hash->wait))
 516				wake_up(&wqe->wq->hash->wait);
 517		}
 518	}
 519
 520	return NULL;
 521}
 522
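/*
 * Annotation (not part of the upstream file): work items sharing a hash
 * value are kept as a contiguous [work, tail] run on the list, with
 * hash_tail[] tracking the tail. Only one run per hash value may execute
 * at a time, tracked by the bits in wq->hash->map. If every runnable item
 * is blocked behind a hash that is already running, the acct is marked
 * IO_ACCT_STALLED_BIT and the wqe parks on the hash waitqueue until the
 * running item completes and clears the bit.
 */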
 523static void io_assign_current_work(struct io_worker *worker,
 524				   struct io_wq_work *work)
 525{
 526	if (work) {
 527		io_run_task_work();
 528		cond_resched();
 529	}
 530
 531	raw_spin_lock(&worker->lock);
 532	worker->cur_work = work;
 533	worker->next_work = NULL;
 534	raw_spin_unlock(&worker->lock);
 535}
 536
 537static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
 538
 539static void io_worker_handle_work(struct io_worker *worker)
 540{
 541	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 542	struct io_wqe *wqe = worker->wqe;
 543	struct io_wq *wq = wqe->wq;
 544	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
 545
 546	do {
 547		struct io_wq_work *work;
 548
 549		/*
 550		 * If we got some work, mark us as busy. If we didn't, but
 551		 * the list isn't empty, it means we stalled on hashed work.
 552		 * Mark us stalled so we don't keep looking for work when we
 553		 * can't make progress, any work completion or insertion will
 554		 * clear the stalled flag.
 555		 */
 556		raw_spin_lock(&acct->lock);
 557		work = io_get_next_work(acct, worker);
 558		raw_spin_unlock(&acct->lock);
 559		if (work) {
 560			__io_worker_busy(wqe, worker);
 561
 562			/*
 563			 * Make sure cancelation can find this, even before
 564			 * it becomes the active work. That avoids a window
 565			 * where the work has been removed from our general
 566			 * work list, but isn't yet discoverable as the
 567			 * current work item for this worker.
 568			 */
 569			raw_spin_lock(&worker->lock);
 570			worker->next_work = work;
 571			raw_spin_unlock(&worker->lock);
 572		} else {
 573			break;
 574		}
 575		io_assign_current_work(worker, work);
 576		__set_current_state(TASK_RUNNING);
 577
 578		/* handle a whole dependent link */
 579		do {
 580			struct io_wq_work *next_hashed, *linked;
 581			unsigned int hash = io_get_work_hash(work);
 582
 583			next_hashed = wq_next_work(work);
 584
 585			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
 586				work->flags |= IO_WQ_WORK_CANCEL;
 587			wq->do_work(work);
 588			io_assign_current_work(worker, NULL);
 589
 590			linked = wq->free_work(work);
 591			work = next_hashed;
 592			if (!work && linked && !io_wq_is_hashed(linked)) {
 593				work = linked;
 594				linked = NULL;
 595			}
 596			io_assign_current_work(worker, work);
 597			if (linked)
 598				io_wqe_enqueue(wqe, linked);
 599
 600			if (hash != -1U && !next_hashed) {
 601				/* serialize hash clear with wake_up() */
 602				spin_lock_irq(&wq->hash->wait.lock);
 603				clear_bit(hash, &wq->hash->map);
 604				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 605				spin_unlock_irq(&wq->hash->wait.lock);
 606				if (wq_has_sleeper(&wq->hash->wait))
 607					wake_up(&wq->hash->wait);
 608			}
 609		} while (work);
 610	} while (1);
 611}
 612
 613static int io_wqe_worker(void *data)
 614{
 615	struct io_worker *worker = data;
 616	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 617	struct io_wqe *wqe = worker->wqe;
 618	struct io_wq *wq = wqe->wq;
 619	bool last_timeout = false;
 620	char buf[TASK_COMM_LEN];
 621
 622	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
 623
 624	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
 625	set_task_comm(current, buf);
 626
 627	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
 628		long ret;
 629
 630		set_current_state(TASK_INTERRUPTIBLE);
 631		while (io_acct_run_queue(acct))
 632			io_worker_handle_work(worker);
 633
 634		raw_spin_lock(&wqe->lock);
 635		/* timed out, exit unless we're the last worker */
 636		if (last_timeout && acct->nr_workers > 1) {
 637			acct->nr_workers--;
 638			raw_spin_unlock(&wqe->lock);
 639			__set_current_state(TASK_RUNNING);
 640			break;
 641		}
 642		last_timeout = false;
 643		__io_worker_idle(wqe, worker);
 644		raw_spin_unlock(&wqe->lock);
 645		if (io_run_task_work())
 646			continue;
 647		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
 648		if (signal_pending(current)) {
 649			struct ksignal ksig;
 650
 651			if (!get_signal(&ksig))
 652				continue;
 653			break;
 654		}
 655		last_timeout = !ret;
 656	}
 657
 658	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
 659		io_worker_handle_work(worker);
 660
 661	io_worker_exit(worker);
 662	return 0;
 663}
 664
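/*
 * Annotation (not part of the upstream file): an idle worker sleeps in
 * TASK_INTERRUPTIBLE for WORKER_IDLE_TIMEOUT (5 seconds). If the timeout
 * expires and other workers remain on this acct, the worker exits; the
 * last worker sticks around so newly queued work does not have to pay for
 * a fresh thread creation.
 */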
 665/*
 666 * Called when a worker is scheduled in. Mark us as currently running.
 667 */
 668void io_wq_worker_running(struct task_struct *tsk)
 669{
 670	struct io_worker *worker = tsk->worker_private;
 671
 672	if (!worker)
 673		return;
 674	if (!(worker->flags & IO_WORKER_F_UP))
 675		return;
 676	if (worker->flags & IO_WORKER_F_RUNNING)
 677		return;
 678	worker->flags |= IO_WORKER_F_RUNNING;
 679	io_wqe_inc_running(worker);
 680}
 681
 682/*
 683 * Called when worker is going to sleep. If there are no workers currently
 684 * running and we have work pending, wake up a free one or create a new one.
 685 */
 686void io_wq_worker_sleeping(struct task_struct *tsk)
 687{
 688	struct io_worker *worker = tsk->worker_private;
 689
 690	if (!worker)
 691		return;
 692	if (!(worker->flags & IO_WORKER_F_UP))
 693		return;
 694	if (!(worker->flags & IO_WORKER_F_RUNNING))
 695		return;
 696
 697	worker->flags &= ~IO_WORKER_F_RUNNING;
 698	io_wqe_dec_running(worker);
 699}
 700
 701static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
 702			       struct task_struct *tsk)
 703{
 704	tsk->worker_private = worker;
 705	worker->task = tsk;
 706	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
 707	tsk->flags |= PF_NO_SETAFFINITY;
 708
 709	raw_spin_lock(&wqe->lock);
 710	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 711	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
 712	worker->flags |= IO_WORKER_F_FREE;
 713	raw_spin_unlock(&wqe->lock);
 714	wake_up_new_task(tsk);
 715}
 716
 717static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
 718{
 719	return true;
 720}
 721
 722static inline bool io_should_retry_thread(long err)
 723{
 724	/*
 725	 * Prevent perpetual task_work retry, if the task (or its group) is
 726	 * exiting.
 727	 */
 728	if (fatal_signal_pending(current))
 729		return false;
 730
 731	switch (err) {
 732	case -EAGAIN:
 733	case -ERESTARTSYS:
 734	case -ERESTARTNOINTR:
 735	case -ERESTARTNOHAND:
 736		return true;
 737	default:
 738		return false;
 739	}
 740}
 741
 742static void create_worker_cont(struct callback_head *cb)
 743{
 744	struct io_worker *worker;
 745	struct task_struct *tsk;
 746	struct io_wqe *wqe;
 747
 748	worker = container_of(cb, struct io_worker, create_work);
 749	clear_bit_unlock(0, &worker->create_state);
 750	wqe = worker->wqe;
 751	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
 752	if (!IS_ERR(tsk)) {
 753		io_init_new_worker(wqe, worker, tsk);
 754		io_worker_release(worker);
 755		return;
 756	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
 757		struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 758
 759		atomic_dec(&acct->nr_running);
 760		raw_spin_lock(&wqe->lock);
 761		acct->nr_workers--;
 762		if (!acct->nr_workers) {
 763			struct io_cb_cancel_data match = {
 764				.fn		= io_wq_work_match_all,
 765				.cancel_all	= true,
 766			};
 767
 768			raw_spin_unlock(&wqe->lock);
 769			while (io_acct_cancel_pending_work(wqe, acct, &match))
 770				;
 771		} else {
 772			raw_spin_unlock(&wqe->lock);
 773		}
 774		io_worker_ref_put(wqe->wq);
 775		kfree(worker);
 776		return;
 777	}
 778
 779	/* re-create attempts grab a new worker ref, drop the existing one */
 780	io_worker_release(worker);
 781	schedule_work(&worker->work);
 782}
 783
 784static void io_workqueue_create(struct work_struct *work)
 785{
 786	struct io_worker *worker = container_of(work, struct io_worker, work);
 787	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 788
 789	if (!io_queue_worker_create(worker, acct, create_worker_cont))
 790		kfree(worker);
 791}
 792
 793static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 794{
 795	struct io_wqe_acct *acct = &wqe->acct[index];
 796	struct io_worker *worker;
 797	struct task_struct *tsk;
 798
 799	__set_current_state(TASK_RUNNING);
 800
 801	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
 802	if (!worker) {
 803fail:
 804		atomic_dec(&acct->nr_running);
 805		raw_spin_lock(&wqe->lock);
 806		acct->nr_workers--;
 807		raw_spin_unlock(&wqe->lock);
 808		io_worker_ref_put(wq);
 809		return false;
 810	}
 811
 812	refcount_set(&worker->ref, 1);
 813	worker->wqe = wqe;
 814	raw_spin_lock_init(&worker->lock);
 815	init_completion(&worker->ref_done);
 816
 817	if (index == IO_WQ_ACCT_BOUND)
 818		worker->flags |= IO_WORKER_F_BOUND;
 819
 820	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
 821	if (!IS_ERR(tsk)) {
 822		io_init_new_worker(wqe, worker, tsk);
 823	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
 824		kfree(worker);
 825		goto fail;
 826	} else {
 827		INIT_WORK(&worker->work, io_workqueue_create);
 828		schedule_work(&worker->work);
 829	}
 830
 831	return true;
 832}
 833
 834/*
 835 * Iterate the passed in list and call the specific function for each
 836 * worker that isn't exiting
 837 */
 838static bool io_wq_for_each_worker(struct io_wqe *wqe,
 839				  bool (*func)(struct io_worker *, void *),
 840				  void *data)
 841{
 842	struct io_worker *worker;
 843	bool ret = false;
 844
 845	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
 846		if (io_worker_get(worker)) {
 847			/* no task if node is/was offline */
 848			if (worker->task)
 849				ret = func(worker, data);
 850			io_worker_release(worker);
 851			if (ret)
 852				break;
 853		}
 854	}
 855
 856	return ret;
 857}
 858
 859static bool io_wq_worker_wake(struct io_worker *worker, void *data)
 860{
 861	__set_notify_signal(worker->task);
 862	wake_up_process(worker->task);
 863	return false;
 864}
 865
 866static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
 867{
 868	struct io_wq *wq = wqe->wq;
 869
 870	do {
 871		work->flags |= IO_WQ_WORK_CANCEL;
 872		wq->do_work(work);
 873		work = wq->free_work(work);
 874	} while (work);
 875}
 876
 877static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
 878{
 879	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
 880	unsigned int hash;
 881	struct io_wq_work *tail;
 882
 883	if (!io_wq_is_hashed(work)) {
 884append:
 885		wq_list_add_tail(&work->list, &acct->work_list);
 886		return;
 887	}
 888
 889	hash = io_get_work_hash(work);
 890	tail = wqe->hash_tail[hash];
 891	wqe->hash_tail[hash] = work;
 892	if (!tail)
 893		goto append;
 894
 895	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
 896}
 897
 898static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
 899{
 900	return work == data;
 901}
 902
 903static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 904{
 905	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
 906	struct io_cb_cancel_data match;
 907	unsigned work_flags = work->flags;
 908	bool do_create;
 909
 910	/*
 911	 * If io-wq is exiting for this task, or if the request has explicitly
 912	 * been marked as one that should not get executed, cancel it here.
 913	 */
 914	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
 915	    (work->flags & IO_WQ_WORK_CANCEL)) {
 916		io_run_cancel(work, wqe);
 917		return;
 918	}
 919
 920	raw_spin_lock(&acct->lock);
 921	io_wqe_insert_work(wqe, work);
 922	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 923	raw_spin_unlock(&acct->lock);
 924
 925	raw_spin_lock(&wqe->lock);
 926	rcu_read_lock();
 927	do_create = !io_wqe_activate_free_worker(wqe, acct);
 928	rcu_read_unlock();
 929
 930	raw_spin_unlock(&wqe->lock);
 931
 932	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
 933	    !atomic_read(&acct->nr_running))) {
 934		bool did_create;
 935
 936		did_create = io_wqe_create_worker(wqe, acct);
 937		if (likely(did_create))
 938			return;
 939
 940		raw_spin_lock(&wqe->lock);
 941		if (acct->nr_workers) {
 942			raw_spin_unlock(&wqe->lock);
 943			return;
 944		}
 945		raw_spin_unlock(&wqe->lock);
 946
 947		/* fatal condition, failed to create the first worker */
 948		match.fn		= io_wq_work_match_item,
 949		match.data		= work,
 950		match.cancel_all	= false,
 951
 952		io_acct_cancel_pending_work(wqe, acct, &match);
 953	}
 954}
 955
 956void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 957{
 958	struct io_wqe *wqe = wq->wqes[numa_node_id()];
 959
 960	io_wqe_enqueue(wqe, work);
 961}
 962
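/*
 * Annotation (not part of the upstream file): in this version,
 * io_wq_enqueue() selects the per-node io_wqe matching the submitting
 * CPU's NUMA node. The v6.8 listing below drops the per-node split and
 * queues directly on the single io_wq.
 */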
 963/*
 964 * Work items that hash to the same value will not be done in parallel.
 965 * Used to limit concurrent writes, generally hashed by inode.
 966 */
 967void io_wq_hash_work(struct io_wq_work *work, void *val)
 968{
 969	unsigned int bit;
 970
 971	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
 972	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 973}
 974
 975static bool __io_wq_worker_cancel(struct io_worker *worker,
 976				  struct io_cb_cancel_data *match,
 977				  struct io_wq_work *work)
 978{
 979	if (work && match->fn(work, match->data)) {
 980		work->flags |= IO_WQ_WORK_CANCEL;
 981		__set_notify_signal(worker->task);
 982		return true;
 983	}
 984
 985	return false;
 986}
 987
 988static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 989{
 990	struct io_cb_cancel_data *match = data;
 991
 992	/*
 993	 * Hold the lock to avoid ->cur_work going out of scope, caller
 994	 * may dereference the passed in work.
 995	 */
 996	raw_spin_lock(&worker->lock);
 997	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
 998	    __io_wq_worker_cancel(worker, match, worker->next_work))
 999		match->nr_running++;
1000	raw_spin_unlock(&worker->lock);
1001
1002	return match->nr_running && !match->cancel_all;
1003}
1004
1005static inline void io_wqe_remove_pending(struct io_wqe *wqe,
1006					 struct io_wq_work *work,
1007					 struct io_wq_work_node *prev)
1008{
1009	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
1010	unsigned int hash = io_get_work_hash(work);
1011	struct io_wq_work *prev_work = NULL;
1012
1013	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
1014		if (prev)
1015			prev_work = container_of(prev, struct io_wq_work, list);
1016		if (prev_work && io_get_work_hash(prev_work) == hash)
1017			wqe->hash_tail[hash] = prev_work;
1018		else
1019			wqe->hash_tail[hash] = NULL;
1020	}
1021	wq_list_del(&acct->work_list, &work->list, prev);
1022}
1023
1024static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
1025					struct io_wqe_acct *acct,
1026					struct io_cb_cancel_data *match)
1027{
1028	struct io_wq_work_node *node, *prev;
1029	struct io_wq_work *work;
1030
1031	raw_spin_lock(&acct->lock);
1032	wq_list_for_each(node, prev, &acct->work_list) {
1033		work = container_of(node, struct io_wq_work, list);
1034		if (!match->fn(work, match->data))
1035			continue;
1036		io_wqe_remove_pending(wqe, work, prev);
1037		raw_spin_unlock(&acct->lock);
1038		io_run_cancel(work, wqe);
1039		match->nr_pending++;
1040		/* not safe to continue after unlock */
1041		return true;
1042	}
1043	raw_spin_unlock(&acct->lock);
1044
1045	return false;
1046}
1047
1048static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
1049				       struct io_cb_cancel_data *match)
1050{
1051	int i;
1052retry:
1053	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1054		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
1055
1056		if (io_acct_cancel_pending_work(wqe, acct, match)) {
1057			if (match->cancel_all)
1058				goto retry;
1059			break;
1060		}
1061	}
1062}
1063
1064static void io_wqe_cancel_running_work(struct io_wqe *wqe,
1065				       struct io_cb_cancel_data *match)
1066{
1067	rcu_read_lock();
1068	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
1069	rcu_read_unlock();
1070}
1071
1072enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
1073				  void *data, bool cancel_all)
1074{
1075	struct io_cb_cancel_data match = {
1076		.fn		= cancel,
1077		.data		= data,
1078		.cancel_all	= cancel_all,
1079	};
1080	int node;
1081
1082	/*
1083	 * First check pending list, if we're lucky we can just remove it
1084	 * from there. CANCEL_OK means that the work is returned as-new,
1085	 * no completion will be posted for it.
1086	 *
1087	 * Then check if a free (going busy) or busy worker has the work
1088	 * currently running. If we find it there, we'll return CANCEL_RUNNING
1089	 * as an indication that we attempt to signal cancellation. The
1090	 * completion will run normally in this case.
1091	 *
1092	 * Do both of these while holding the wqe->lock, to ensure that
1093	 * we'll find a work item regardless of state.
1094	 */
1095	for_each_node(node) {
1096		struct io_wqe *wqe = wq->wqes[node];
1097
1098		io_wqe_cancel_pending_work(wqe, &match);
1099		if (match.nr_pending && !match.cancel_all)
1100			return IO_WQ_CANCEL_OK;
1101
1102		raw_spin_lock(&wqe->lock);
1103		io_wqe_cancel_running_work(wqe, &match);
1104		raw_spin_unlock(&wqe->lock);
1105		if (match.nr_running && !match.cancel_all)
1106			return IO_WQ_CANCEL_RUNNING;
1107	}
1108
1109	if (match.nr_running)
1110		return IO_WQ_CANCEL_RUNNING;
1111	if (match.nr_pending)
1112		return IO_WQ_CANCEL_OK;
1113	return IO_WQ_CANCEL_NOTFOUND;
1114}
1115
1116static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
1117			    int sync, void *key)
1118{
1119	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
1120	int i;
1121
1122	list_del_init(&wait->entry);
1123
1124	rcu_read_lock();
1125	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1126		struct io_wqe_acct *acct = &wqe->acct[i];
1127
1128		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
1129			io_wqe_activate_free_worker(wqe, acct);
1130	}
1131	rcu_read_unlock();
1132	return 1;
1133}
1134
1135struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
1136{
1137	int ret, node, i;
1138	struct io_wq *wq;
1139
1140	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
1141		return ERR_PTR(-EINVAL);
1142	if (WARN_ON_ONCE(!bounded))
1143		return ERR_PTR(-EINVAL);
1144
1145	wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
1146	if (!wq)
1147		return ERR_PTR(-ENOMEM);
1148	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1149	if (ret)
1150		goto err_wq;
1151
1152	refcount_inc(&data->hash->refs);
1153	wq->hash = data->hash;
1154	wq->free_work = data->free_work;
1155	wq->do_work = data->do_work;
1156
1157	ret = -ENOMEM;
1158	for_each_node(node) {
1159		struct io_wqe *wqe;
1160		int alloc_node = node;
1161
1162		if (!node_online(alloc_node))
1163			alloc_node = NUMA_NO_NODE;
1164		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
1165		if (!wqe)
1166			goto err;
1167		wq->wqes[node] = wqe;
1168		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
1169			goto err;
1170		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
1171		wqe->node = alloc_node;
1172		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
1173		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
1174					task_rlimit(current, RLIMIT_NPROC);
1175		INIT_LIST_HEAD(&wqe->wait.entry);
1176		wqe->wait.func = io_wqe_hash_wake;
1177		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1178			struct io_wqe_acct *acct = &wqe->acct[i];
1179
1180			acct->index = i;
1181			atomic_set(&acct->nr_running, 0);
1182			INIT_WQ_LIST(&acct->work_list);
1183			raw_spin_lock_init(&acct->lock);
1184		}
1185		wqe->wq = wq;
1186		raw_spin_lock_init(&wqe->lock);
1187		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
1188		INIT_LIST_HEAD(&wqe->all_list);
1189	}
1190
1191	wq->task = get_task_struct(data->task);
1192	atomic_set(&wq->worker_refs, 1);
1193	init_completion(&wq->worker_done);
1194	return wq;
1195err:
1196	io_wq_put_hash(data->hash);
1197	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1198	for_each_node(node) {
1199		if (!wq->wqes[node])
1200			continue;
1201		free_cpumask_var(wq->wqes[node]->cpu_mask);
1202		kfree(wq->wqes[node]);
1203	}
1204err_wq:
1205	kfree(wq);
1206	return ERR_PTR(ret);
1207}
1208
1209static bool io_task_work_match(struct callback_head *cb, void *data)
1210{
1211	struct io_worker *worker;
1212
1213	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
1214		return false;
1215	worker = container_of(cb, struct io_worker, create_work);
1216	return worker->wqe->wq == data;
1217}
1218
1219void io_wq_exit_start(struct io_wq *wq)
1220{
1221	set_bit(IO_WQ_BIT_EXIT, &wq->state);
1222}
1223
1224static void io_wq_cancel_tw_create(struct io_wq *wq)
1225{
1226	struct callback_head *cb;
1227
1228	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
1229		struct io_worker *worker;
1230
1231		worker = container_of(cb, struct io_worker, create_work);
1232		io_worker_cancel_cb(worker);
1233		/*
1234		 * Only the worker continuation helper has worker allocated and
1235		 * hence needs freeing.
1236		 */
1237		if (cb->func == create_worker_cont)
1238			kfree(worker);
1239	}
1240}
1241
1242static void io_wq_exit_workers(struct io_wq *wq)
1243{
1244	int node;
1245
1246	if (!wq->task)
1247		return;
1248
1249	io_wq_cancel_tw_create(wq);
1250
1251	rcu_read_lock();
1252	for_each_node(node) {
1253		struct io_wqe *wqe = wq->wqes[node];
1254
1255		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
1256	}
1257	rcu_read_unlock();
1258	io_worker_ref_put(wq);
1259	wait_for_completion(&wq->worker_done);
1260
1261	for_each_node(node) {
1262		spin_lock_irq(&wq->hash->wait.lock);
1263		list_del_init(&wq->wqes[node]->wait.entry);
1264		spin_unlock_irq(&wq->hash->wait.lock);
1265	}
1266	put_task_struct(wq->task);
1267	wq->task = NULL;
1268}
1269
1270static void io_wq_destroy(struct io_wq *wq)
1271{
1272	int node;
1273
1274	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1275
1276	for_each_node(node) {
1277		struct io_wqe *wqe = wq->wqes[node];
1278		struct io_cb_cancel_data match = {
1279			.fn		= io_wq_work_match_all,
1280			.cancel_all	= true,
1281		};
1282		io_wqe_cancel_pending_work(wqe, &match);
1283		free_cpumask_var(wqe->cpu_mask);
1284		kfree(wqe);
1285	}
1286	io_wq_put_hash(wq->hash);
1287	kfree(wq);
1288}
1289
1290void io_wq_put_and_exit(struct io_wq *wq)
1291{
1292	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
1293
1294	io_wq_exit_workers(wq);
1295	io_wq_destroy(wq);
1296}
1297
1298struct online_data {
1299	unsigned int cpu;
1300	bool online;
1301};
1302
1303static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
1304{
1305	struct online_data *od = data;
1306
1307	if (od->online)
1308		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
1309	else
1310		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
1311	return false;
1312}
1313
1314static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
1315{
1316	struct online_data od = {
1317		.cpu = cpu,
1318		.online = online
1319	};
1320	int i;
1321
1322	rcu_read_lock();
1323	for_each_node(i)
1324		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
1325	rcu_read_unlock();
1326	return 0;
1327}
1328
1329static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
1330{
1331	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1332
1333	return __io_wq_cpu_online(wq, cpu, true);
1334}
1335
1336static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
1337{
1338	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1339
1340	return __io_wq_cpu_online(wq, cpu, false);
1341}
1342
1343int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
1344{
1345	int i;
1346
1347	rcu_read_lock();
1348	for_each_node(i) {
1349		struct io_wqe *wqe = wq->wqes[i];
1350
1351		if (mask)
1352			cpumask_copy(wqe->cpu_mask, mask);
1353		else
1354			cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
1355	}
1356	rcu_read_unlock();
1357	return 0;
1358}
1359
1360/*
1361 * Set max number of unbounded workers, returns old value. If new_count is 0,
1362 * then just return the old value.
1363 */
1364int io_wq_max_workers(struct io_wq *wq, int *new_count)
1365{
1366	int prev[IO_WQ_ACCT_NR];
1367	bool first_node = true;
1368	int i, node;
1369
1370	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
1371	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
1372	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
1373
1374	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1375		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
1376			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
1377	}
1378
1379	for (i = 0; i < IO_WQ_ACCT_NR; i++)
1380		prev[i] = 0;
1381
1382	rcu_read_lock();
1383	for_each_node(node) {
1384		struct io_wqe *wqe = wq->wqes[node];
1385		struct io_wqe_acct *acct;
1386
1387		raw_spin_lock(&wqe->lock);
1388		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1389			acct = &wqe->acct[i];
1390			if (first_node)
1391				prev[i] = max_t(int, acct->max_workers, prev[i]);
1392			if (new_count[i])
1393				acct->max_workers = new_count[i];
1394		}
1395		raw_spin_unlock(&wqe->lock);
1396		first_node = false;
1397	}
1398	rcu_read_unlock();
1399
1400	for (i = 0; i < IO_WQ_ACCT_NR; i++)
1401		new_count[i] = prev[i];
1402
1403	return 0;
1404}
1405
1406static __init int io_wq_init(void)
1407{
1408	int ret;
1409
1410	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
1411					io_wq_cpu_online, io_wq_cpu_offline);
1412	if (ret < 0)
1413		return ret;
1414	io_wq_online = ret;
1415	return 0;
1416}
1417subsys_initcall(io_wq_init);
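The listing above is the complete v6.2 file. As a quick orientation before the v6.8 version below, here is a rough usage sketch of the external entry points (io_wq_create(), io_wq_hash_work(), io_wq_enqueue(), io_wq_exit_start(), io_wq_put_and_exit()), based only on how they appear in this file. The io_wq_data field names are taken from their use in io_wq_create(); example_free_work() and example_do_work() are hypothetical callbacks standing in for io_uring's real ones, and error handling is abbreviated.

/* hypothetical callbacks; io_uring supplies its own implementations */
static struct io_wq_work *example_free_work(struct io_wq_work *work);
static void example_do_work(struct io_wq_work *work);

static void example_io_wq_usage(struct io_wq_hash *hash, struct io_wq_work *work,
				void *inode)
{
	struct io_wq_data data = {
		.hash		= hash,			/* shared, refcounted hash map */
		.task		= current,		/* task the workers are created for */
		.free_work	= example_free_work,	/* returns a linked work item, if any */
		.do_work	= example_do_work,	/* runs (or cancels) one work item */
	};
	struct io_wq *wq;

	/* up to 4 bounded workers; the unbounded side is capped by RLIMIT_NPROC */
	wq = io_wq_create(4, &data);
	if (IS_ERR(wq))
		return;

	/* optionally serialize against other work hashing to the same value */
	io_wq_hash_work(work, inode);
	io_wq_enqueue(wq, work);

	/* teardown: mark the wq as exiting, then reap workers and free it */
	io_wq_exit_start(wq);
	io_wq_put_and_exit(wq);
}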
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Basic worker thread pool for io_uring
   4 *
   5 * Copyright (C) 2019 Jens Axboe
   6 *
   7 */
   8#include <linux/kernel.h>
   9#include <linux/init.h>
  10#include <linux/errno.h>
  11#include <linux/sched/signal.h>
  12#include <linux/percpu.h>
  13#include <linux/slab.h>
  14#include <linux/rculist_nulls.h>
  15#include <linux/cpu.h>
  16#include <linux/task_work.h>
  17#include <linux/audit.h>
  18#include <linux/mmu_context.h>
  19#include <uapi/linux/io_uring.h>
  20
  21#include "io-wq.h"
  22#include "slist.h"
  23#include "io_uring.h"
  24
  25#define WORKER_IDLE_TIMEOUT	(5 * HZ)
  26
  27enum {
  28	IO_WORKER_F_UP		= 1,	/* up and active */
  29	IO_WORKER_F_RUNNING	= 2,	/* account as running */
  30	IO_WORKER_F_FREE	= 4,	/* worker on free list */
  31	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
  32};
  33
  34enum {
  35	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
  36};
  37
  38enum {
  39	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
  40};
  41
  42/*
  43 * One for each thread in a wq pool
  44 */
  45struct io_worker {
  46	refcount_t ref;
  47	unsigned flags;
  48	struct hlist_nulls_node nulls_node;
  49	struct list_head all_list;
  50	struct task_struct *task;
  51	struct io_wq *wq;
  52
  53	struct io_wq_work *cur_work;
  54	struct io_wq_work *next_work;
  55	raw_spinlock_t lock;
  56
  57	struct completion ref_done;
  58
  59	unsigned long create_state;
  60	struct callback_head create_work;
  61	int create_index;
  62
  63	union {
  64		struct rcu_head rcu;
  65		struct work_struct work;
  66	};
  67};
  68
  69#if BITS_PER_LONG == 64
  70#define IO_WQ_HASH_ORDER	6
  71#else
  72#define IO_WQ_HASH_ORDER	5
  73#endif
  74
  75#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
  76
  77struct io_wq_acct {
  78	unsigned nr_workers;
  79	unsigned max_workers;
  80	int index;
  81	atomic_t nr_running;
  82	raw_spinlock_t lock;
  83	struct io_wq_work_list work_list;
  84	unsigned long flags;
  85};
  86
  87enum {
  88	IO_WQ_ACCT_BOUND,
  89	IO_WQ_ACCT_UNBOUND,
  90	IO_WQ_ACCT_NR,
  91};
  92
  93/*
  94 * Per io_wq state
   95 */
  96struct io_wq {
  97	unsigned long state;
  98
  99	free_work_fn *free_work;
 100	io_wq_work_fn *do_work;
 101
 102	struct io_wq_hash *hash;
 103
 104	atomic_t worker_refs;
 105	struct completion worker_done;
 106
 107	struct hlist_node cpuhp_node;
 108
 109	struct task_struct *task;
 110
 111	struct io_wq_acct acct[IO_WQ_ACCT_NR];
 112
 113	/* lock protects access to elements below */
 114	raw_spinlock_t lock;
 115
 116	struct hlist_nulls_head free_list;
 117	struct list_head all_list;
 118
 119	struct wait_queue_entry wait;
 120
 121	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
 122
 123	cpumask_var_t cpu_mask;
 124};
 125
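/*
 * Annotation (not part of the upstream file): compared with the v6.2
 * listing above, the per-NUMA-node struct io_wqe has been folded into
 * struct io_wq itself, so there is a single free_list, work hash tail
 * array and cpu_mask per wq rather than one per node, and the wqe
 * parameters throughout the file become wq.
 */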
 126static enum cpuhp_state io_wq_online;
 127
 128struct io_cb_cancel_data {
 129	work_cancel_fn *fn;
 130	void *data;
 131	int nr_running;
 132	int nr_pending;
 133	bool cancel_all;
 134};
 135
 136static bool create_io_worker(struct io_wq *wq, int index);
 137static void io_wq_dec_running(struct io_worker *worker);
 138static bool io_acct_cancel_pending_work(struct io_wq *wq,
 139					struct io_wq_acct *acct,
 140					struct io_cb_cancel_data *match);
 141static void create_worker_cb(struct callback_head *cb);
 142static void io_wq_cancel_tw_create(struct io_wq *wq);
 143
 144static bool io_worker_get(struct io_worker *worker)
 145{
 146	return refcount_inc_not_zero(&worker->ref);
 147}
 148
 149static void io_worker_release(struct io_worker *worker)
 150{
 151	if (refcount_dec_and_test(&worker->ref))
 152		complete(&worker->ref_done);
 153}
 154
 155static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
 156{
 157	return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
 158}
 159
 160static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
 161						  struct io_wq_work *work)
 162{
 163	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
 164}
 165
 166static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
 167{
 168	return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
 169}
 170
 171static void io_worker_ref_put(struct io_wq *wq)
 172{
 173	if (atomic_dec_and_test(&wq->worker_refs))
 174		complete(&wq->worker_done);
 175}
 176
 177bool io_wq_worker_stopped(void)
 178{
 179	struct io_worker *worker = current->worker_private;
 180
 181	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
 182		return true;
 183
 184	return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
 185}
 186
 187static void io_worker_cancel_cb(struct io_worker *worker)
 188{
 189	struct io_wq_acct *acct = io_wq_get_acct(worker);
 190	struct io_wq *wq = worker->wq;
 191
 192	atomic_dec(&acct->nr_running);
 193	raw_spin_lock(&wq->lock);
 194	acct->nr_workers--;
 195	raw_spin_unlock(&wq->lock);
 196	io_worker_ref_put(wq);
 197	clear_bit_unlock(0, &worker->create_state);
 198	io_worker_release(worker);
 199}
 200
 201static bool io_task_worker_match(struct callback_head *cb, void *data)
 202{
 203	struct io_worker *worker;
 204
 205	if (cb->func != create_worker_cb)
 206		return false;
 207	worker = container_of(cb, struct io_worker, create_work);
 208	return worker == data;
 209}
 210
 211static void io_worker_exit(struct io_worker *worker)
 212{
 213	struct io_wq *wq = worker->wq;
 214
 215	while (1) {
 216		struct callback_head *cb = task_work_cancel_match(wq->task,
 217						io_task_worker_match, worker);
 218
 219		if (!cb)
 220			break;
 221		io_worker_cancel_cb(worker);
 222	}
 223
 224	io_worker_release(worker);
 225	wait_for_completion(&worker->ref_done);
 226
 227	raw_spin_lock(&wq->lock);
 228	if (worker->flags & IO_WORKER_F_FREE)
 229		hlist_nulls_del_rcu(&worker->nulls_node);
 230	list_del_rcu(&worker->all_list);
 231	raw_spin_unlock(&wq->lock);
 232	io_wq_dec_running(worker);
 233	/*
 234	 * this worker is a goner, clear ->worker_private to avoid any
 235	 * inc/dec running calls that could happen as part of exit from
 236	 * touching 'worker'.
 237	 */
 238	current->worker_private = NULL;
 239
 240	kfree_rcu(worker, rcu);
 241	io_worker_ref_put(wq);
 242	do_exit(0);
 243}
 244
 245static inline bool __io_acct_run_queue(struct io_wq_acct *acct)
 246{
 247	return !test_bit(IO_ACCT_STALLED_BIT, &acct->flags) &&
 248		!wq_list_empty(&acct->work_list);
 249}
 250
 251/*
 252 * If there's work to do, returns true with acct->lock acquired. If not,
 253 * returns false with no lock held.
 254 */
 255static inline bool io_acct_run_queue(struct io_wq_acct *acct)
 256	__acquires(&acct->lock)
 257{
 258	raw_spin_lock(&acct->lock);
 259	if (__io_acct_run_queue(acct))
 260		return true;
 261
 262	raw_spin_unlock(&acct->lock);
 263	return false;
 264}
 265
 266/*
 267 * Check head of free list for an available worker. If one isn't available,
 268 * caller must create one.
 269 */
 270static bool io_wq_activate_free_worker(struct io_wq *wq,
 271					struct io_wq_acct *acct)
 272	__must_hold(RCU)
 273{
 274	struct hlist_nulls_node *n;
 275	struct io_worker *worker;
 276
 277	/*
 278	 * Iterate free_list and see if we can find an idle worker to
 279	 * activate. If a given worker is on the free_list but in the process
 280	 * of exiting, keep trying.
 281	 */
 282	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
 283		if (!io_worker_get(worker))
 284			continue;
 285		if (io_wq_get_acct(worker) != acct) {
 286			io_worker_release(worker);
 287			continue;
 288		}
 289		/*
 290		 * If the worker is already running, it's either already
 291		 * starting work or finishing work. In either case, if it does
  292		 * go to sleep, we'll kick off a new task for this work anyway.
 293		 */
 294		wake_up_process(worker->task);
 295		io_worker_release(worker);
 296		return true;
 297	}
 298
 299	return false;
 300}
 301
 302/*
 303 * We need a worker. If we find a free one, we're good. If not, and we're
 304 * below the max number of workers, create one.
 305 */
 306static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
 307{
 308	/*
 309	 * Most likely an attempt to queue unbounded work on an io_wq that
  310	 * wasn't set up with any unbounded workers.
 311	 */
 312	if (unlikely(!acct->max_workers))
 313		pr_warn_once("io-wq is not configured for unbound workers");
 314
 315	raw_spin_lock(&wq->lock);
 316	if (acct->nr_workers >= acct->max_workers) {
 317		raw_spin_unlock(&wq->lock);
 318		return true;
 319	}
 320	acct->nr_workers++;
 321	raw_spin_unlock(&wq->lock);
 322	atomic_inc(&acct->nr_running);
 323	atomic_inc(&wq->worker_refs);
 324	return create_io_worker(wq, acct->index);
 325}
 326
 327static void io_wq_inc_running(struct io_worker *worker)
 328{
 329	struct io_wq_acct *acct = io_wq_get_acct(worker);
 330
 331	atomic_inc(&acct->nr_running);
 332}
 333
 334static void create_worker_cb(struct callback_head *cb)
 335{
 336	struct io_worker *worker;
 337	struct io_wq *wq;
 338
 339	struct io_wq_acct *acct;
 340	bool do_create = false;
 341
 342	worker = container_of(cb, struct io_worker, create_work);
 343	wq = worker->wq;
 344	acct = &wq->acct[worker->create_index];
 345	raw_spin_lock(&wq->lock);
 346
 347	if (acct->nr_workers < acct->max_workers) {
 348		acct->nr_workers++;
 349		do_create = true;
 350	}
 351	raw_spin_unlock(&wq->lock);
 352	if (do_create) {
 353		create_io_worker(wq, worker->create_index);
 354	} else {
 355		atomic_dec(&acct->nr_running);
 356		io_worker_ref_put(wq);
 357	}
 358	clear_bit_unlock(0, &worker->create_state);
 359	io_worker_release(worker);
 360}
 361
 362static bool io_queue_worker_create(struct io_worker *worker,
 363				   struct io_wq_acct *acct,
 364				   task_work_func_t func)
 365{
 366	struct io_wq *wq = worker->wq;
 367
 368	/* raced with exit, just ignore create call */
 369	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
 370		goto fail;
 371	if (!io_worker_get(worker))
 372		goto fail;
 373	/*
 374	 * create_state manages ownership of create_work/index. We should
 375	 * only need one entry per worker, as the worker going to sleep
 376	 * will trigger the condition, and waking will clear it once it
 377	 * runs the task_work.
 378	 */
 379	if (test_bit(0, &worker->create_state) ||
 380	    test_and_set_bit_lock(0, &worker->create_state))
 381		goto fail_release;
 382
 383	atomic_inc(&wq->worker_refs);
 384	init_task_work(&worker->create_work, func);
 385	worker->create_index = acct->index;
 386	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
 387		/*
 388		 * EXIT may have been set after checking it above, check after
 389		 * adding the task_work and remove any creation item if it is
 390		 * now set. wq exit does that too, but we can have added this
 391		 * work item after we canceled in io_wq_exit_workers().
 392		 */
 393		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
 394			io_wq_cancel_tw_create(wq);
 395		io_worker_ref_put(wq);
 396		return true;
 397	}
 398	io_worker_ref_put(wq);
 399	clear_bit_unlock(0, &worker->create_state);
 400fail_release:
 401	io_worker_release(worker);
 402fail:
 403	atomic_dec(&acct->nr_running);
 404	io_worker_ref_put(wq);
 405	return false;
 406}
 407
 408static void io_wq_dec_running(struct io_worker *worker)
 409{
 410	struct io_wq_acct *acct = io_wq_get_acct(worker);
 411	struct io_wq *wq = worker->wq;
 412
 413	if (!(worker->flags & IO_WORKER_F_UP))
 414		return;
 415
 416	if (!atomic_dec_and_test(&acct->nr_running))
 417		return;
 418	if (!io_acct_run_queue(acct))
 419		return;
 420
 421	raw_spin_unlock(&acct->lock);
 422	atomic_inc(&acct->nr_running);
 423	atomic_inc(&wq->worker_refs);
 424	io_queue_worker_create(worker, acct, create_worker_cb);
 425}
 426
 427/*
 428 * Worker will start processing some work. Move it to the busy list, if
 429 * it's currently on the freelist
 430 */
 431static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
 432{
 433	if (worker->flags & IO_WORKER_F_FREE) {
 434		worker->flags &= ~IO_WORKER_F_FREE;
 435		raw_spin_lock(&wq->lock);
 436		hlist_nulls_del_init_rcu(&worker->nulls_node);
 437		raw_spin_unlock(&wq->lock);
 438	}
 439}
 440
 441/*
 442 * No work, worker going to sleep. Move to freelist.
 443 */
 444static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
 445	__must_hold(wq->lock)
 446{
 447	if (!(worker->flags & IO_WORKER_F_FREE)) {
 448		worker->flags |= IO_WORKER_F_FREE;
 449		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
 450	}
 451}
 452
 453static inline unsigned int io_get_work_hash(struct io_wq_work *work)
 454{
 455	return work->flags >> IO_WQ_HASH_SHIFT;
 456}
 457
 458static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
 459{
 460	bool ret = false;
 461
 462	spin_lock_irq(&wq->hash->wait.lock);
 463	if (list_empty(&wq->wait.entry)) {
 464		__add_wait_queue(&wq->hash->wait, &wq->wait);
 465		if (!test_bit(hash, &wq->hash->map)) {
 466			__set_current_state(TASK_RUNNING);
 467			list_del_init(&wq->wait.entry);
 468			ret = true;
 469		}
 470	}
 471	spin_unlock_irq(&wq->hash->wait.lock);
 472	return ret;
 473}
 474
 475static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
 476					   struct io_worker *worker)
 477	__must_hold(acct->lock)
 478{
 479	struct io_wq_work_node *node, *prev;
 480	struct io_wq_work *work, *tail;
 481	unsigned int stall_hash = -1U;
 482	struct io_wq *wq = worker->wq;
 483
 484	wq_list_for_each(node, prev, &acct->work_list) {
 485		unsigned int hash;
 486
 487		work = container_of(node, struct io_wq_work, list);
 488
 489		/* not hashed, can run anytime */
 490		if (!io_wq_is_hashed(work)) {
 491			wq_list_del(&acct->work_list, node, prev);
 492			return work;
 493		}
 494
 495		hash = io_get_work_hash(work);
 496		/* all items with this hash lie in [work, tail] */
 497		tail = wq->hash_tail[hash];
 498
 499		/* hashed, can run if not already running */
 500		if (!test_and_set_bit(hash, &wq->hash->map)) {
 501			wq->hash_tail[hash] = NULL;
 502			wq_list_cut(&acct->work_list, &tail->list, prev);
 503			return work;
 504		}
 505		if (stall_hash == -1U)
 506			stall_hash = hash;
 507		/* fast forward to a next hash, for-each will fix up @prev */
 508		node = &tail->list;
 509	}
 510
 511	if (stall_hash != -1U) {
 512		bool unstalled;
 513
 514		/*
 515		 * Set this before dropping the lock to avoid racing with new
 516		 * work being added and clearing the stalled bit.
 517		 */
 518		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 519		raw_spin_unlock(&acct->lock);
 520		unstalled = io_wait_on_hash(wq, stall_hash);
 521		raw_spin_lock(&acct->lock);
 522		if (unstalled) {
 523			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 524			if (wq_has_sleeper(&wq->hash->wait))
 525				wake_up(&wq->hash->wait);
 526		}
 527	}
 528
 529	return NULL;
 530}
 531
 532static void io_assign_current_work(struct io_worker *worker,
 533				   struct io_wq_work *work)
 534{
 535	if (work) {
 536		io_run_task_work();
 537		cond_resched();
 538	}
 539
 540	raw_spin_lock(&worker->lock);
 541	worker->cur_work = work;
 542	worker->next_work = NULL;
 543	raw_spin_unlock(&worker->lock);
 544}
 545
 546/*
 547 * Called with acct->lock held, drops it before returning
 548 */
 549static void io_worker_handle_work(struct io_wq_acct *acct,
 550				  struct io_worker *worker)
 551	__releases(&acct->lock)
 552{
 553	struct io_wq *wq = worker->wq;
 554	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
 555
 556	do {
 557		struct io_wq_work *work;
 558
 559		/*
 560		 * If we got some work, mark us as busy. If we didn't, but
 561		 * the list isn't empty, it means we stalled on hashed work.
 562		 * Mark us stalled so we don't keep looking for work when we
 563		 * can't make progress, any work completion or insertion will
 564		 * clear the stalled flag.
 565		 */
 566		work = io_get_next_work(acct, worker);
 567		raw_spin_unlock(&acct->lock);
 568		if (work) {
 569			__io_worker_busy(wq, worker);
 570
 571			/*
 572			 * Make sure cancelation can find this, even before
 573			 * it becomes the active work. That avoids a window
 574			 * where the work has been removed from our general
 575			 * work list, but isn't yet discoverable as the
 576			 * current work item for this worker.
 577			 */
 578			raw_spin_lock(&worker->lock);
 579			worker->next_work = work;
 580			raw_spin_unlock(&worker->lock);
 581		} else {
 582			break;
 583		}
 584		io_assign_current_work(worker, work);
 585		__set_current_state(TASK_RUNNING);
 586
 587		/* handle a whole dependent link */
 588		do {
 589			struct io_wq_work *next_hashed, *linked;
 590			unsigned int hash = io_get_work_hash(work);
 591
 592			next_hashed = wq_next_work(work);
 593
 594			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
 595				work->flags |= IO_WQ_WORK_CANCEL;
 596			wq->do_work(work);
 597			io_assign_current_work(worker, NULL);
 598
 599			linked = wq->free_work(work);
 600			work = next_hashed;
 601			if (!work && linked && !io_wq_is_hashed(linked)) {
 602				work = linked;
 603				linked = NULL;
 604			}
 605			io_assign_current_work(worker, work);
 606			if (linked)
 607				io_wq_enqueue(wq, linked);
 608
 609			if (hash != -1U && !next_hashed) {
 610				/* serialize hash clear with wake_up() */
 611				spin_lock_irq(&wq->hash->wait.lock);
 612				clear_bit(hash, &wq->hash->map);
 613				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 614				spin_unlock_irq(&wq->hash->wait.lock);
 615				if (wq_has_sleeper(&wq->hash->wait))
 616					wake_up(&wq->hash->wait);
 617			}
 618		} while (work);
 619
 620		if (!__io_acct_run_queue(acct))
 621			break;
 622		raw_spin_lock(&acct->lock);
 623	} while (1);
 624}
 625
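/*
 * Main loop for an io-wq worker thread: handle queued work while the acct
 * has any, then idle for up to WORKER_IDLE_TIMEOUT. When the idle timeout
 * hits, the worker exits if it isn't the last one for this acct or if its
 * affinity mask was changed; otherwise it goes back to waiting for work.
 */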
 626static int io_wq_worker(void *data)
 627{
 628	struct io_worker *worker = data;
 629	struct io_wq_acct *acct = io_wq_get_acct(worker);
 630	struct io_wq *wq = worker->wq;
 631	bool exit_mask = false, last_timeout = false;
 632	char buf[TASK_COMM_LEN];
 633
 634	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
 635
 636	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
 637	set_task_comm(current, buf);
 638
 639	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
 640		long ret;
 641
 642		set_current_state(TASK_INTERRUPTIBLE);
 643
 644		/*
 645		 * If we have work to do, io_acct_run_queue() returns with
 646		 * the acct->lock held. If not, it will drop it.
 647		 */
 648		while (io_acct_run_queue(acct))
 649			io_worker_handle_work(acct, worker);
 650
 651		raw_spin_lock(&wq->lock);
 652		/*
 653		 * Last sleep timed out. Exit if we're not the last worker,
 654		 * or if someone modified our affinity.
 655		 */
 656		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
 657			acct->nr_workers--;
 658			raw_spin_unlock(&wq->lock);
 659			__set_current_state(TASK_RUNNING);
 660			break;
 661		}
 662		last_timeout = false;
 663		__io_worker_idle(wq, worker);
 664		raw_spin_unlock(&wq->lock);
 665		if (io_run_task_work())
 666			continue;
 667		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
 668		if (signal_pending(current)) {
 669			struct ksignal ksig;
 670
 671			if (!get_signal(&ksig))
 672				continue;
 673			break;
 674		}
 675		if (!ret) {
 676			last_timeout = true;
 677			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
 678							wq->cpu_mask);
 679		}
 680	}
 681
 682	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) && io_acct_run_queue(acct))
 683		io_worker_handle_work(acct, worker);
 684
 685	io_worker_exit(worker);
 686	return 0;
 687}
 688
 689/*
 690 * Called when a worker is scheduled in. Mark us as currently running.
 691 */
 692void io_wq_worker_running(struct task_struct *tsk)
 693{
 694	struct io_worker *worker = tsk->worker_private;
 695
 696	if (!worker)
 697		return;
 698	if (!(worker->flags & IO_WORKER_F_UP))
 699		return;
 700	if (worker->flags & IO_WORKER_F_RUNNING)
 701		return;
 702	worker->flags |= IO_WORKER_F_RUNNING;
 703	io_wq_inc_running(worker);
 704}
 705
 706/*
 707 * Called when a worker is going to sleep. If there are no workers currently
 708 * running and we have work pending, wake up a free one or create a new one.
 709 */
 710void io_wq_worker_sleeping(struct task_struct *tsk)
 711{
 712	struct io_worker *worker = tsk->worker_private;
 713
 714	if (!worker)
 715		return;
 716	if (!(worker->flags & IO_WORKER_F_UP))
 717		return;
 718	if (!(worker->flags & IO_WORKER_F_RUNNING))
 719		return;
 720
 721	worker->flags &= ~IO_WORKER_F_RUNNING;
 722	io_wq_dec_running(worker);
 723}
 724
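/*
 * Attach a freshly created task to the worker, add it to the free and
 * all-worker lists, and start it running via wake_up_new_task().
 */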
 725static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
 726			       struct task_struct *tsk)
 727{
 728	tsk->worker_private = worker;
 729	worker->task = tsk;
 730	set_cpus_allowed_ptr(tsk, wq->cpu_mask);
 731
 732	raw_spin_lock(&wq->lock);
 733	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
 734	list_add_tail_rcu(&worker->all_list, &wq->all_list);
 735	worker->flags |= IO_WORKER_F_FREE;
 736	raw_spin_unlock(&wq->lock);
 737	wake_up_new_task(tsk);
 738}
 739
 740static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
 741{
 742	return true;
 743}
 744
 745static inline bool io_should_retry_thread(long err)
 746{
 747	/*
 748	 * Prevent perpetual task_work retry if the task (or its group) is
 749	 * exiting.
 750	 */
 751	if (fatal_signal_pending(current))
 752		return false;
 753
 754	switch (err) {
 755	case -EAGAIN:
 756	case -ERESTARTSYS:
 757	case -ERESTARTNOINTR:
 758	case -ERESTARTNOHAND:
 759		return true;
 760	default:
 761		return false;
 762	}
 763}
 764
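/*
 * task_work continuation of worker creation: retry create_io_thread(). On
 * a non-retryable error the accounting for the never-created worker is
 * undone, and if no workers are left for this acct, all of its pending
 * work is cancelled.
 */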
 765static void create_worker_cont(struct callback_head *cb)
 766{
 767	struct io_worker *worker;
 768	struct task_struct *tsk;
 769	struct io_wq *wq;
 770
 771	worker = container_of(cb, struct io_worker, create_work);
 772	clear_bit_unlock(0, &worker->create_state);
 773	wq = worker->wq;
 774	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
 775	if (!IS_ERR(tsk)) {
 776		io_init_new_worker(wq, worker, tsk);
 777		io_worker_release(worker);
 778		return;
 779	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
 780		struct io_wq_acct *acct = io_wq_get_acct(worker);
 781
 782		atomic_dec(&acct->nr_running);
 783		raw_spin_lock(&wq->lock);
 784		acct->nr_workers--;
 785		if (!acct->nr_workers) {
 786			struct io_cb_cancel_data match = {
 787				.fn		= io_wq_work_match_all,
 788				.cancel_all	= true,
 789			};
 790
 791			raw_spin_unlock(&wq->lock);
 792			while (io_acct_cancel_pending_work(wq, acct, &match))
 793				;
 794		} else {
 795			raw_spin_unlock(&wq->lock);
 796		}
 797		io_worker_ref_put(wq);
 798		kfree(worker);
 799		return;
 800	}
 801
 802	/* re-create attempts grab a new worker ref, drop the existing one */
 803	io_worker_release(worker);
 804	schedule_work(&worker->work);
 805}
 806
 807static void io_workqueue_create(struct work_struct *work)
 808{
 809	struct io_worker *worker = container_of(work, struct io_worker, work);
 810	struct io_wq_acct *acct = io_wq_get_acct(worker);
 811
 812	if (!io_queue_worker_create(worker, acct, create_worker_cont))
 813		kfree(worker);
 814}
 815
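/*
 * Allocate and start a new worker for the given acct index. A transient
 * create_io_thread() failure is retried from workqueue context through
 * io_workqueue_create(); a permanent failure undoes the accounting.
 */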
 816static bool create_io_worker(struct io_wq *wq, int index)
 817{
 818	struct io_wq_acct *acct = &wq->acct[index];
 819	struct io_worker *worker;
 820	struct task_struct *tsk;
 821
 822	__set_current_state(TASK_RUNNING);
 823
 824	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 825	if (!worker) {
 826fail:
 827		atomic_dec(&acct->nr_running);
 828		raw_spin_lock(&wq->lock);
 829		acct->nr_workers--;
 830		raw_spin_unlock(&wq->lock);
 831		io_worker_ref_put(wq);
 832		return false;
 833	}
 834
 835	refcount_set(&worker->ref, 1);
 836	worker->wq = wq;
 837	raw_spin_lock_init(&worker->lock);
 838	init_completion(&worker->ref_done);
 839
 840	if (index == IO_WQ_ACCT_BOUND)
 841		worker->flags |= IO_WORKER_F_BOUND;
 842
 843	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
 844	if (!IS_ERR(tsk)) {
 845		io_init_new_worker(wq, worker, tsk);
 846	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
 847		kfree(worker);
 848		goto fail;
 849	} else {
 850		INIT_WORK(&worker->work, io_workqueue_create);
 851		schedule_work(&worker->work);
 852	}
 853
 854	return true;
 855}
 856
 857/*
 858 * Iterate all workers on the wq and call the given function for each
 859 * worker that isn't exiting
 860 */
 861static bool io_wq_for_each_worker(struct io_wq *wq,
 862				  bool (*func)(struct io_worker *, void *),
 863				  void *data)
 864{
 865	struct io_worker *worker;
 866	bool ret = false;
 867
 868	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
 869		if (io_worker_get(worker)) {
 870			/* no task if node is/was offline */
 871			if (worker->task)
 872				ret = func(worker, data);
 873			io_worker_release(worker);
 874			if (ret)
 875				break;
 876		}
 877	}
 878
 879	return ret;
 880}
 881
 882static bool io_wq_worker_wake(struct io_worker *worker, void *data)
 883{
 884	__set_notify_signal(worker->task);
 885	wake_up_process(worker->task);
 886	return false;
 887}
 888
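/*
 * Run a work item, and anything linked to it, with IO_WQ_WORK_CANCEL set;
 * the ->do_work callback is expected to fail such requests rather than
 * execute them.
 */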
 889static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
 890{
 891	do {
 892		work->flags |= IO_WQ_WORK_CANCEL;
 893		wq->do_work(work);
 894		work = wq->free_work(work);
 895	} while (work);
 896}
 897
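/*
 * Queue work on the acct list. Hashed work with the same key is chained
 * behind hash_tail[hash], so io_get_next_work() can dispatch or skip the
 * whole run as a unit.
 */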
 898static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
 899{
 900	struct io_wq_acct *acct = io_work_get_acct(wq, work);
 901	unsigned int hash;
 902	struct io_wq_work *tail;
 903
 904	if (!io_wq_is_hashed(work)) {
 905append:
 906		wq_list_add_tail(&work->list, &acct->work_list);
 907		return;
 908	}
 909
 910	hash = io_get_work_hash(work);
 911	tail = wq->hash_tail[hash];
 912	wq->hash_tail[hash] = work;
 913	if (!tail)
 914		goto append;
 915
 916	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
 917}
 918
 919static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
 920{
 921	return work == data;
 922}
 923
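/*
 * Queue a work item for execution. The item is inserted under acct->lock,
 * then a free worker is woken if one exists; otherwise a new worker may be
 * created. If the very first worker cannot be created, the item is
 * cancelled instead of being left stranded on the list.
 */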
 924void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
 925{
 926	struct io_wq_acct *acct = io_work_get_acct(wq, work);
 927	struct io_cb_cancel_data match;
 928	unsigned work_flags = work->flags;
 929	bool do_create;
 930
 931	/*
 932	 * If io-wq is exiting for this task, or if the request has explicitly
 933	 * been marked as one that should not get executed, cancel it here.
 934	 */
 935	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
 936	    (work->flags & IO_WQ_WORK_CANCEL)) {
 937		io_run_cancel(work, wq);
 938		return;
 939	}
 940
 941	raw_spin_lock(&acct->lock);
 942	io_wq_insert_work(wq, work);
 943	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
 944	raw_spin_unlock(&acct->lock);
 945
 946	rcu_read_lock();
 947	do_create = !io_wq_activate_free_worker(wq, acct);
 948	rcu_read_unlock();
 949
 950	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
 951	    !atomic_read(&acct->nr_running))) {
 952		bool did_create;
 953
 954		did_create = io_wq_create_worker(wq, acct);
 955		if (likely(did_create))
 956			return;
 957
 958		raw_spin_lock(&wq->lock);
 959		if (acct->nr_workers) {
 960			raw_spin_unlock(&wq->lock);
 961			return;
 962		}
 963		raw_spin_unlock(&wq->lock);
 964
 965		/* fatal condition, failed to create the first worker */
 966		match.fn		= io_wq_work_match_item,
 967		match.data		= work,
 968		match.cancel_all	= false,
 969
 970		io_acct_cancel_pending_work(wq, acct, &match);
 971	}
 972}
 973
 974/*
 975 * Work items that hash to the same value will not be done in parallel.
 976 * Used to limit concurrent writes, generally hashed by inode.
 977 */
 978void io_wq_hash_work(struct io_wq_work *work, void *val)
 979{
 980	unsigned int bit;
 981
 982	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
 983	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 984}
 985
 986static bool __io_wq_worker_cancel(struct io_worker *worker,
 987				  struct io_cb_cancel_data *match,
 988				  struct io_wq_work *work)
 989{
 990	if (work && match->fn(work, match->data)) {
 991		work->flags |= IO_WQ_WORK_CANCEL;
 992		__set_notify_signal(worker->task);
 993		return true;
 994	}
 995
 996	return false;
 997}
 998
 999static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
1000{
1001	struct io_cb_cancel_data *match = data;
1002
1003	/*
1004	 * Hold the lock to avoid ->cur_work going out of scope, as the
1005	 * caller may dereference the passed in work.
1006	 */
1007	raw_spin_lock(&worker->lock);
1008	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
1009	    __io_wq_worker_cancel(worker, match, worker->next_work))
1010		match->nr_running++;
1011	raw_spin_unlock(&worker->lock);
1012
1013	return match->nr_running && !match->cancel_all;
1014}
1015
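/*
 * Unlink a pending work item, fixing up hash_tail[] if the removed item
 * was the last queued entry for its hash bucket.
 */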
1016static inline void io_wq_remove_pending(struct io_wq *wq,
1017					 struct io_wq_work *work,
1018					 struct io_wq_work_node *prev)
1019{
1020	struct io_wq_acct *acct = io_work_get_acct(wq, work);
1021	unsigned int hash = io_get_work_hash(work);
1022	struct io_wq_work *prev_work = NULL;
1023
1024	if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
1025		if (prev)
1026			prev_work = container_of(prev, struct io_wq_work, list);
1027		if (prev_work && io_get_work_hash(prev_work) == hash)
1028			wq->hash_tail[hash] = prev_work;
1029		else
1030			wq->hash_tail[hash] = NULL;
1031	}
1032	wq_list_del(&acct->work_list, &work->list, prev);
1033}
1034
1035static bool io_acct_cancel_pending_work(struct io_wq *wq,
1036					struct io_wq_acct *acct,
1037					struct io_cb_cancel_data *match)
1038{
1039	struct io_wq_work_node *node, *prev;
1040	struct io_wq_work *work;
1041
1042	raw_spin_lock(&acct->lock);
1043	wq_list_for_each(node, prev, &acct->work_list) {
1044		work = container_of(node, struct io_wq_work, list);
1045		if (!match->fn(work, match->data))
1046			continue;
1047		io_wq_remove_pending(wq, work, prev);
1048		raw_spin_unlock(&acct->lock);
1049		io_run_cancel(work, wq);
1050		match->nr_pending++;
1051		/* not safe to continue after unlock */
1052		return true;
1053	}
1054	raw_spin_unlock(&acct->lock);
1055
1056	return false;
1057}
1058
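/*
 * Cancel pending (not yet running) work for both accounting classes. With
 * cancel_all set, keep retrying until nothing matches, since each
 * successful cancellation drops acct->lock.
 */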
1059static void io_wq_cancel_pending_work(struct io_wq *wq,
1060				      struct io_cb_cancel_data *match)
1061{
1062	int i;
1063retry:
1064	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1065		struct io_wq_acct *acct = io_get_acct(wq, i == 0);
1066
1067		if (io_acct_cancel_pending_work(wq, acct, match)) {
1068			if (match->cancel_all)
1069				goto retry;
1070			break;
1071		}
1072	}
1073}
1074
1075static void io_wq_cancel_running_work(struct io_wq *wq,
1076				       struct io_cb_cancel_data *match)
1077{
1078	rcu_read_lock();
1079	io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
1080	rcu_read_unlock();
1081}
1082
1083enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
1084				  void *data, bool cancel_all)
1085{
1086	struct io_cb_cancel_data match = {
1087		.fn		= cancel,
1088		.data		= data,
1089		.cancel_all	= cancel_all,
1090	};
1091
1092	/*
1093	 * First check the pending list; if we're lucky we can just remove it
1094	 * from there. CANCEL_OK means that the work is returned as-new;
1095	 * no completion will be posted for it.
1096	 *
1097	 * Then check if a free (going busy) or busy worker has the work
1098	 * currently running. If we find it there, we'll return CANCEL_RUNNING
1099	 * as an indication that we attempted to signal cancellation. The
1100	 * completion will run normally in this case.
1101	 *
1102	 * Do both of these under the appropriate locks (acct->lock for pending,
1103	 * wq->lock for running), to ensure we'll find a work item regardless of state.
1104	 */
1105	io_wq_cancel_pending_work(wq, &match);
1106	if (match.nr_pending && !match.cancel_all)
1107		return IO_WQ_CANCEL_OK;
1108
1109	raw_spin_lock(&wq->lock);
1110	io_wq_cancel_running_work(wq, &match);
1111	raw_spin_unlock(&wq->lock);
1112	if (match.nr_running && !match.cancel_all)
1113		return IO_WQ_CANCEL_RUNNING;
1114
1115	if (match.nr_running)
1116		return IO_WQ_CANCEL_RUNNING;
1117	if (match.nr_pending)
1118		return IO_WQ_CANCEL_OK;
1119	return IO_WQ_CANCEL_NOTFOUND;
1120}
1121
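/*
 * Wait queue callback for the shared hash: invoked when a hash bucket is
 * released, so clear the stalled bits and wake a free worker for every
 * acct that was stalled on hashed work.
 */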
1122static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
1123			    int sync, void *key)
1124{
1125	struct io_wq *wq = container_of(wait, struct io_wq, wait);
1126	int i;
1127
1128	list_del_init(&wait->entry);
1129
1130	rcu_read_lock();
1131	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1132		struct io_wq_acct *acct = &wq->acct[i];
1133
1134		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
1135			io_wq_activate_free_worker(wq, acct);
1136	}
1137	rcu_read_unlock();
1138	return 1;
1139}
1140
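/*
 * Allocate and set up an io_wq: grab a reference on the shared hash, set
 * the bounded limit from the caller and the unbounded limit from
 * RLIMIT_NPROC, and register for CPU hotplug callbacks. Workers are only
 * created on demand when work is enqueued.
 */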
1141struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
1142{
1143	int ret, i;
1144	struct io_wq *wq;
1145
1146	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
1147		return ERR_PTR(-EINVAL);
1148	if (WARN_ON_ONCE(!bounded))
1149		return ERR_PTR(-EINVAL);
1150
1151	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
1152	if (!wq)
1153		return ERR_PTR(-ENOMEM);
1154
1155	refcount_inc(&data->hash->refs);
1156	wq->hash = data->hash;
1157	wq->free_work = data->free_work;
1158	wq->do_work = data->do_work;
1159
1160	ret = -ENOMEM;
1161
1162	if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
1163		goto err;
1164	cpumask_copy(wq->cpu_mask, cpu_possible_mask);
1165	wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
1166	wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
1167				task_rlimit(current, RLIMIT_NPROC);
1168	INIT_LIST_HEAD(&wq->wait.entry);
1169	wq->wait.func = io_wq_hash_wake;
1170	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1171		struct io_wq_acct *acct = &wq->acct[i];
1172
1173		acct->index = i;
1174		atomic_set(&acct->nr_running, 0);
1175		INIT_WQ_LIST(&acct->work_list);
1176		raw_spin_lock_init(&acct->lock);
1177	}
1178
1179	raw_spin_lock_init(&wq->lock);
1180	INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
1181	INIT_LIST_HEAD(&wq->all_list);
1182
1183	wq->task = get_task_struct(data->task);
1184	atomic_set(&wq->worker_refs, 1);
1185	init_completion(&wq->worker_done);
1186	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1187	if (ret)
1188		goto err;
1189
1190	return wq;
1191err:
1192	io_wq_put_hash(data->hash);
1193	free_cpumask_var(wq->cpu_mask);
1194	kfree(wq);
1195	return ERR_PTR(ret);
1196}
1197
1198static bool io_task_work_match(struct callback_head *cb, void *data)
1199{
1200	struct io_worker *worker;
1201
1202	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
1203		return false;
1204	worker = container_of(cb, struct io_worker, create_work);
1205	return worker->wq == data;
1206}
1207
1208void io_wq_exit_start(struct io_wq *wq)
1209{
1210	set_bit(IO_WQ_BIT_EXIT, &wq->state);
1211}
1212
1213static void io_wq_cancel_tw_create(struct io_wq *wq)
1214{
1215	struct callback_head *cb;
1216
1217	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
1218		struct io_worker *worker;
1219
1220		worker = container_of(cb, struct io_worker, create_work);
1221		io_worker_cancel_cb(worker);
1222		/*
1223		 * Only the worker continuation helper has worker allocated and
1224		 * hence needs freeing.
1225		 */
1226		if (cb->func == create_worker_cont)
1227			kfree(worker);
1228	}
1229}
1230
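/*
 * Tear down all workers: cancel any pending worker-creation task_work,
 * wake every worker so it notices IO_WQ_BIT_EXIT, then wait for the last
 * worker reference to drop before detaching from the owning task.
 */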
1231static void io_wq_exit_workers(struct io_wq *wq)
1232{
1233	if (!wq->task)
1234		return;
1235
1236	io_wq_cancel_tw_create(wq);
1237
1238	rcu_read_lock();
1239	io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
1240	rcu_read_unlock();
1241	io_worker_ref_put(wq);
1242	wait_for_completion(&wq->worker_done);
1243
1244	spin_lock_irq(&wq->hash->wait.lock);
1245	list_del_init(&wq->wait.entry);
1246	spin_unlock_irq(&wq->hash->wait.lock);
1247
1248	put_task_struct(wq->task);
1249	wq->task = NULL;
1250}
1251
1252static void io_wq_destroy(struct io_wq *wq)
1253{
1254	struct io_cb_cancel_data match = {
1255		.fn		= io_wq_work_match_all,
1256		.cancel_all	= true,
1257	};
1258
1259	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1260	io_wq_cancel_pending_work(wq, &match);
1261	free_cpumask_var(wq->cpu_mask);
1262	io_wq_put_hash(wq->hash);
1263	kfree(wq);
1264}
1265
1266void io_wq_put_and_exit(struct io_wq *wq)
1267{
1268	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
1269
1270	io_wq_exit_workers(wq);
1271	io_wq_destroy(wq);
1272}
1273
1274struct online_data {
1275	unsigned int cpu;
1276	bool online;
1277};
1278
1279static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
1280{
1281	struct online_data *od = data;
1282
1283	if (od->online)
1284		cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
1285	else
1286		cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
1287	return false;
1288}
1289
1290static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
1291{
1292	struct online_data od = {
1293		.cpu = cpu,
1294		.online = online
1295	};
1296
1297	rcu_read_lock();
1298	io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
1299	rcu_read_unlock();
1300	return 0;
1301}
1302
1303static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
1304{
1305	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1306
1307	return __io_wq_cpu_online(wq, cpu, true);
1308}
1309
1310static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
1311{
1312	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1313
1314	return __io_wq_cpu_online(wq, cpu, false);
1315}
1316
1317int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
1318{
1319	if (!tctx || !tctx->io_wq)
1320		return -EINVAL;
1321
1322	rcu_read_lock();
1323	if (mask)
1324		cpumask_copy(tctx->io_wq->cpu_mask, mask);
1325	else
1326		cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
1327	rcu_read_unlock();
1328
1329	return 0;
1330}
1331
1332/*
1333 * Set the max number of workers per accounting class; the old values are
1334 * returned through new_count. A zero entry leaves that class unchanged.
1335 */
1336int io_wq_max_workers(struct io_wq *wq, int *new_count)
1337{
1338	struct io_wq_acct *acct;
1339	int prev[IO_WQ_ACCT_NR];
1340	int i;
1341
1342	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
1343	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
1344	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
1345
1346	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1347		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
1348			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
1349	}
1350
1351	for (i = 0; i < IO_WQ_ACCT_NR; i++)
1352		prev[i] = 0;
1353
1354	rcu_read_lock();
1355
1356	raw_spin_lock(&wq->lock);
1357	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1358		acct = &wq->acct[i];
1359		prev[i] = max_t(int, acct->max_workers, prev[i]);
1360		if (new_count[i])
1361			acct->max_workers = new_count[i];
1362	}
1363	raw_spin_unlock(&wq->lock);
1364	rcu_read_unlock();
1365
1366	for (i = 0; i < IO_WQ_ACCT_NR; i++)
1367		new_count[i] = prev[i];
1368
1369	return 0;
1370}
1371
1372static __init int io_wq_init(void)
1373{
1374	int ret;
1375
1376	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
1377					io_wq_cpu_online, io_wq_cpu_offline);
1378	if (ret < 0)
1379		return ret;
1380	io_wq_online = ret;
1381	return 0;
1382}
1383subsys_initcall(io_wq_init);