v4.6
 
   1/*
   2 * linux/net/sunrpc/sched.c
   3 *
   4 * Scheduling for synchronous and asynchronous RPC requests.
   5 *
   6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
   7 *
   8 * TCP NFS related read + write fixes
   9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  10 */
  11
  12#include <linux/module.h>
  13
  14#include <linux/sched.h>
  15#include <linux/interrupt.h>
  16#include <linux/slab.h>
  17#include <linux/mempool.h>
  18#include <linux/smp.h>
  19#include <linux/spinlock.h>
  20#include <linux/mutex.h>
  21#include <linux/freezer.h>
  22
  23#include <linux/sunrpc/clnt.h>
  24
  25#include "sunrpc.h"
  26
  27#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  28#define RPCDBG_FACILITY		RPCDBG_SCHED
  29#endif
  30
  31#define CREATE_TRACE_POINTS
  32#include <trace/events/sunrpc.h>
  33
  34/*
  35 * RPC slabs and memory pools
  36 */
  37#define RPC_BUFFER_MAXSIZE	(2048)
  38#define RPC_BUFFER_POOLSIZE	(8)
  39#define RPC_TASK_POOLSIZE	(8)
  40static struct kmem_cache	*rpc_task_slabp __read_mostly;
  41static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
  42static mempool_t	*rpc_task_mempool __read_mostly;
  43static mempool_t	*rpc_buffer_mempool __read_mostly;
  44
  45static void			rpc_async_schedule(struct work_struct *);
  46static void			 rpc_release_task(struct rpc_task *task);
  47static void __rpc_queue_timer_fn(unsigned long ptr);
  48
  49/*
  50 * RPC tasks sit here while waiting for conditions to improve.
  51 */
  52static struct rpc_wait_queue delay_queue;
  53
  54/*
  55 * rpciod-related stuff
  56 */
  57struct workqueue_struct *rpciod_workqueue;
  58
  59/*
  60 * Disable the timer for a given RPC task. Should be called with
  61 * queue->lock and bh_disabled in order to avoid races within
  62 * rpc_run_timer().
  63 */
  64static void
  65__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  66{
  67	if (task->tk_timeout == 0)
  68		return;
  69	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
  70	task->tk_timeout = 0;
  71	list_del(&task->u.tk_wait.timer_list);
  72	if (list_empty(&queue->timer_list.list))
  73		del_timer(&queue->timer_list.timer);
  74}
  75
  76static void
  77rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
  78{
  79	queue->timer_list.expires = expires;
  80	mod_timer(&queue->timer_list.timer, expires);
  81}
  82
  83/*
  84 * Set up a timer for the current task.
  85 */
  86static void
  87__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  88{
  89	if (!task->tk_timeout)
  90		return;
  91
  92	dprintk("RPC: %5u setting alarm for %u ms\n",
  93		task->tk_pid, jiffies_to_msecs(task->tk_timeout));
  94
  95	task->u.tk_wait.expires = jiffies + task->tk_timeout;
  96	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
  97		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
  98	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
  99}
 100
 101static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
 102{
 103	struct list_head *q = &queue->tasks[queue->priority];
 104	struct rpc_task *task;
 105
 106	if (!list_empty(q)) {
 107		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 108		if (task->tk_owner == queue->owner)
 109			list_move_tail(&task->u.tk_wait.list, q);
 110	}
 111}
 112
 113static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 114{
 115	if (queue->priority != priority) {
 116		/* Fairness: rotate the list when changing priority */
 117		rpc_rotate_queue_owner(queue);
 118		queue->priority = priority;
 119	}
 120}
 121
 122static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
 123{
 124	queue->owner = pid;
 125	queue->nr = RPC_BATCH_COUNT;
 126}
 127
 128static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
 129{
 130	rpc_set_waitqueue_priority(queue, queue->maxpriority);
 131	rpc_set_waitqueue_owner(queue, 0);
 132}
 133
 134/*
 135 * Add new request to a priority queue.
 136 */
 137static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 138		struct rpc_task *task,
 139		unsigned char queue_priority)
 140{
 141	struct list_head *q;
 142	struct rpc_task *t;
 143
 144	INIT_LIST_HEAD(&task->u.tk_wait.links);
 145	if (unlikely(queue_priority > queue->maxpriority))
 146		queue_priority = queue->maxpriority;
 147	if (queue_priority > queue->priority)
 148		rpc_set_waitqueue_priority(queue, queue_priority);
 149	q = &queue->tasks[queue_priority];
 150	list_for_each_entry(t, q, u.tk_wait.list) {
 151		if (t->tk_owner == task->tk_owner) {
 152			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
 153			return;
 154		}
 155	}
 156	list_add_tail(&task->u.tk_wait.list, q);
 157}
 158
 159/*
 160 * Add new request to wait queue.
 161 *
 162 * Swapper tasks always get inserted at the head of the queue.
 163 * This should avoid many nasty memory deadlocks and hopefully
 164 * improve overall performance.
 165 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 166 */
 167static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 168		struct rpc_task *task,
 169		unsigned char queue_priority)
 170{
 171	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 172	if (RPC_IS_QUEUED(task))
 173		return;
 174
 175	if (RPC_IS_PRIORITY(queue))
 176		__rpc_add_wait_queue_priority(queue, task, queue_priority);
 177	else if (RPC_IS_SWAPPER(task))
 178		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 179	else
 180		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 181	task->tk_waitqueue = queue;
 182	queue->qlen++;
 183	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
 184	smp_wmb();
 185	rpc_set_queued(task);
 186
 187	dprintk("RPC: %5u added to queue %p \"%s\"\n",
 188			task->tk_pid, queue, rpc_qname(queue));
 189}
 190
 191/*
 192 * Remove request from a priority queue.
 193 */
 194static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
 195{
 196	struct rpc_task *t;
 197
 198	if (!list_empty(&task->u.tk_wait.links)) {
 199		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
 200		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
 201		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
 202	}
 203}
 204
 205/*
 206 * Remove request from queue.
 207 * Note: must be called with spin lock held.
 208 */
 209static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
 210{
 211	__rpc_disable_timer(queue, task);
 212	if (RPC_IS_PRIORITY(queue))
 213		__rpc_remove_wait_queue_priority(task);
 214	list_del(&task->u.tk_wait.list);
 215	queue->qlen--;
 216	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
 217			task->tk_pid, queue, rpc_qname(queue));
 218}
 219
 220static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 221{
 222	int i;
 223
 224	spin_lock_init(&queue->lock);
 225	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
 226		INIT_LIST_HEAD(&queue->tasks[i]);
 227	queue->maxpriority = nr_queues - 1;
 228	rpc_reset_waitqueue_priority(queue);
 229	queue->qlen = 0;
 230	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
 231	INIT_LIST_HEAD(&queue->timer_list.list);
 232	rpc_assign_waitqueue_name(queue, qname);
 233}
 234
 235void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 236{
 237	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
 238}
 239EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
 240
 241void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 242{
 243	__rpc_init_priority_wait_queue(queue, qname, 1);
 244}
 245EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 246
 247void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 248{
 249	del_timer_sync(&queue->timer_list.timer);
 250}
 251EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
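A minimal sketch of the wait-queue lifecycle provided by the helpers above; the my_driver structure and the queue name are illustrative only, not part of this file:

	struct my_driver {
		struct rpc_wait_queue backlog;			/* embedded, not separately allocated */
	};

	static void my_driver_init(struct my_driver *d)
	{
		rpc_init_wait_queue(&d->backlog, "my_backlog");
	}

	static void my_driver_shutdown(struct my_driver *d)
	{
		rpc_wake_up_status(&d->backlog, -ESHUTDOWN);	/* fail anything still queued */
		rpc_destroy_wait_queue(&d->backlog);		/* stops the queue timer */
	}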
 252
 253static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
 254{
 255	freezable_schedule_unsafe();
 256	if (signal_pending_state(mode, current))
 257		return -ERESTARTSYS;
 258	return 0;
 259}
 260
 261#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
 262static void rpc_task_set_debuginfo(struct rpc_task *task)
 263{
 264	static atomic_t rpc_pid;
 265
 266	task->tk_pid = atomic_inc_return(&rpc_pid);
 267}
 268#else
 269static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 270{
 271}
 272#endif
 273
 274static void rpc_set_active(struct rpc_task *task)
 275{
 276	trace_rpc_task_begin(task->tk_client, task, NULL);
 277
 278	rpc_task_set_debuginfo(task);
 279	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 280}
 281
 282/*
 283 * Mark an RPC call as having completed by clearing the 'active' bit
 284 * and then waking up all tasks that were sleeping.
 285 */
 286static int rpc_complete_task(struct rpc_task *task)
 287{
 288	void *m = &task->tk_runstate;
 289	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
 290	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
 291	unsigned long flags;
 292	int ret;
 293
 294	trace_rpc_task_complete(task->tk_client, task, NULL);
 295
 296	spin_lock_irqsave(&wq->lock, flags);
 297	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 298	ret = atomic_dec_and_test(&task->tk_count);
 299	if (waitqueue_active(wq))
 300		__wake_up_locked_key(wq, TASK_NORMAL, &k);
 301	spin_unlock_irqrestore(&wq->lock, flags);
 302	return ret;
 303}
 304
 305/*
 306 * Allow callers to wait for completion of an RPC call
 307 *
 308 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 309 * to enforce taking of the wq->lock and hence avoid races with
 310 * rpc_complete_task().
 311 */
 312int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
 313{
 314	if (action == NULL)
 315		action = rpc_wait_bit_killable;
 316	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 317			action, TASK_KILLABLE);
 318}
 319EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
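As an illustration of the completion protocol above, a caller that still holds a reference to the task (for example the one returned by rpc_run_task()) can block until the RPC_TASK_ACTIVE bit clears and then read the result; rpc_wait_for_completion_task() is the usual wrapper that passes a NULL action. The helper name below is made up:

	static int my_wait_for_task(struct rpc_task *task)
	{
		int err = rpc_wait_for_completion_task(task);	/* killable wait on RPC_TASK_ACTIVE */

		if (err == 0)
			err = task->tk_status;			/* final RPC status */
		rpc_put_task(task);				/* drop the caller's reference */
		return err;
	}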
 320
 321/*
 322 * Make an RPC task runnable.
 323 *
 324 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 325 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 326 * the wait queue operation.
 327 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 328 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 329 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 330 * the RPC_TASK_RUNNING flag.
 331 */
 332static void rpc_make_runnable(struct rpc_task *task)
 333{
 334	bool need_wakeup = !rpc_test_and_set_running(task);
 335
 336	rpc_clear_queued(task);
 337	if (!need_wakeup)
 338		return;
 339	if (RPC_IS_ASYNC(task)) {
 340		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 341		queue_work(rpciod_workqueue, &task->u.tk_work);
 342	} else
 343		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 344}
 345
 346/*
 347 * Prepare for sleeping on a wait queue.
 348 * By always appending tasks to the list we ensure FIFO behavior.
 349 * NB: An RPC task will only receive interrupt-driven events as long
 350 * as it's on a wait queue.
 351 */
 352static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 353		struct rpc_task *task,
 354		rpc_action action,
 355		unsigned char queue_priority)
 356{
 357	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 358			task->tk_pid, rpc_qname(q), jiffies);
 359
 360	trace_rpc_task_sleep(task->tk_client, task, q);
 361
 362	__rpc_add_wait_queue(q, task, queue_priority);
 363
 364	WARN_ON_ONCE(task->tk_callback != NULL);
 365	task->tk_callback = action;
 366	__rpc_add_timer(q, task);
 367}
 368
 369void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 370				rpc_action action)
 371{
 372	/* We shouldn't ever put an inactive task to sleep */
 373	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
 374	if (!RPC_IS_ACTIVATED(task)) {
 375		task->tk_status = -EIO;
 376		rpc_put_task_async(task);
 377		return;
 378	}
 379
 380	/*
 381	 * Protect the queue operations.
 382	 */
 383	spin_lock_bh(&q->lock);
 384	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
 385	spin_unlock_bh(&q->lock);
 386}
 387EXPORT_SYMBOL_GPL(rpc_sleep_on);
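A minimal sketch of how callers typically pair rpc_sleep_on() with a wake-up; my_waitq and the helper names are made up for illustration. Note that in this version the caller arms the timeout by setting task->tk_timeout before sleeping:

	static struct rpc_wait_queue my_waitq;		/* set up with rpc_init_wait_queue() */

	static void my_wait_for_slot(struct rpc_task *task)
	{
		task->tk_timeout = 5 * HZ;		/* 0 would mean "wait forever" */
		rpc_sleep_on(&my_waitq, task, NULL);	/* NULL action: just resume the FSM */
	}

	static void my_slot_released(struct rpc_task *task)
	{
		rpc_wake_up_queued_task(&my_waitq, task);	/* or rpc_wake_up_next(&my_waitq) */
	}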
 388
 389void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 390		rpc_action action, int priority)
 391{
 392	/* We shouldn't ever put an inactive task to sleep */
 393	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
 394	if (!RPC_IS_ACTIVATED(task)) {
 395		task->tk_status = -EIO;
 396		rpc_put_task_async(task);
 397		return;
 398	}
 399
 400	/*
 401	 * Protect the queue operations.
 402	 */
 403	spin_lock_bh(&q->lock);
 404	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
 405	spin_unlock_bh(&q->lock);
 406}
 407EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
 408
 409/**
 410 * __rpc_do_wake_up_task - wake up a single rpc_task
 411 * @queue: wait queue
 412 * @task: task to be woken up
 413 *
 414 * Caller must hold queue->lock, and have cleared the task queued flag.
 415 */
 416static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 417{
 418	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
 419			task->tk_pid, jiffies);
 420
 421	/* Has the task been executed yet? If not, we cannot wake it up! */
 422	if (!RPC_IS_ACTIVATED(task)) {
 423		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
 424		return;
 425	}
 426
 427	trace_rpc_task_wakeup(task->tk_client, task, queue);
 428
 429	__rpc_remove_wait_queue(queue, task);
 430
 431	rpc_make_runnable(task);
 432
 433	dprintk("RPC:       __rpc_wake_up_task done\n");
 434}
 435
 436/*
 437 * Wake up a queued task while the queue lock is being held
 438 */
 439static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 440{
 441	if (RPC_IS_QUEUED(task)) {
 442		smp_rmb();
 443		if (task->tk_waitqueue == queue)
 444			__rpc_do_wake_up_task(queue, task);
 445	}
 446}
 447
 448/*
 449 * Wake up a task on a specific queue
 450 */
 451void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 452{
 453	spin_lock_bh(&queue->lock);
 454	rpc_wake_up_task_queue_locked(queue, task);
 455	spin_unlock_bh(&queue->lock);
 456}
 457EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 458
 459/*
 460 * Wake up the next task on a priority queue.
 461 */
 462static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 463{
 464	struct list_head *q;
 465	struct rpc_task *task;
 466
 467	/*
 468	 * Service a batch of tasks from a single owner.
 469	 */
 470	q = &queue->tasks[queue->priority];
 471	if (!list_empty(q)) {
 472		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
 473		if (queue->owner == task->tk_owner) {
 474			if (--queue->nr)
 475				goto out;
 476			list_move_tail(&task->u.tk_wait.list, q);
 477		}
 478		/*
 479		 * Check if we need to switch queues.
 480		 */
 481		goto new_owner;
 482	}
 483
 484	/*
 485	 * Service the next queue.
 486	 */
 487	do {
 488		if (q == &queue->tasks[0])
 489			q = &queue->tasks[queue->maxpriority];
 490		else
 491			q = q - 1;
 492		if (!list_empty(q)) {
 493			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
 494			goto new_queue;
 495		}
 496	} while (q != &queue->tasks[queue->priority]);
 497
 498	rpc_reset_waitqueue_priority(queue);
 499	return NULL;
 500
 501new_queue:
 502	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
 503new_owner:
 504	rpc_set_waitqueue_owner(queue, task->tk_owner);
 505out:
 506	return task;
 507}
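/*
 * Worked example of the fallback scan above: with maxpriority == 2, a current
 * priority of 1 and tasks[1] empty, the do-loop checks tasks[0], wraps around
 * to tasks[maxpriority] (tasks[2]), and finally re-checks tasks[1] before
 * resetting the queue priority and returning NULL.
 */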
 508
 509static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
 510{
 511	if (RPC_IS_PRIORITY(queue))
 512		return __rpc_find_next_queued_priority(queue);
 513	if (!list_empty(&queue->tasks[0]))
 514		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
 515	return NULL;
 516}
 517
 518/*
 519 * Wake up the first task on the wait queue.
 520 */
 521struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
 522		bool (*func)(struct rpc_task *, void *), void *data)
 523{
 524	struct rpc_task	*task = NULL;
 525
 526	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
 527			queue, rpc_qname(queue));
 528	spin_lock_bh(&queue->lock);
 529	task = __rpc_find_next_queued(queue);
 530	if (task != NULL) {
 531		if (func(task, data))
 532			rpc_wake_up_task_queue_locked(queue, task);
 533		else
 534			task = NULL;
 535	}
 536	spin_unlock_bh(&queue->lock);
 537
 538	return task;
 539}
 540EXPORT_SYMBOL_GPL(rpc_wake_up_first);
 541
 542static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
 543{
 544	return true;
 545}
 546
 547/*
 548 * Wake up the next task on the wait queue.
 549*/
 550struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
 551{
 552	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
 553}
 554EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 555
 556/**
 557 * rpc_wake_up - wake up all rpc_tasks
 558 * @queue: rpc_wait_queue on which the tasks are sleeping
 559 *
 560 * Grabs queue->lock
 561 */
 562void rpc_wake_up(struct rpc_wait_queue *queue)
 563{
 564	struct list_head *head;
 565
 566	spin_lock_bh(&queue->lock);
 567	head = &queue->tasks[queue->maxpriority];
 568	for (;;) {
 569		while (!list_empty(head)) {
 570			struct rpc_task *task;
 571			task = list_first_entry(head,
 572					struct rpc_task,
 573					u.tk_wait.list);
 574			rpc_wake_up_task_queue_locked(queue, task);
 575		}
 576		if (head == &queue->tasks[0])
 577			break;
 578		head--;
 579	}
 580	spin_unlock_bh(&queue->lock);
 581}
 582EXPORT_SYMBOL_GPL(rpc_wake_up);
 583
 584/**
 585 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 586 * @queue: rpc_wait_queue on which the tasks are sleeping
 587 * @status: status value to set
 588 *
 589 * Grabs queue->lock
 590 */
 591void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 592{
 593	struct list_head *head;
 594
 595	spin_lock_bh(&queue->lock);
 596	head = &queue->tasks[queue->maxpriority];
 597	for (;;) {
 598		while (!list_empty(head)) {
 599			struct rpc_task *task;
 600			task = list_first_entry(head,
 601					struct rpc_task,
 602					u.tk_wait.list);
 603			task->tk_status = status;
 604			rpc_wake_up_task_queue_locked(queue, task);
 605		}
 606		if (head == &queue->tasks[0])
 607			break;
 608		head--;
 609	}
 610	spin_unlock_bh(&queue->lock);
 611}
 612EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 613
 614static void __rpc_queue_timer_fn(unsigned long ptr)
 615{
 616	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
 617	struct rpc_task *task, *n;
 618	unsigned long expires, now, timeo;
 619
 620	spin_lock(&queue->lock);
 621	expires = now = jiffies;
 622	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 623		timeo = task->u.tk_wait.expires;
 624		if (time_after_eq(now, timeo)) {
 625			dprintk("RPC: %5u timeout\n", task->tk_pid);
 626			task->tk_status = -ETIMEDOUT;
 627			rpc_wake_up_task_queue_locked(queue, task);
 628			continue;
 629		}
 630		if (expires == now || time_after(expires, timeo))
 631			expires = timeo;
 632	}
 633	if (!list_empty(&queue->timer_list.list))
 634		rpc_set_queue_timer(queue, expires);
 635	spin_unlock(&queue->lock);
 636}
 637
 638static void __rpc_atrun(struct rpc_task *task)
 639{
 640	if (task->tk_status == -ETIMEDOUT)
 641		task->tk_status = 0;
 642}
 643
 644/*
 645 * Run a task at a later time
 646 */
 647void rpc_delay(struct rpc_task *task, unsigned long delay)
 648{
 649	task->tk_timeout = delay;
 650	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
 651}
 652EXPORT_SYMBOL_GPL(rpc_delay);
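An illustrative sketch of the usual rpc_delay() pattern inside a state action (the helper name is hypothetical): clear the transient error, back off, and let the state machine re-enter tk_action once __rpc_atrun() has run:

	static void my_retry_later(struct rpc_task *task)
	{
		task->tk_status = 0;		/* forget the transient failure */
		rpc_delay(task, 2 * HZ);	/* park on delay_queue; tk_action runs again later */
	}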
 653
 654/*
 655 * Helper to call task->tk_ops->rpc_call_prepare
 656 */
 657void rpc_prepare_task(struct rpc_task *task)
 658{
 659	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 660}
 661
 662static void
 663rpc_init_task_statistics(struct rpc_task *task)
 664{
 665	/* Initialize retry counters */
 666	task->tk_garb_retry = 2;
 667	task->tk_cred_retry = 2;
 668	task->tk_rebind_retry = 2;
 669
 670	/* starting timestamp */
 671	task->tk_start = ktime_get();
 672}
 673
 674static void
 675rpc_reset_task_statistics(struct rpc_task *task)
 676{
 677	task->tk_timeouts = 0;
 678	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
 679
 680	rpc_init_task_statistics(task);
 681}
 682
 683/*
 684 * Helper that calls task->tk_ops->rpc_call_done if it exists
 685 */
 686void rpc_exit_task(struct rpc_task *task)
 687{
 688	task->tk_action = NULL;
 689	if (task->tk_ops->rpc_call_done != NULL) {
 690		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 691		if (task->tk_action != NULL) {
 692			WARN_ON(RPC_ASSASSINATED(task));
 693			/* Always release the RPC slot and buffer memory */
 694			xprt_release(task);
 695			rpc_reset_task_statistics(task);
 696		}
 697	}
 698}
 699
 700void rpc_exit(struct rpc_task *task, int status)
 701{
 702	task->tk_status = status;
 703	task->tk_action = rpc_exit_task;
 704	if (RPC_IS_QUEUED(task))
 705		rpc_wake_up_queued_task(task->tk_waitqueue, task);
 706}
 707EXPORT_SYMBOL_GPL(rpc_exit);
 708
 709void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 710{
 711	if (ops->rpc_release != NULL)
 712		ops->rpc_release(calldata);
 713}
 714
 715/*
 716 * This is the RPC `scheduler' (or rather, the finite state machine).
 717 */
 718static void __rpc_execute(struct rpc_task *task)
 719{
 720	struct rpc_wait_queue *queue;
 721	int task_is_async = RPC_IS_ASYNC(task);
 722	int status = 0;
 723
 724	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 725			task->tk_pid, task->tk_flags);
 726
 727	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 728	if (RPC_IS_QUEUED(task))
 729		return;
 730
 731	for (;;) {
 732		void (*do_action)(struct rpc_task *);
 733
 734		/*
 735		 * Execute any pending callback first.
 736		 */
 737		do_action = task->tk_callback;
 738		task->tk_callback = NULL;
 739		if (do_action == NULL) {
 740			/*
 741			 * Perform the next FSM step.
 742			 * tk_action may be NULL if the task has been killed.
 743			 * In particular, note that rpc_killall_tasks may
 744			 * do this at any time, so beware when dereferencing.
 745			 */
 746			do_action = task->tk_action;
 747			if (do_action == NULL)
 748				break;
 749		}
 750		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
 751		do_action(task);
 752
 753		/*
 754		 * Lockless check for whether task is sleeping or not.
 755		 */
 756		if (!RPC_IS_QUEUED(task))
 757			continue;
 758		/*
 759		 * The queue->lock protects against races with
 760		 * rpc_make_runnable().
 761		 *
 762		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
 763		 * rpc_task, rpc_make_runnable() can assign it to a
 764		 * different workqueue. We therefore cannot assume that the
 765		 * rpc_task pointer may still be dereferenced.
 766		 */
 767		queue = task->tk_waitqueue;
 768		spin_lock_bh(&queue->lock);
 769		if (!RPC_IS_QUEUED(task)) {
 770			spin_unlock_bh(&queue->lock);
 771			continue;
 772		}
 773		rpc_clear_running(task);
 774		spin_unlock_bh(&queue->lock);
 775		if (task_is_async)
 776			return;
 777
 778		/* sync task: sleep here */
 779		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
 780		status = out_of_line_wait_on_bit(&task->tk_runstate,
 781				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 782				TASK_KILLABLE);
 783		if (status == -ERESTARTSYS) {
 784			/*
 785			 * When a sync task receives a signal, it exits with
 786			 * -ERESTARTSYS. In order to catch any callbacks that
 787			 * clean up after sleeping on some queue, we don't
 788			 * break the loop here, but go around once more.
 789			 */
 790			dprintk("RPC: %5u got signal\n", task->tk_pid);
 791			task->tk_flags |= RPC_TASK_KILLED;
 792			rpc_exit(task, -ERESTARTSYS);
 793		}
 794		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
 795	}
 796
 797	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
 798			task->tk_status);
 799	/* Release all resources associated with the task */
 800	rpc_release_task(task);
 801}
 802
 803/*
 804 * User-visible entry point to the scheduler.
 805 *
 806 * This may be called recursively if e.g. an async NFS task updates
 807 * the attributes and finds that dirty pages must be flushed.
 808 * NOTE: Upon exit of this function the task is guaranteed to be
 809 *	 released. In particular note that tk_release() will have
 810 *	 been called, so your task memory may have been freed.
 811 */
 812void rpc_execute(struct rpc_task *task)
 813{
 814	bool is_async = RPC_IS_ASYNC(task);
 815
 816	rpc_set_active(task);
 817	rpc_make_runnable(task);
 818	if (!is_async)
 819		__rpc_execute(task);
 820}
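For reference, a minimal rpc_call_ops set of the kind this state machine drives. This is an illustrative sketch: rpc_call_start() and the ops layout come from the generic client code, while the my_* names are assumptions:

	static void my_prepare(struct rpc_task *task, void *calldata)
	{
		rpc_call_start(task);		/* hand off to the generic call_start path */
	}

	static void my_done(struct rpc_task *task, void *calldata)
	{
		if (task->tk_status < 0)
			pr_debug("RPC call failed: %d\n", task->tk_status);
	}

	static const struct rpc_call_ops my_call_ops = {
		.rpc_call_prepare	= my_prepare,
		.rpc_call_done		= my_done,
	};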
 821
 822static void rpc_async_schedule(struct work_struct *work)
 823{
 824	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 825}
 826
 827/**
 828 * rpc_malloc - allocate an RPC buffer
 829 * @task: RPC task that will use this buffer
 830 * @size: requested byte size
 831 *
 832 * To prevent rpciod from hanging, this allocator never sleeps,
 833 * returning NULL and suppressing warning if the request cannot be serviced
 834 * immediately.
 835 * The caller can arrange to sleep in a way that is safe for rpciod.
 836 *
 837 * Most requests are 'small' (under 2KiB) and can be serviced from a
 838 * mempool, ensuring that NFS reads and writes can always proceed,
 839 * and that there is good locality of reference for these buffers.
 840 *
 841 * In order to avoid memory starvation triggering more writebacks of
 842 * NFS requests, we avoid using GFP_KERNEL.
 843 */
 844void *rpc_malloc(struct rpc_task *task, size_t size)
 845{
 846	struct rpc_buffer *buf;
 847	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
 848
 849	if (RPC_IS_SWAPPER(task))
 850		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
 851
 852	size += sizeof(struct rpc_buffer);
 853	if (size <= RPC_BUFFER_MAXSIZE)
 854		buf = mempool_alloc(rpc_buffer_mempool, gfp);
 855	else
 856		buf = kmalloc(size, gfp);
 857
 858	if (!buf)
 859		return NULL;
 860
 861	buf->len = size;
 862	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
 863			task->tk_pid, size, buf);
 864	return &buf->data;
 865}
 866EXPORT_SYMBOL_GPL(rpc_malloc);
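A hedged example of the rpc_malloc()/rpc_free() pairing; transports normally reach these through their buf_alloc/buf_free callbacks rather than calling them directly, and my_marshal() is purely illustrative:

	static int my_marshal(struct rpc_task *task)
	{
		void *p = rpc_malloc(task, 512);	/* never sleeps; may return NULL */

		if (p == NULL)
			return -ENOMEM;
		/* ... fill in the request ... */
		rpc_free(p);		/* recovers the rpc_buffer header via container_of() */
		return 0;
	}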
 867
 868/**
 869 * rpc_free - free buffer allocated via rpc_malloc
 870 * @buffer: buffer to free
 871 *
 872 */
 873void rpc_free(void *buffer)
 874{
 875	size_t size;
 876	struct rpc_buffer *buf;
 877
 878	if (!buffer)
 879		return;
 880
 881	buf = container_of(buffer, struct rpc_buffer, data);
 882	size = buf->len;
 883
 884	dprintk("RPC:       freeing buffer of size %zu at %p\n",
 885			size, buf);
 886
 887	if (size <= RPC_BUFFER_MAXSIZE)
 888		mempool_free(buf, rpc_buffer_mempool);
 889	else
 890		kfree(buf);
 891}
 892EXPORT_SYMBOL_GPL(rpc_free);
 893
 894/*
 895 * Creation and deletion of RPC task structures
 896 */
 897static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
 898{
 899	memset(task, 0, sizeof(*task));
 900	atomic_set(&task->tk_count, 1);
 901	task->tk_flags  = task_setup_data->flags;
 902	task->tk_ops = task_setup_data->callback_ops;
 903	task->tk_calldata = task_setup_data->callback_data;
 904	INIT_LIST_HEAD(&task->tk_task);
 905
 906	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
 907	task->tk_owner = current->tgid;
 908
 909	/* Initialize workqueue for async tasks */
 910	task->tk_workqueue = task_setup_data->workqueue;
 911
 912	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);
 913
 914	if (task->tk_ops->rpc_call_prepare != NULL)
 915		task->tk_action = rpc_prepare_task;
 916
 917	rpc_init_task_statistics(task);
 918
 919	dprintk("RPC:       new task initialized, procpid %u\n",
 920				task_pid_nr(current));
 921}
 922
 923static struct rpc_task *
 924rpc_alloc_task(void)
 925{
 926	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
 927}
 928
 929/*
 930 * Create a new task for the specified client.
 931 */
 932struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 933{
 934	struct rpc_task	*task = setup_data->task;
 935	unsigned short flags = 0;
 936
 937	if (task == NULL) {
 938		task = rpc_alloc_task();
 939		if (task == NULL) {
 940			rpc_release_calldata(setup_data->callback_ops,
 941					setup_data->callback_data);
 942			return ERR_PTR(-ENOMEM);
 943		}
 944		flags = RPC_TASK_DYNAMIC;
 945	}
 946
 947	rpc_init_task(task, setup_data);
 948	task->tk_flags |= flags;
 949	dprintk("RPC:       allocated task %p\n", task);
 950	return task;
 951}
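A sketch of how callers normally reach rpc_new_task(): they fill in an rpc_task_setup and go through rpc_run_task() in clnt.c, which binds the client and then calls rpc_execute(). The clnt argument and my_call_ops are assumed to exist elsewhere:

	static int my_submit_call(struct rpc_clnt *clnt)
	{
		struct rpc_task_setup setup = {
			.rpc_client	= clnt,
			.callback_ops	= &my_call_ops,
			.flags		= RPC_TASK_ASYNC | RPC_TASK_SOFT,
		};
		struct rpc_task *task = rpc_run_task(&setup);

		if (IS_ERR(task))
			return PTR_ERR(task);
		rpc_put_task(task);	/* drop our reference; rpciod keeps its own */
		return 0;
	}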
 952
 953/*
 954 * rpc_free_task - release rpc task and perform cleanups
 955 *
 956 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 957 * in order to work around a workqueue dependency issue.
 958 *
 959 * Tejun Heo states:
 960 * "Workqueue currently considers two work items to be the same if they're
 961 * on the same address and won't execute them concurrently - ie. it
 962 * makes a work item which is queued again while being executed wait
 963 * for the previous execution to complete.
 964 *
 965 * If a work function frees the work item, and then waits for an event
 966 * which should be performed by another work item and *that* work item
 967 * recycles the freed work item, it can create a false dependency loop.
 968 * There really is no reliable way to detect this short of verifying
 969 * every memory free."
 970 *
 971 */
 972static void rpc_free_task(struct rpc_task *task)
 973{
 974	unsigned short tk_flags = task->tk_flags;
 975
 976	rpc_release_calldata(task->tk_ops, task->tk_calldata);
 977
 978	if (tk_flags & RPC_TASK_DYNAMIC) {
 979		dprintk("RPC: %5u freeing task\n", task->tk_pid);
 980		mempool_free(task, rpc_task_mempool);
 981	}
 982}
 983
 984static void rpc_async_release(struct work_struct *work)
 985{
 986	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 987}
 988
 989static void rpc_release_resources_task(struct rpc_task *task)
 990{
 991	xprt_release(task);
 992	if (task->tk_msg.rpc_cred) {
 993		put_rpccred(task->tk_msg.rpc_cred);
 994		task->tk_msg.rpc_cred = NULL;
 995	}
 996	rpc_task_release_client(task);
 997}
 998
 999static void rpc_final_put_task(struct rpc_task *task,
1000		struct workqueue_struct *q)
1001{
1002	if (q != NULL) {
1003		INIT_WORK(&task->u.tk_work, rpc_async_release);
1004		queue_work(q, &task->u.tk_work);
1005	} else
1006		rpc_free_task(task);
1007}
1008
1009static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1010{
1011	if (atomic_dec_and_test(&task->tk_count)) {
1012		rpc_release_resources_task(task);
1013		rpc_final_put_task(task, q);
1014	}
1015}
1016
1017void rpc_put_task(struct rpc_task *task)
1018{
1019	rpc_do_put_task(task, NULL);
1020}
1021EXPORT_SYMBOL_GPL(rpc_put_task);
1022
1023void rpc_put_task_async(struct rpc_task *task)
1024{
1025	rpc_do_put_task(task, task->tk_workqueue);
1026}
1027EXPORT_SYMBOL_GPL(rpc_put_task_async);
1028
1029static void rpc_release_task(struct rpc_task *task)
1030{
1031	dprintk("RPC: %5u release task\n", task->tk_pid);
1032
1033	WARN_ON_ONCE(RPC_IS_QUEUED(task));
1034
1035	rpc_release_resources_task(task);
1036
1037	/*
1038	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1039	 * so it should be safe to use task->tk_count as a test for whether
1040	 * or not any other processes still hold references to our rpc_task.
1041	 */
1042	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1043		/* Wake up anyone who may be waiting for task completion */
1044		if (!rpc_complete_task(task))
1045			return;
1046	} else {
1047		if (!atomic_dec_and_test(&task->tk_count))
1048			return;
1049	}
1050	rpc_final_put_task(task, task->tk_workqueue);
1051}
1052
1053int rpciod_up(void)
1054{
1055	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1056}
1057
1058void rpciod_down(void)
1059{
1060	module_put(THIS_MODULE);
1061}
1062
1063/*
1064 * Start up the rpciod workqueue.
1065 */
1066static int rpciod_start(void)
1067{
1068	struct workqueue_struct *wq;
1069
1070	/*
1071	 * Create the rpciod thread and wait for it to start.
1072	 */
1073	dprintk("RPC:       creating workqueue rpciod\n");
1074	/* Note: highpri because network receive is latency sensitive */
1075	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1076	rpciod_workqueue = wq;
1077	return rpciod_workqueue != NULL;
1078}
1079
1080static void rpciod_stop(void)
1081{
1082	struct workqueue_struct *wq = NULL;
1083
1084	if (rpciod_workqueue == NULL)
1085		return;
1086	dprintk("RPC:       destroying workqueue rpciod\n");
1087
1088	wq = rpciod_workqueue;
1089	rpciod_workqueue = NULL;
1090	destroy_workqueue(wq);
1091}
1092
1093void
1094rpc_destroy_mempool(void)
1095{
1096	rpciod_stop();
1097	mempool_destroy(rpc_buffer_mempool);
1098	mempool_destroy(rpc_task_mempool);
1099	kmem_cache_destroy(rpc_task_slabp);
1100	kmem_cache_destroy(rpc_buffer_slabp);
1101	rpc_destroy_wait_queue(&delay_queue);
1102}
1103
1104int
1105rpc_init_mempool(void)
1106{
1107	/*
1108	 * The following is not strictly a mempool initialisation,
1109	 * but there is no harm in doing it here
1110	 */
1111	rpc_init_wait_queue(&delay_queue, "delayq");
1112	if (!rpciod_start())
1113		goto err_nomem;
1114
1115	rpc_task_slabp = kmem_cache_create("rpc_tasks",
1116					     sizeof(struct rpc_task),
1117					     0, SLAB_HWCACHE_ALIGN,
1118					     NULL);
1119	if (!rpc_task_slabp)
1120		goto err_nomem;
1121	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1122					     RPC_BUFFER_MAXSIZE,
1123					     0, SLAB_HWCACHE_ALIGN,
1124					     NULL);
1125	if (!rpc_buffer_slabp)
1126		goto err_nomem;
1127	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1128						    rpc_task_slabp);
1129	if (!rpc_task_mempool)
1130		goto err_nomem;
1131	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1132						      rpc_buffer_slabp);
1133	if (!rpc_buffer_mempool)
1134		goto err_nomem;
1135	return 0;
1136err_nomem:
1137	rpc_destroy_mempool();
1138	return -ENOMEM;
1139}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/net/sunrpc/sched.c
   4 *
   5 * Scheduling for synchronous and asynchronous RPC requests.
   6 *
   7 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
   8 *
   9 * TCP NFS related read + write fixes
  10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  11 */
  12
  13#include <linux/module.h>
  14
  15#include <linux/sched.h>
  16#include <linux/interrupt.h>
  17#include <linux/slab.h>
  18#include <linux/mempool.h>
  19#include <linux/smp.h>
  20#include <linux/spinlock.h>
  21#include <linux/mutex.h>
  22#include <linux/freezer.h>
  23#include <linux/sched/mm.h>
  24
  25#include <linux/sunrpc/clnt.h>
  26#include <linux/sunrpc/metrics.h>
  27
  28#include "sunrpc.h"
  29
  30#define CREATE_TRACE_POINTS
  31#include <trace/events/sunrpc.h>
  32
  33/*
  34 * RPC slabs and memory pools
  35 */
  36#define RPC_BUFFER_MAXSIZE	(2048)
  37#define RPC_BUFFER_POOLSIZE	(8)
  38#define RPC_TASK_POOLSIZE	(8)
  39static struct kmem_cache	*rpc_task_slabp __read_mostly;
  40static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
  41static mempool_t	*rpc_task_mempool __read_mostly;
  42static mempool_t	*rpc_buffer_mempool __read_mostly;
  43
  44static void			rpc_async_schedule(struct work_struct *);
  45static void			 rpc_release_task(struct rpc_task *task);
  46static void __rpc_queue_timer_fn(struct work_struct *);
  47
  48/*
  49 * RPC tasks sit here while waiting for conditions to improve.
  50 */
  51static struct rpc_wait_queue delay_queue;
  52
  53/*
  54 * rpciod-related stuff
  55 */
  56struct workqueue_struct *rpciod_workqueue __read_mostly;
  57struct workqueue_struct *xprtiod_workqueue __read_mostly;
  58EXPORT_SYMBOL_GPL(xprtiod_workqueue);
  59
  60unsigned long
  61rpc_task_timeout(const struct rpc_task *task)
  62{
  63	unsigned long timeout = READ_ONCE(task->tk_timeout);
  64
  65	if (timeout != 0) {
  66		unsigned long now = jiffies;
  67		if (time_before(now, timeout))
  68			return timeout - now;
  69	}
  70	return 0;
  71}
  72EXPORT_SYMBOL_GPL(rpc_task_timeout);
  73
  74/*
  75 * Disable the timer for a given RPC task. Should be called with
  76 * queue->lock and bh_disabled in order to avoid races within
  77 * rpc_run_timer().
  78 */
  79static void
  80__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  81{
  82	if (list_empty(&task->u.tk_wait.timer_list))
  83		return;
  84	task->tk_timeout = 0;
  85	list_del(&task->u.tk_wait.timer_list);
  86	if (list_empty(&queue->timer_list.list))
  87		cancel_delayed_work(&queue->timer_list.dwork);
  88}
  89
  90static void
  91rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
  92{
  93	unsigned long now = jiffies;
  94	queue->timer_list.expires = expires;
  95	if (time_before_eq(expires, now))
  96		expires = 0;
  97	else
  98		expires -= now;
  99	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
 100}
 101
 102/*
 103 * Set up a timer for the current task.
 104 */
 105static void
 106__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
 107		unsigned long timeout)
 108{
 109	task->tk_timeout = timeout;
 110	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
 111		rpc_set_queue_timer(queue, timeout);
 112	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 113}
 114
 115static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 116{
 117	if (queue->priority != priority) {
 118		queue->priority = priority;
 119		queue->nr = 1U << priority;
 120	}
 121}
 122
 123static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
 124{
 125	rpc_set_waitqueue_priority(queue, queue->maxpriority);
 126}
 127
 128/*
 129 * Add a request to a queue list
 130 */
 131static void
 132__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
 133{
 134	struct rpc_task *t;
 135
 136	list_for_each_entry(t, q, u.tk_wait.list) {
 137		if (t->tk_owner == task->tk_owner) {
 138			list_add_tail(&task->u.tk_wait.links,
 139					&t->u.tk_wait.links);
 140			/* Cache the queue head in task->u.tk_wait.list */
 141			task->u.tk_wait.list.next = q;
 142			task->u.tk_wait.list.prev = NULL;
 143			return;
 144		}
 145	}
 146	INIT_LIST_HEAD(&task->u.tk_wait.links);
 147	list_add_tail(&task->u.tk_wait.list, q);
 148}
 149
 150/*
 151 * Remove request from a queue list
 152 */
 153static void
 154__rpc_list_dequeue_task(struct rpc_task *task)
 155{
 156	struct list_head *q;
 157	struct rpc_task *t;
 158
 159	if (task->u.tk_wait.list.prev == NULL) {
 160		list_del(&task->u.tk_wait.links);
 161		return;
 162	}
 163	if (!list_empty(&task->u.tk_wait.links)) {
 164		t = list_first_entry(&task->u.tk_wait.links,
 165				struct rpc_task,
 166				u.tk_wait.links);
 167		/* Assume __rpc_list_enqueue_task() cached the queue head */
 168		q = t->u.tk_wait.list.next;
 169		list_add_tail(&t->u.tk_wait.list, q);
 170		list_del(&task->u.tk_wait.links);
 171	}
 172	list_del(&task->u.tk_wait.list);
 173}
 174
 175/*
 176 * Add new request to a priority queue.
 177 */
 178static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 179		struct rpc_task *task,
 180		unsigned char queue_priority)
 181{
 182	if (unlikely(queue_priority > queue->maxpriority))
 183		queue_priority = queue->maxpriority;
 184	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
 185}
 186
 187/*
 188 * Add new request to wait queue.
 189 *
 190 * Swapper tasks always get inserted at the head of the queue.
 191 * This should avoid many nasty memory deadlocks and hopefully
 192 * improve overall performance.
 193 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 194 */
 195static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 196		struct rpc_task *task,
 197		unsigned char queue_priority)
 198{
 199	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
 200	if (RPC_IS_PRIORITY(queue))
 201		__rpc_add_wait_queue_priority(queue, task, queue_priority);
 202	else if (RPC_IS_SWAPPER(task))
 203		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 204	else
 205		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 206	task->tk_waitqueue = queue;
 207	queue->qlen++;
 208	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
 209	smp_wmb();
 210	rpc_set_queued(task);
 211}
 212
 213/*
 214 * Remove request from a priority queue.
 215 */
 216static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
 217{
 218	__rpc_list_dequeue_task(task);
 219}
 220
 221/*
 222 * Remove request from queue.
 223 * Note: must be called with spin lock held.
 224 */
 225static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
 226{
 227	__rpc_disable_timer(queue, task);
 228	if (RPC_IS_PRIORITY(queue))
 229		__rpc_remove_wait_queue_priority(task);
 230	else
 231		list_del(&task->u.tk_wait.list);
 232	queue->qlen--;
 233}
 234
 235static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 236{
 237	int i;
 238
 239	spin_lock_init(&queue->lock);
 240	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
 241		INIT_LIST_HEAD(&queue->tasks[i]);
 242	queue->maxpriority = nr_queues - 1;
 243	rpc_reset_waitqueue_priority(queue);
 244	queue->qlen = 0;
 245	queue->timer_list.expires = 0;
 246	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
 247	INIT_LIST_HEAD(&queue->timer_list.list);
 248	rpc_assign_waitqueue_name(queue, qname);
 249}
 250
 251void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 252{
 253	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
 254}
 255EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
 256
 257void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 258{
 259	__rpc_init_priority_wait_queue(queue, qname, 1);
 260}
 261EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 262
 263void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 264{
 265	cancel_delayed_work_sync(&queue->timer_list.dwork);
 266}
 267EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 268
 269static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
 270{
 271	freezable_schedule_unsafe();
 272	if (signal_pending_state(mode, current))
 273		return -ERESTARTSYS;
 274	return 0;
 275}
 276
 277#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
 278static void rpc_task_set_debuginfo(struct rpc_task *task)
 279{
 280	static atomic_t rpc_pid;
 281
 282	task->tk_pid = atomic_inc_return(&rpc_pid);
 283}
 284#else
 285static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 286{
 287}
 288#endif
 289
 290static void rpc_set_active(struct rpc_task *task)
 291{
 292	rpc_task_set_debuginfo(task);
 293	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 294	trace_rpc_task_begin(task, NULL);
 295}
 296
 297/*
 298 * Mark an RPC call as having completed by clearing the 'active' bit
 299 * and then waking up all tasks that were sleeping.
 300 */
 301static int rpc_complete_task(struct rpc_task *task)
 302{
 303	void *m = &task->tk_runstate;
 304	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
 305	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
 306	unsigned long flags;
 307	int ret;
 308
 309	trace_rpc_task_complete(task, NULL);
 310
 311	spin_lock_irqsave(&wq->lock, flags);
 312	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 313	ret = atomic_dec_and_test(&task->tk_count);
 314	if (waitqueue_active(wq))
 315		__wake_up_locked_key(wq, TASK_NORMAL, &k);
 316	spin_unlock_irqrestore(&wq->lock, flags);
 317	return ret;
 318}
 319
 320/*
 321 * Allow callers to wait for completion of an RPC call
 322 *
 323 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 324 * to enforce taking of the wq->lock and hence avoid races with
 325 * rpc_complete_task().
 326 */
 327int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
 328{
 329	if (action == NULL)
 330		action = rpc_wait_bit_killable;
 331	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 332			action, TASK_KILLABLE);
 333}
 334EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
 335
 336/*
 337 * Make an RPC task runnable.
 338 *
 339 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 340 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 341 * the wait queue operation.
 342 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 343 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 344 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 345 * the RPC_TASK_RUNNING flag.
 346 */
 347static void rpc_make_runnable(struct workqueue_struct *wq,
 348		struct rpc_task *task)
 349{
 350	bool need_wakeup = !rpc_test_and_set_running(task);
 351
 352	rpc_clear_queued(task);
 353	if (!need_wakeup)
 354		return;
 355	if (RPC_IS_ASYNC(task)) {
 356		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 357		queue_work(wq, &task->u.tk_work);
 358	} else
 359		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 360}
 361
 362/*
 363 * Prepare for sleeping on a wait queue.
 364 * By always appending tasks to the list we ensure FIFO behavior.
 365 * NB: An RPC task will only receive interrupt-driven events as long
 366 * as it's on a wait queue.
 367 */
 368static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
 369		struct rpc_task *task,
 370		unsigned char queue_priority)
 371{
 372	trace_rpc_task_sleep(task, q);
 373
 374	__rpc_add_wait_queue(q, task, queue_priority);
 375}
 376
 377static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 378		struct rpc_task *task,
 379		unsigned char queue_priority)
 380{
 381	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
 382		return;
 383	__rpc_do_sleep_on_priority(q, task, queue_priority);
 384}
 385
 386static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 387		struct rpc_task *task, unsigned long timeout,
 388		unsigned char queue_priority)
 389{
 390	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
 391		return;
 392	if (time_is_after_jiffies(timeout)) {
 393		__rpc_do_sleep_on_priority(q, task, queue_priority);
 394		__rpc_add_timer(q, task, timeout);
 395	} else
 396		task->tk_status = -ETIMEDOUT;
 397}
 398
 399static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
 400{
 401	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
 402		task->tk_callback = action;
 403}
 404
 405static bool rpc_sleep_check_activated(struct rpc_task *task)
 406{
 407	/* We shouldn't ever put an inactive task to sleep */
 408	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
 409		task->tk_status = -EIO;
 410		rpc_put_task_async(task);
 411		return false;
 412	}
 413	return true;
 414}
 415
 416void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
 417				rpc_action action, unsigned long timeout)
 418{
 419	if (!rpc_sleep_check_activated(task))
 420		return;
 421
 422	rpc_set_tk_callback(task, action);
 423
 424	/*
 425	 * Protect the queue operations.
 426	 */
 427	spin_lock(&q->lock);
 428	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
 429	spin_unlock(&q->lock);
 430}
 431EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
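Illustrative use of the newer interface: the deadline is passed as an absolute jiffies value instead of the caller writing task->tk_timeout first (compare rpc_sleep_on() in the v4.6 listing above); my_waitq is the same hypothetical queue used in the earlier sketches:

	static void my_wait_with_deadline(struct rpc_task *task)
	{
		/* wait at most five seconds; on expiry the task resumes with -ETIMEDOUT */
		rpc_sleep_on_timeout(&my_waitq, task, NULL, jiffies + 5 * HZ);
	}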
 432
 433void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 434				rpc_action action)
 435{
 436	if (!rpc_sleep_check_activated(task))
 437		return;
 438
 439	rpc_set_tk_callback(task, action);
 440
 441	WARN_ON_ONCE(task->tk_timeout != 0);
 442	/*
 443	 * Protect the queue operations.
 444	 */
 445	spin_lock(&q->lock);
 446	__rpc_sleep_on_priority(q, task, task->tk_priority);
 447	spin_unlock(&q->lock);
 448}
 449EXPORT_SYMBOL_GPL(rpc_sleep_on);
 450
 451void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 452		struct rpc_task *task, unsigned long timeout, int priority)
 453{
 454	if (!rpc_sleep_check_activated(task))
 455		return;
 456
 457	priority -= RPC_PRIORITY_LOW;
 458	/*
 459	 * Protect the queue operations.
 460	 */
 461	spin_lock(&q->lock);
 462	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
 463	spin_unlock(&q->lock);
 464}
 465EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
 466
 467void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 468		int priority)
 469{
 470	if (!rpc_sleep_check_activated(task))
 471		return;
 472
 473	WARN_ON_ONCE(task->tk_timeout != 0);
 474	priority -= RPC_PRIORITY_LOW;
 475	/*
 476	 * Protect the queue operations.
 477	 */
 478	spin_lock(&q->lock);
 479	__rpc_sleep_on_priority(q, task, priority);
 480	spin_unlock(&q->lock);
 481}
 482EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
 483
 484/**
 485 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 486 * @wq: workqueue on which to run task
 487 * @queue: wait queue
 488 * @task: task to be woken up
 489 *
 490 * Caller must hold queue->lock, and have cleared the task queued flag.
 491 */
 492static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
 493		struct rpc_wait_queue *queue,
 494		struct rpc_task *task)
 495{
 496	/* Has the task been executed yet? If not, we cannot wake it up! */
 497	if (!RPC_IS_ACTIVATED(task)) {
 498		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
 499		return;
 500	}
 501
 502	trace_rpc_task_wakeup(task, queue);
 503
 504	__rpc_remove_wait_queue(queue, task);
 505
 506	rpc_make_runnable(wq, task);
 507}
 508
 509/*
 510 * Wake up a queued task while the queue lock is being held
 511 */
 512static struct rpc_task *
 513rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
 514		struct rpc_wait_queue *queue, struct rpc_task *task,
 515		bool (*action)(struct rpc_task *, void *), void *data)
 516{
 517	if (RPC_IS_QUEUED(task)) {
 518		smp_rmb();
 519		if (task->tk_waitqueue == queue) {
 520			if (action == NULL || action(task, data)) {
 521				__rpc_do_wake_up_task_on_wq(wq, queue, task);
 522				return task;
 523			}
 524		}
 525	}
 526	return NULL;
 527}
 528
 529/*
 530 * Wake up a queued task while the queue lock is being held
 531 */
 532static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
 533					  struct rpc_task *task)
 534{
 535	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
 536						   task, NULL, NULL);
 537}
 538
 539/*
 540 * Wake up a task on a specific queue
 541 */
 542void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 543{
 544	if (!RPC_IS_QUEUED(task))
 545		return;
 546	spin_lock(&queue->lock);
 547	rpc_wake_up_task_queue_locked(queue, task);
 548	spin_unlock(&queue->lock);
 549}
 550EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 551
 552static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
 553{
 554	task->tk_status = *(int *)status;
 555	return true;
 556}
 557
 558static void
 559rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
 560		struct rpc_task *task, int status)
 561{
 562	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
 563			task, rpc_task_action_set_status, &status);
 564}
 565
 566/**
 567 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 568 * @queue: pointer to rpc_wait_queue
 569 * @task: pointer to rpc_task
 570 * @status: integer error value
 571 *
 572 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 573 * set to the value of @status.
 574 */
 575void
 576rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
 577		struct rpc_task *task, int status)
 578{
 579	if (!RPC_IS_QUEUED(task))
 580		return;
 581	spin_lock(&queue->lock);
 582	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
 583	spin_unlock(&queue->lock);
 584}
 585
 586/*
 587 * Wake up the next task on a priority queue.
 588 */
 589static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 590{
 591	struct list_head *q;
 592	struct rpc_task *task;
 593
 594	/*
 595	 * Service the privileged queue.
 596	 */
 597	q = &queue->tasks[RPC_NR_PRIORITY - 1];
 598	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
 599		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 600		goto out;
 601	}
 602
 603	/*
 604	 * Service a batch of tasks from a single owner.
 605	 */
 606	q = &queue->tasks[queue->priority];
 607	if (!list_empty(q) && queue->nr) {
 608		queue->nr--;
 609		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 610		goto out;
 611	}
 612
 613	/*
 614	 * Service the next queue.
 615	 */
 616	do {
 617		if (q == &queue->tasks[0])
 618			q = &queue->tasks[queue->maxpriority];
 619		else
 620			q = q - 1;
 621		if (!list_empty(q)) {
 622			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 623			goto new_queue;
 624		}
 625	} while (q != &queue->tasks[queue->priority]);
 626
 627	rpc_reset_waitqueue_priority(queue);
 628	return NULL;
 629
 630new_queue:
 631	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
 632out:
 633	return task;
 634}
 635
 636static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
 637{
 638	if (RPC_IS_PRIORITY(queue))
 639		return __rpc_find_next_queued_priority(queue);
 640	if (!list_empty(&queue->tasks[0]))
 641		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
 642	return NULL;
 643}
 644
 645/*
 646 * Wake up the first task on the wait queue.
 647 */
 648struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
 649		struct rpc_wait_queue *queue,
 650		bool (*func)(struct rpc_task *, void *), void *data)
 651{
 652	struct rpc_task	*task = NULL;
 653
 654	spin_lock(&queue->lock);
 655	task = __rpc_find_next_queued(queue);
 656	if (task != NULL)
 657		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
 658				task, func, data);
 659	spin_unlock(&queue->lock);
 660
 661	return task;
 662}
 663
 664/*
 665 * Wake up the first task on the wait queue.
 666 */
 667struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
 668		bool (*func)(struct rpc_task *, void *), void *data)
 669{
 670	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
 671}
 672EXPORT_SYMBOL_GPL(rpc_wake_up_first);
 673
 674static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
 675{
 676	return true;
 677}
 678
 679/*
 680 * Wake up the next task on the wait queue.
 681*/
 682struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
 683{
 684	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
 685}
 686EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 687
 688/**
 689 * rpc_wake_up_locked - wake up all rpc_tasks
 690 * @queue: rpc_wait_queue on which the tasks are sleeping
 691 *
 692 */
 693static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
 694{
 695	struct rpc_task *task;
 696
 697	for (;;) {
 698		task = __rpc_find_next_queued(queue);
 699		if (task == NULL)
 700			break;
 701		rpc_wake_up_task_queue_locked(queue, task);
 702	}
 703}
 704
 705/**
 706 * rpc_wake_up - wake up all rpc_tasks
 707 * @queue: rpc_wait_queue on which the tasks are sleeping
 708 *
 709 * Grabs queue->lock
 710 */
 711void rpc_wake_up(struct rpc_wait_queue *queue)
 712{
 713	spin_lock(&queue->lock);
 714	rpc_wake_up_locked(queue);
 715	spin_unlock(&queue->lock);
 716}
 717EXPORT_SYMBOL_GPL(rpc_wake_up);
 718
 719/**
 720 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 721 * @queue: rpc_wait_queue on which the tasks are sleeping
 722 * @status: status value to set
 723 */
 724static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
 725{
 726	struct rpc_task *task;
 727
 728	for (;;) {
 729		task = __rpc_find_next_queued(queue);
 730		if (task == NULL)
 731			break;
 732		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
 733	}
 734}
 735
 736/**
 737 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 738 * @queue: rpc_wait_queue on which the tasks are sleeping
 739 * @status: status value to set
 740 *
 741 * Grabs queue->lock
 742 */
 743void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 744{
 745	spin_lock(&queue->lock);
 746	rpc_wake_up_status_locked(queue, status);
 747	spin_unlock(&queue->lock);
 748}
 749EXPORT_SYMBOL_GPL(rpc_wake_up_status);
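/*
 * Illustrative sketch (hypothetical caller): a transport being torn
 * down can use rpc_wake_up_status() to fail every task still parked on
 * its wait queues, for example
 *
 *	rpc_wake_up_status(&xprt->pending, -EIO);
 *	rpc_wake_up_status(&xprt->sending, -EIO);
 *
 * Each woken task resumes in __rpc_execute() with tk_status set to
 * -EIO, and its next tk_action or tk_callback decides how to handle
 * the error.
 */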
 750
 751static void __rpc_queue_timer_fn(struct work_struct *work)
 752{
 753	struct rpc_wait_queue *queue = container_of(work,
 754			struct rpc_wait_queue,
 755			timer_list.dwork.work);
 756	struct rpc_task *task, *n;
 757	unsigned long expires, now, timeo;
 758
 759	spin_lock(&queue->lock);
 760	expires = now = jiffies;
 761	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 762		timeo = task->tk_timeout;
 763		if (time_after_eq(now, timeo)) {
 764			trace_rpc_task_timeout(task, task->tk_action);
 765			task->tk_status = -ETIMEDOUT;
 766			rpc_wake_up_task_queue_locked(queue, task);
 767			continue;
 768		}
 769		if (expires == now || time_after(expires, timeo))
 770			expires = timeo;
 771	}
 772	if (!list_empty(&queue->timer_list.list))
 773		rpc_set_queue_timer(queue, expires);
 774	spin_unlock(&queue->lock);
 775}
 776
 777static void __rpc_atrun(struct rpc_task *task)
 778{
 779	if (task->tk_status == -ETIMEDOUT)
 780		task->tk_status = 0;
 781}
 782
 783/*
 784 * Run a task at a later time
 785 */
 786void rpc_delay(struct rpc_task *task, unsigned long delay)
 787{
 788	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
 789}
 790EXPORT_SYMBOL_GPL(rpc_delay);
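/*
 * Illustrative sketch (hypothetical tk_action step): a state-machine
 * step that hits a temporary failure can park the task on the delay
 * queue and retry the same step a few seconds later:
 *
 *	static void call_foo(struct rpc_task *task)
 *	{
 *		if (task->tk_status == -EAGAIN) {
 *			task->tk_status = 0;
 *			task->tk_action = call_foo;	// re-run this step
 *			rpc_delay(task, 3 * HZ);	// back off for ~3 seconds
 *			return;
 *		}
 *		task->tk_action = rpc_exit_task;	// otherwise finish up
 *	}
 *
 * __rpc_atrun() runs as the tk_callback when the delay expires and
 * clears a -ETIMEDOUT status, so the retried step does not mistake the
 * timer expiry for an RPC error.
 */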
 791
 792/*
 793 * Helper to call task->tk_ops->rpc_call_prepare
 794 */
 795void rpc_prepare_task(struct rpc_task *task)
 796{
 797	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 798}
 799
 800static void
 801rpc_init_task_statistics(struct rpc_task *task)
 802{
 803	/* Initialize retry counters */
 804	task->tk_garb_retry = 2;
 805	task->tk_cred_retry = 2;
 806	task->tk_rebind_retry = 2;
 807
 808	/* starting timestamp */
 809	task->tk_start = ktime_get();
 810}
 811
 812static void
 813rpc_reset_task_statistics(struct rpc_task *task)
 814{
 815	task->tk_timeouts = 0;
 816	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
 817	rpc_init_task_statistics(task);
 818}
 819
 820/*
 821 * Helper that calls task->tk_ops->rpc_call_done if it exists
 822 */
 823void rpc_exit_task(struct rpc_task *task)
 824{
 825	trace_rpc_task_end(task, task->tk_action);
 826	task->tk_action = NULL;
 827	if (task->tk_ops->rpc_count_stats)
 828		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
 829	else if (task->tk_client)
 830		rpc_count_iostats(task, task->tk_client->cl_metrics);
 831	if (task->tk_ops->rpc_call_done != NULL) {
 832		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 833		if (task->tk_action != NULL) {
 834			/* Always release the RPC slot and buffer memory */
 835			xprt_release(task);
 836			rpc_reset_task_statistics(task);
 837		}
 838	}
 839}
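/*
 * Illustrative sketch (hypothetical ->rpc_call_done): because
 * rpc_exit_task() checks whether the callback installed a new
 * tk_action, a completion handler can restart the whole call; the
 * transport slot is released and the statistics reset before the task
 * runs again.  The NFSv3 "jukebox" handling follows roughly this
 * pattern:
 *
 *	static void foo_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status == -EJUKEBOX) {
 *			task->tk_status = 0;
 *			rpc_restart_call(task);		// installs a fresh tk_action
 *			rpc_delay(task, 5 * HZ);	// and waits before resending
 *		}
 *	}
 */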
 840
 841void rpc_signal_task(struct rpc_task *task)
 842{
 843	struct rpc_wait_queue *queue;
 844
 845	if (!RPC_IS_ACTIVATED(task))
 846		return;
 847
 848	trace_rpc_task_signalled(task, task->tk_action);
 849	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 850	smp_mb__after_atomic();
 851	queue = READ_ONCE(task->tk_waitqueue);
 852	if (queue)
 853		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
 854}
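/*
 * Illustrative sketch: the main caller is rpc_killall_tasks() in
 * clnt.c, which walks the client's task list under cl_lock and signals
 * each task, roughly
 *
 *	spin_lock(&clnt->cl_lock);
 *	list_for_each_entry(task, &clnt->cl_tasks, tk_task)
 *		rpc_signal_task(task);
 *	spin_unlock(&clnt->cl_lock);
 *
 * A task that is currently running notices RPC_SIGNALLED() in
 * __rpc_execute() and exits with -ERESTARTSYS; a task that is queued
 * is woken immediately with that status.
 */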
 855
 856void rpc_exit(struct rpc_task *task, int status)
 857{
 858	task->tk_status = status;
 859	task->tk_action = rpc_exit_task;
 860	rpc_wake_up_queued_task(task->tk_waitqueue, task);
 861}
 862EXPORT_SYMBOL_GPL(rpc_exit);
 863
 864void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 865{
 866	if (ops->rpc_release != NULL)
 867		ops->rpc_release(calldata);
 868}
 869
 870/*
 871 * This is the RPC `scheduler' (or rather, the finite state machine).
 872 */
 873static void __rpc_execute(struct rpc_task *task)
 874{
 875	struct rpc_wait_queue *queue;
 876	int task_is_async = RPC_IS_ASYNC(task);
 877	int status = 0;
 878
 879	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 880	if (RPC_IS_QUEUED(task))
 881		return;
 882
 883	for (;;) {
 884		void (*do_action)(struct rpc_task *);
 885
 886		/*
 887		 * Perform the next FSM step or a pending callback.
 888		 *
 889		 * tk_action may be NULL if the task has been killed.
 890		 * In particular, note that rpc_killall_tasks may
 891		 * do this at any time, so beware when dereferencing.
 892		 */
 893		do_action = task->tk_action;
 894		if (task->tk_callback) {
 895			do_action = task->tk_callback;
 896			task->tk_callback = NULL;
 897		}
 898		if (!do_action)
 899			break;
 900		trace_rpc_task_run_action(task, do_action);
 901		do_action(task);
 902
 903		/*
 904		 * Lockless check for whether task is sleeping or not.
 905		 */
 906		if (!RPC_IS_QUEUED(task))
 907			continue;
 908
 909		/*
 910		 * Signalled tasks should exit rather than sleep.
 911		 */
 912		if (RPC_SIGNALLED(task)) {
 913			task->tk_rpc_status = -ERESTARTSYS;
 914			rpc_exit(task, -ERESTARTSYS);
 915		}
 916
 917		/*
 918		 * The queue->lock protects against races with
 919		 * rpc_make_runnable().
 920		 *
 921		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
 922		 * rpc_task, rpc_make_runnable() can assign it to a
 923		 * different workqueue. We therefore cannot assume that the
 924		 * rpc_task pointer can still be safely dereferenced.
 925		 */
 926		queue = task->tk_waitqueue;
 927		spin_lock(&queue->lock);
 928		if (!RPC_IS_QUEUED(task)) {
 929			spin_unlock(&queue->lock);
 930			continue;
 931		}
 932		rpc_clear_running(task);
 933		spin_unlock(&queue->lock);
 934		if (task_is_async)
 935			return;
 936
 937		/* sync task: sleep here */
 938		trace_rpc_task_sync_sleep(task, task->tk_action);
 939		status = out_of_line_wait_on_bit(&task->tk_runstate,
 940				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 941				TASK_KILLABLE);
 942		if (status < 0) {
 943			/*
 944			 * When a sync task receives a signal, it exits with
 945			 * -ERESTARTSYS. In order to catch any callbacks that
 946			 * clean up after sleeping on some queue, we don't
 947			 * break the loop here, but go around once more.
 948			 */
 949			trace_rpc_task_signalled(task, task->tk_action);
 950			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 951			task->tk_rpc_status = -ERESTARTSYS;
 952			rpc_exit(task, -ERESTARTSYS);
 953		}
 954		trace_rpc_task_sync_wake(task, task->tk_action);
 955	}
 956
 957	/* Release all resources associated with the task */
 958	rpc_release_task(task);
 959}
 960
 961/*
 962 * User-visible entry point to the scheduler.
 963 *
 964 * This may be called recursively if e.g. an async NFS task updates
 965 * the attributes and finds that dirty pages must be flushed.
 966 * NOTE: Upon exit of this function the task is guaranteed to be
 967 *	 released. In particular note that tk_release() will have
 968 *	 been called, so your task memory may have been freed.
 969 */
 970void rpc_execute(struct rpc_task *task)
 971{
 972	bool is_async = RPC_IS_ASYNC(task);
 973
 974	rpc_set_active(task);
 975	rpc_make_runnable(rpciod_workqueue, task);
 976	if (!is_async) {
 977		unsigned int pflags = memalloc_nofs_save();
 978		__rpc_execute(task);
 979		memalloc_nofs_restore(pflags);
 980	}
 981}
 982
 983static void rpc_async_schedule(struct work_struct *work)
 984{
 985	unsigned int pflags = memalloc_nofs_save();
 986
 987	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 988	memalloc_nofs_restore(pflags);
 989}
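/*
 * Illustrative sketch: rpc_make_runnable() hands asynchronous tasks to
 * this work function, roughly
 *
 *	INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 *	queue_work(wq, &task->u.tk_work);
 *
 * so every state-machine step of an async task runs in workqueue
 * context (normally rpciod) with PF_MEMALLOC_NOFS set, never in the
 * submitter's context.
 */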
 990
 991/**
 992 * rpc_malloc - allocate RPC buffer resources
 993 * @task: RPC task
 994 *
 995 * A single memory region is allocated, which is split between the
 996 * RPC call and RPC reply that this task is being used for. When
 997 * this RPC is retired, the memory is released by calling rpc_free.
 998 *
 999 * To prevent rpciod from hanging, this allocator never sleeps,
1000 * returning -ENOMEM and suppressing the warning if the request cannot
1001 * be serviced immediately. The caller can arrange to sleep in a
1002 * way that is safe for rpciod.
1003 *
1004 * Most requests are 'small' (under 2KiB) and can be serviced from a
1005 * mempool, ensuring that NFS reads and writes can always proceed,
1006 * and that there is good locality of reference for these buffers.
1007 */
1008int rpc_malloc(struct rpc_task *task)
1009{
1010	struct rpc_rqst *rqst = task->tk_rqstp;
1011	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
1012	struct rpc_buffer *buf;
1013	gfp_t gfp = GFP_NOFS;
1014
1015	if (RPC_IS_SWAPPER(task))
1016		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
1017
1018	size += sizeof(struct rpc_buffer);
1019	if (size <= RPC_BUFFER_MAXSIZE)
1020		buf = mempool_alloc(rpc_buffer_mempool, gfp);
1021	else
1022		buf = kmalloc(size, gfp);
1023
1024	if (!buf)
1025		return -ENOMEM;
1026
1027	buf->len = size;
1028	rqst->rq_buffer = buf->data;
1029	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
1030	return 0;
1031}
1032EXPORT_SYMBOL_GPL(rpc_malloc);
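/*
 * Illustrative example of the buffer layout: for a request with
 * rq_callsize == 1024 and rq_rcvsize == 512, a single region of
 * 1536 bytes plus the struct rpc_buffer header is carved up as
 *
 *	buf->data + 0    -> rq_buffer   (1024-byte call marshalling area)
 *	buf->data + 1024 -> rq_rbuffer  (512-byte reply area)
 *
 * and, since the total does not exceed RPC_BUFFER_MAXSIZE (2048), it
 * is served from rpc_buffer_mempool rather than kmalloc().
 */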
1033
1034/**
1035 * rpc_free - free RPC buffer resources allocated via rpc_malloc
1036 * @task: RPC task
1037 *
1038 */
1039void rpc_free(struct rpc_task *task)
1040{
1041	void *buffer = task->tk_rqstp->rq_buffer;
1042	size_t size;
1043	struct rpc_buffer *buf;
1044
1045	buf = container_of(buffer, struct rpc_buffer, data);
1046	size = buf->len;
1047
1048	if (size <= RPC_BUFFER_MAXSIZE)
1049		mempool_free(buf, rpc_buffer_mempool);
1050	else
1051		kfree(buf);
1052}
1053EXPORT_SYMBOL_GPL(rpc_free);
1054
1055/*
1056 * Creation and deletion of RPC task structures
1057 */
1058static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1059{
1060	memset(task, 0, sizeof(*task));
1061	atomic_set(&task->tk_count, 1);
1062	task->tk_flags  = task_setup_data->flags;
1063	task->tk_ops = task_setup_data->callback_ops;
1064	task->tk_calldata = task_setup_data->callback_data;
1065	INIT_LIST_HEAD(&task->tk_task);
1066
1067	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1068	task->tk_owner = current->tgid;
1069
1070	/* Initialize workqueue for async tasks */
1071	task->tk_workqueue = task_setup_data->workqueue;
1072
1073	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1074			xprt_get(task_setup_data->rpc_xprt));
1075
1076	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1077
1078	if (task->tk_ops->rpc_call_prepare != NULL)
1079		task->tk_action = rpc_prepare_task;
1080
1081	rpc_init_task_statistics(task);
1082}
1083
1084static struct rpc_task *
1085rpc_alloc_task(void)
1086{
1087	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
1088}
1089
1090/*
1091 * Create a new task for the specified client.
1092 */
1093struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
1094{
1095	struct rpc_task	*task = setup_data->task;
1096	unsigned short flags = 0;
1097
1098	if (task == NULL) {
1099		task = rpc_alloc_task();
1100		flags = RPC_TASK_DYNAMIC;
1101	}
1102
1103	rpc_init_task(task, setup_data);
1104	task->tk_flags |= flags;
1105	return task;
1106}
1107
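/*
 * Illustrative sketch: most callers reach rpc_new_task() through
 * rpc_run_task() in clnt.c; the underlying flow roughly amounts to
 *
 *	struct rpc_task_setup setup = {
 *		.rpc_client	= clnt,
 *		.callback_ops	= &foo_call_ops,	// hypothetical rpc_call_ops
 *		.callback_data	= foo_data,
 *		.flags		= RPC_TASK_ASYNC,
 *		.priority	= RPC_PRIORITY_NORMAL,
 *	};
 *	struct rpc_task *task = rpc_new_task(&setup);
 *
 *	rpc_execute(task);	// run the FSM (queued to rpciod when async)
 *	rpc_put_task(task);	// drop the caller's reference
 *
 * Tasks allocated here (setup_data->task == NULL) are marked
 * RPC_TASK_DYNAMIC so that rpc_free_task() later returns them to
 * rpc_task_mempool.
 */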
1108/*
1109 * rpc_free_task - release rpc task and perform cleanups
1110 *
1111 * Note that we free up the rpc_task _after_ rpc_release_calldata()
1112 * in order to work around a workqueue dependency issue.
1113 *
1114 * Tejun Heo states:
1115 * "Workqueue currently considers two work items to be the same if they're
1116 * on the same address and won't execute them concurrently - ie. it
1117 * makes a work item which is queued again while being executed wait
1118 * for the previous execution to complete.
1119 *
1120 * If a work function frees the work item, and then waits for an event
1121 * which should be performed by another work item and *that* work item
1122 * recycles the freed work item, it can create a false dependency loop.
1123 * There really is no reliable way to detect this short of verifying
1124 * every memory free."
1125 *
1126 */
1127static void rpc_free_task(struct rpc_task *task)
1128{
1129	unsigned short tk_flags = task->tk_flags;
1130
1131	put_rpccred(task->tk_op_cred);
1132	rpc_release_calldata(task->tk_ops, task->tk_calldata);
1133
1134	if (tk_flags & RPC_TASK_DYNAMIC)
1135		mempool_free(task, rpc_task_mempool);
1136}
1137
1138static void rpc_async_release(struct work_struct *work)
1139{
1140	unsigned int pflags = memalloc_nofs_save();
1141
1142	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
1143	memalloc_nofs_restore(pflags);
1144}
1145
1146static void rpc_release_resources_task(struct rpc_task *task)
1147{
1148	xprt_release(task);
1149	if (task->tk_msg.rpc_cred) {
1150		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1151			put_cred(task->tk_msg.rpc_cred);
1152		task->tk_msg.rpc_cred = NULL;
1153	}
1154	rpc_task_release_client(task);
1155}
1156
1157static void rpc_final_put_task(struct rpc_task *task,
1158		struct workqueue_struct *q)
1159{
1160	if (q != NULL) {
1161		INIT_WORK(&task->u.tk_work, rpc_async_release);
1162		queue_work(q, &task->u.tk_work);
1163	} else
1164		rpc_free_task(task);
1165}
1166
1167static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1168{
1169	if (atomic_dec_and_test(&task->tk_count)) {
1170		rpc_release_resources_task(task);
1171		rpc_final_put_task(task, q);
1172	}
1173}
1174
1175void rpc_put_task(struct rpc_task *task)
1176{
1177	rpc_do_put_task(task, NULL);
1178}
1179EXPORT_SYMBOL_GPL(rpc_put_task);
1180
1181void rpc_put_task_async(struct rpc_task *task)
1182{
1183	rpc_do_put_task(task, task->tk_workqueue);
1184}
1185EXPORT_SYMBOL_GPL(rpc_put_task_async);
1186
1187static void rpc_release_task(struct rpc_task *task)
1188{
1189	WARN_ON_ONCE(RPC_IS_QUEUED(task));
1190
1191	rpc_release_resources_task(task);
1192
1193	/*
1194	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1195	 * so it should be safe to use task->tk_count as a test for whether
1196	 * or not any other processes still hold references to our rpc_task.
1197	 */
1198	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1199		/* Wake up anyone who may be waiting for task completion */
1200		if (!rpc_complete_task(task))
1201			return;
1202	} else {
1203		if (!atomic_dec_and_test(&task->tk_count))
1204			return;
1205	}
1206	rpc_final_put_task(task, task->tk_workqueue);
1207}
1208
1209int rpciod_up(void)
1210{
1211	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1212}
1213
1214void rpciod_down(void)
1215{
1216	module_put(THIS_MODULE);
1217}
1218
1219/*
1220 * Start up the rpciod workqueue.
1221 */
1222static int rpciod_start(void)
1223{
1224	struct workqueue_struct *wq;
1225
1226	/*
1227	 * Create the rpciod and xprtiod workqueues.
1228	 */
1229	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1230	if (!wq)
1231		goto out_failed;
1232	rpciod_workqueue = wq;
1233	/* Note: highpri because network receive is latency sensitive */
1234	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
1235	if (!wq)
1236		goto free_rpciod;
1237	xprtiod_workqueue = wq;
1238	return 1;
1239free_rpciod:
1240	wq = rpciod_workqueue;
1241	rpciod_workqueue = NULL;
1242	destroy_workqueue(wq);
1243out_failed:
1244	return 0;
1245}
1246
1247static void rpciod_stop(void)
1248{
1249	struct workqueue_struct *wq = NULL;
1250
1251	if (rpciod_workqueue == NULL)
1252		return;
1253
1254	wq = rpciod_workqueue;
1255	rpciod_workqueue = NULL;
1256	destroy_workqueue(wq);
1257	wq = xprtiod_workqueue;
1258	xprtiod_workqueue = NULL;
1259	destroy_workqueue(wq);
1260}
1261
1262void
1263rpc_destroy_mempool(void)
1264{
1265	rpciod_stop();
1266	mempool_destroy(rpc_buffer_mempool);
1267	mempool_destroy(rpc_task_mempool);
1268	kmem_cache_destroy(rpc_task_slabp);
1269	kmem_cache_destroy(rpc_buffer_slabp);
1270	rpc_destroy_wait_queue(&delay_queue);
1271}
1272
1273int
1274rpc_init_mempool(void)
1275{
1276	/*
1277	 * The following is not strictly a mempool initialisation,
1278	 * but there is no harm in doing it here
1279	 */
1280	rpc_init_wait_queue(&delay_queue, "delayq");
1281	if (!rpciod_start())
1282		goto err_nomem;
1283
1284	rpc_task_slabp = kmem_cache_create("rpc_tasks",
1285					     sizeof(struct rpc_task),
1286					     0, SLAB_HWCACHE_ALIGN,
1287					     NULL);
1288	if (!rpc_task_slabp)
1289		goto err_nomem;
1290	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1291					     RPC_BUFFER_MAXSIZE,
1292					     0, SLAB_HWCACHE_ALIGN,
1293					     NULL);
1294	if (!rpc_buffer_slabp)
1295		goto err_nomem;
1296	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1297						    rpc_task_slabp);
1298	if (!rpc_task_mempool)
1299		goto err_nomem;
1300	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1301						      rpc_buffer_slabp);
1302	if (!rpc_buffer_mempool)
1303		goto err_nomem;
1304	return 0;
1305err_nomem:
1306	rpc_destroy_mempool();
1307	return -ENOMEM;
1308}