v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/net/sunrpc/sched.c
   4 *
   5 * Scheduling for synchronous and asynchronous RPC requests.
   6 *
   7 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
   8 *
   9 * TCP NFS related read + write fixes
  10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  11 */
  12
  13#include <linux/module.h>
  14
  15#include <linux/sched.h>
  16#include <linux/interrupt.h>
  17#include <linux/slab.h>
  18#include <linux/mempool.h>
  19#include <linux/smp.h>
  20#include <linux/spinlock.h>
  21#include <linux/mutex.h>
  22#include <linux/freezer.h>
  23#include <linux/sched/mm.h>
  24
  25#include <linux/sunrpc/clnt.h>
  26#include <linux/sunrpc/metrics.h>
  27
  28#include "sunrpc.h"
  29
  30#define CREATE_TRACE_POINTS
  31#include <trace/events/sunrpc.h>
  32
  33/*
  34 * RPC slabs and memory pools
  35 */
  36#define RPC_BUFFER_MAXSIZE	(2048)
  37#define RPC_BUFFER_POOLSIZE	(8)
  38#define RPC_TASK_POOLSIZE	(8)
  39static struct kmem_cache	*rpc_task_slabp __read_mostly;
  40static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
  41static mempool_t	*rpc_task_mempool __read_mostly;
  42static mempool_t	*rpc_buffer_mempool __read_mostly;
  43
  44static void			rpc_async_schedule(struct work_struct *);
  45static void			 rpc_release_task(struct rpc_task *task);
  46static void __rpc_queue_timer_fn(struct work_struct *);
  47
  48/*
  49 * RPC tasks sit here while waiting for conditions to improve.
  50 */
  51static struct rpc_wait_queue delay_queue;
  52
  53/*
  54 * rpciod-related stuff
  55 */
  56struct workqueue_struct *rpciod_workqueue __read_mostly;
  57struct workqueue_struct *xprtiod_workqueue __read_mostly;
  58EXPORT_SYMBOL_GPL(xprtiod_workqueue);
  59
  60gfp_t rpc_task_gfp_mask(void)
  61{
  62	if (current->flags & PF_WQ_WORKER)
  63		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
  64	return GFP_KERNEL;
  65}
  66EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
  67
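/*
 * Editor's note (illustrative, not part of the original file): work
 * items running on rpciod must not block indefinitely in the page
 * allocator, so allocations made on behalf of a task use this mask.
 * A minimal sketch of a caller:
 *
 *	void *p = kmalloc(len, rpc_task_gfp_mask());
 *	if (!p)
 *		return -ENOMEM;		// fail fast; rpciod keeps moving
 */
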
  68bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
  69{
  70	if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
  71		return true;
  72	return false;
  73}
  74
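/*
 * Editor's note: the cmpxchg() above makes the first error recorded in
 * tk_rpc_status "sticky"; a second caller loses the race and gets
 * false back. rpc_signal_task() and rpc_task_try_cancel() below both
 * rely on this to avoid waking a task twice for competing errors.
 */
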
  75unsigned long
  76rpc_task_timeout(const struct rpc_task *task)
  77{
  78	unsigned long timeout = READ_ONCE(task->tk_timeout);
  79
  80	if (timeout != 0) {
  81		unsigned long now = jiffies;
  82		if (time_before(now, timeout))
  83			return timeout - now;
  84	}
  85	return 0;
  86}
  87EXPORT_SYMBOL_GPL(rpc_task_timeout);
  88
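/*
 * Editor's note: tk_timeout holds an absolute expiry time in jiffies,
 * so rpc_task_timeout() converts it to "jiffies remaining", returning
 * 0 both when no timer is armed and when the deadline has passed.
 */
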
  89/*
  90 * Disable the timer for a given RPC task. Should be called with
   91 * queue->lock held and bottom halves disabled in order to avoid
   92 * races within __rpc_queue_timer_fn().
  93 */
  94static void
  95__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  96{
  97	if (list_empty(&task->u.tk_wait.timer_list))
  98		return;
  99	task->tk_timeout = 0;
 100	list_del(&task->u.tk_wait.timer_list);
 101	if (list_empty(&queue->timer_list.list))
 102		cancel_delayed_work(&queue->timer_list.dwork);
 103}
 104
 105static void
 106rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
 107{
 108	unsigned long now = jiffies;
 109	queue->timer_list.expires = expires;
 110	if (time_before_eq(expires, now))
 111		expires = 0;
 112	else
 113		expires -= now;
 114	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
 115}
 116
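/*
 * Editor's note: the queue keeps absolute expiry times, while
 * mod_delayed_work() takes a relative delay, hence the conversion
 * above; an expiry already in the past becomes a delay of 0, firing
 * the timer work immediately.
 */
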
 117/*
 118 * Set up a timer for the current task.
 119 */
 120static void
 121__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
 122		unsigned long timeout)
 123{
 124	task->tk_timeout = timeout;
 125	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
 126		rpc_set_queue_timer(queue, timeout);
 127	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 128}
 129
 130static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 131{
 132	if (queue->priority != priority) {
 133		queue->priority = priority;
 134		queue->nr = 1U << priority;
 135	}
 136}
 137
 138static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
 139{
 140	rpc_set_waitqueue_priority(queue, queue->maxpriority);
 141}
 142
 143/*
 144 * Add a request to a queue list
 145 */
 146static void
 147__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
 148{
 149	struct rpc_task *t;
 150
 151	list_for_each_entry(t, q, u.tk_wait.list) {
 152		if (t->tk_owner == task->tk_owner) {
 153			list_add_tail(&task->u.tk_wait.links,
 154					&t->u.tk_wait.links);
 155			/* Cache the queue head in task->u.tk_wait.list */
 156			task->u.tk_wait.list.next = q;
 157			task->u.tk_wait.list.prev = NULL;
 158			return;
 159		}
 160	}
 161	INIT_LIST_HEAD(&task->u.tk_wait.links);
 162	list_add_tail(&task->u.tk_wait.list, q);
 163}
 164
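/*
 * Editor's note: tasks sharing a tk_owner are chained off the first
 * such task via u.tk_wait.links. For those tasks, u.tk_wait.list is
 * not a live list node: .next caches the queue head and .prev == NULL
 * marks that state, which __rpc_list_dequeue_task() below relies on.
 */
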
 165/*
 166 * Remove request from a queue list
 167 */
 168static void
 169__rpc_list_dequeue_task(struct rpc_task *task)
 170{
 171	struct list_head *q;
 172	struct rpc_task *t;
 173
 174	if (task->u.tk_wait.list.prev == NULL) {
 175		list_del(&task->u.tk_wait.links);
 176		return;
 177	}
 178	if (!list_empty(&task->u.tk_wait.links)) {
 179		t = list_first_entry(&task->u.tk_wait.links,
 180				struct rpc_task,
 181				u.tk_wait.links);
 182		/* Assume __rpc_list_enqueue_task() cached the queue head */
 183		q = t->u.tk_wait.list.next;
 184		list_add_tail(&t->u.tk_wait.list, q);
 185		list_del(&task->u.tk_wait.links);
 186	}
 187	list_del(&task->u.tk_wait.list);
 188}
 189
 190/*
 191 * Add new request to a priority queue.
 192 */
 193static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 194		struct rpc_task *task,
 195		unsigned char queue_priority)
 196{
 197	if (unlikely(queue_priority > queue->maxpriority))
 198		queue_priority = queue->maxpriority;
 199	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
 200}
 201
 202/*
 203 * Add new request to wait queue.
 204 */
 205static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 206		struct rpc_task *task,
 207		unsigned char queue_priority)
 208{
 209	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
 210	if (RPC_IS_PRIORITY(queue))
 211		__rpc_add_wait_queue_priority(queue, task, queue_priority);
 212	else
 213		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 214	task->tk_waitqueue = queue;
 215	queue->qlen++;
 216	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
 217	smp_wmb();
 218	rpc_set_queued(task);
 219}
 220
 221/*
 222 * Remove request from a priority queue.
 223 */
 224static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
 225{
 226	__rpc_list_dequeue_task(task);
 227}
 228
 229/*
 230 * Remove request from queue.
 231 * Note: must be called with spin lock held.
 232 */
 233static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
 234{
 235	__rpc_disable_timer(queue, task);
 236	if (RPC_IS_PRIORITY(queue))
 237		__rpc_remove_wait_queue_priority(task);
 238	else
 239		list_del(&task->u.tk_wait.list);
 240	queue->qlen--;
 241}
 242
 243static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 244{
 245	int i;
 246
 247	spin_lock_init(&queue->lock);
 248	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
 249		INIT_LIST_HEAD(&queue->tasks[i]);
 250	queue->maxpriority = nr_queues - 1;
 251	rpc_reset_waitqueue_priority(queue);
 252	queue->qlen = 0;
 253	queue->timer_list.expires = 0;
 254	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
 255	INIT_LIST_HEAD(&queue->timer_list.list);
 256	rpc_assign_waitqueue_name(queue, qname);
 257}
 258
 259void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 260{
 261	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
 262}
 263EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
 264
 265void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 266{
 267	__rpc_init_priority_wait_queue(queue, qname, 1);
 268}
 269EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 270
 271void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 272{
 273	cancel_delayed_work_sync(&queue->timer_list.dwork);
 274}
 275EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 276
 277static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
 278{
 279	schedule();
 280	if (signal_pending_state(mode, current))
 281		return -ERESTARTSYS;
 282	return 0;
 283}
 284
 285#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
 286static void rpc_task_set_debuginfo(struct rpc_task *task)
 287{
 288	struct rpc_clnt *clnt = task->tk_client;
 289
 290	/* Might be a task carrying a reverse-direction operation */
 291	if (!clnt) {
 292		static atomic_t rpc_pid;
 293
 294		task->tk_pid = atomic_inc_return(&rpc_pid);
 295		return;
 296	}
 297
 298	task->tk_pid = atomic_inc_return(&clnt->cl_pid);
 299}
 300#else
 301static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 302{
 303}
 304#endif
 305
 306static void rpc_set_active(struct rpc_task *task)
 307{
 308	rpc_task_set_debuginfo(task);
 309	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 310	trace_rpc_task_begin(task, NULL);
 311}
 312
 313/*
 314 * Mark an RPC call as having completed by clearing the 'active' bit
 315 * and then waking up all tasks that were sleeping.
 316 */
 317static int rpc_complete_task(struct rpc_task *task)
 318{
 319	void *m = &task->tk_runstate;
 320	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
 321	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
 322	unsigned long flags;
 323	int ret;
 324
 325	trace_rpc_task_complete(task, NULL);
 326
 327	spin_lock_irqsave(&wq->lock, flags);
 328	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 329	ret = atomic_dec_and_test(&task->tk_count);
 330	if (waitqueue_active(wq))
 331		__wake_up_locked_key(wq, TASK_NORMAL, &k);
 332	spin_unlock_irqrestore(&wq->lock, flags);
 333	return ret;
 334}
 335
 336/*
 337 * Allow callers to wait for completion of an RPC call
 338 *
 339 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 340 * to enforce taking of the wq->lock and hence avoid races with
 341 * rpc_complete_task().
 342 */
 343int rpc_wait_for_completion_task(struct rpc_task *task)
 344{
 345	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 346			rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
 347}
 348EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task);
 349
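/*
 * Editor's sketch (hypothetical caller; rpc_run_task() lives in
 * clnt.c, not in this file):
 *
 *	struct rpc_task *task = rpc_run_task(&task_setup_data);
 *	if (!IS_ERR(task)) {
 *		rpc_wait_for_completion_task(task);
 *		status = task->tk_status;
 *		rpc_put_task(task);
 *	}
 */
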
 350/*
 351 * Make an RPC task runnable.
 352 *
 353 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 354 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 355 * the wait queue operation.
 356 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 357 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 358 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 359 * the RPC_TASK_RUNNING flag.
 360 */
 361static void rpc_make_runnable(struct workqueue_struct *wq,
 362		struct rpc_task *task)
 363{
 364	bool need_wakeup = !rpc_test_and_set_running(task);
 365
 366	rpc_clear_queued(task);
 367	if (!need_wakeup)
 368		return;
 369	if (RPC_IS_ASYNC(task)) {
 370		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 371		queue_work(wq, &task->u.tk_work);
 372	} else
 373		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 374}
 375
 376/*
 377 * Prepare for sleeping on a wait queue.
 378 * By always appending tasks to the list we ensure FIFO behavior.
 379 * NB: An RPC task will only receive interrupt-driven events as long
 380 * as it's on a wait queue.
 381 */
 382static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
 383		struct rpc_task *task,
 384		unsigned char queue_priority)
 385{
 386	trace_rpc_task_sleep(task, q);
 387
 388	__rpc_add_wait_queue(q, task, queue_priority);
 389}
 390
 391static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 392		struct rpc_task *task,
 393		unsigned char queue_priority)
 394{
 395	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
 396		return;
 397	__rpc_do_sleep_on_priority(q, task, queue_priority);
 398}
 399
 400static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 401		struct rpc_task *task, unsigned long timeout,
 402		unsigned char queue_priority)
 403{
 404	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
 405		return;
 406	if (time_is_after_jiffies(timeout)) {
 407		__rpc_do_sleep_on_priority(q, task, queue_priority);
 408		__rpc_add_timer(q, task, timeout);
 409	} else
 410		task->tk_status = -ETIMEDOUT;
 411}
 412
 413static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
 414{
 415	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
 416		task->tk_callback = action;
 417}
 418
 419static bool rpc_sleep_check_activated(struct rpc_task *task)
 420{
 421	/* We shouldn't ever put an inactive task to sleep */
 422	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
 423		task->tk_status = -EIO;
 424		rpc_put_task_async(task);
 425		return false;
 426	}
 427	return true;
 428}
 429
 430void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
 431				rpc_action action, unsigned long timeout)
 432{
 433	if (!rpc_sleep_check_activated(task))
 434		return;
 435
 436	rpc_set_tk_callback(task, action);
 437
 438	/*
 439	 * Protect the queue operations.
 440	 */
 441	spin_lock(&q->lock);
 442	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
 443	spin_unlock(&q->lock);
 444}
 445EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
 446
 447void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 448				rpc_action action)
 449{
 450	if (!rpc_sleep_check_activated(task))
 451		return;
 452
 453	rpc_set_tk_callback(task, action);
 454
 455	WARN_ON_ONCE(task->tk_timeout != 0);
 456	/*
 457	 * Protect the queue operations.
 458	 */
 459	spin_lock(&q->lock);
 460	__rpc_sleep_on_priority(q, task, task->tk_priority);
 461	spin_unlock(&q->lock);
 462}
 463EXPORT_SYMBOL_GPL(rpc_sleep_on);
 464
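/*
 * Editor's sketch: a state-machine action typically queues the next
 * step and puts the task to sleep (names here are hypothetical):
 *
 *	static void example_action(struct rpc_task *task)
 *	{
 *		task->tk_action = example_next_step;
 *		rpc_sleep_on(&example_queue, task, NULL);
 *	}
 *
 * On wakeup, __rpc_execute() runs tk_callback first, if one was
 * passed as @action, and then resumes at tk_action.
 */
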
 465void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 466		struct rpc_task *task, unsigned long timeout, int priority)
 467{
 468	if (!rpc_sleep_check_activated(task))
 469		return;
 470
 471	priority -= RPC_PRIORITY_LOW;
 472	/*
 473	 * Protect the queue operations.
 474	 */
 475	spin_lock(&q->lock);
 476	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
 477	spin_unlock(&q->lock);
 478}
 479EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
 480
 481void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 482		int priority)
 483{
 484	if (!rpc_sleep_check_activated(task))
 485		return;
 486
 487	WARN_ON_ONCE(task->tk_timeout != 0);
 488	priority -= RPC_PRIORITY_LOW;
 489	/*
 490	 * Protect the queue operations.
 491	 */
 492	spin_lock(&q->lock);
 493	__rpc_sleep_on_priority(q, task, priority);
 494	spin_unlock(&q->lock);
 495}
 496EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
 497
 498/**
 499 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 500 * @wq: workqueue on which to run task
 501 * @queue: wait queue
 502 * @task: task to be woken up
 503 *
 504 * Caller must hold queue->lock, and have cleared the task queued flag.
 505 */
 506static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
 507		struct rpc_wait_queue *queue,
 508		struct rpc_task *task)
 509{
 510	/* Has the task been executed yet? If not, we cannot wake it up! */
 511	if (!RPC_IS_ACTIVATED(task)) {
 512		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
 513		return;
 514	}
 515
 516	trace_rpc_task_wakeup(task, queue);
 517
 518	__rpc_remove_wait_queue(queue, task);
 519
 520	rpc_make_runnable(wq, task);
 521}
 522
 523/*
 524 * Wake up a queued task while the queue lock is being held
 525 */
 526static struct rpc_task *
 527rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
 528		struct rpc_wait_queue *queue, struct rpc_task *task,
 529		bool (*action)(struct rpc_task *, void *), void *data)
 530{
 531	if (RPC_IS_QUEUED(task)) {
 532		smp_rmb();
 533		if (task->tk_waitqueue == queue) {
 534			if (action == NULL || action(task, data)) {
 535				__rpc_do_wake_up_task_on_wq(wq, queue, task);
 536				return task;
 537			}
 538		}
 539	}
 540	return NULL;
 541}
 542
 543/*
 544 * Wake up a queued task while the queue lock is being held
 545 */
 546static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
 547					  struct rpc_task *task)
 548{
 549	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
 550						   task, NULL, NULL);
 551}
 552
 553/*
 554 * Wake up a task on a specific queue
 555 */
 556void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 557{
 558	if (!RPC_IS_QUEUED(task))
 559		return;
 560	spin_lock(&queue->lock);
 561	rpc_wake_up_task_queue_locked(queue, task);
 562	spin_unlock(&queue->lock);
 563}
 564EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 565
 566static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
 567{
 568	task->tk_status = *(int *)status;
 569	return true;
 570}
 571
 572static void
 573rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
 574		struct rpc_task *task, int status)
 575{
 576	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
 577			task, rpc_task_action_set_status, &status);
 578}
 579
 580/**
 581 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 582 * @queue: pointer to rpc_wait_queue
 583 * @task: pointer to rpc_task
 584 * @status: integer error value
 585 *
 586 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 587 * set to the value of @status.
 588 */
 589void
 590rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
 591		struct rpc_task *task, int status)
 592{
 593	if (!RPC_IS_QUEUED(task))
 594		return;
 595	spin_lock(&queue->lock);
 596	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
 597	spin_unlock(&queue->lock);
 598}
 599
 600/*
 601 * Wake up the next task on a priority queue.
 602 */
 603static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 604{
 605	struct list_head *q;
 606	struct rpc_task *task;
 607
 608	/*
 609	 * Service the privileged queue.
 610	 */
 611	q = &queue->tasks[RPC_NR_PRIORITY - 1];
 612	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
 613		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 614		goto out;
 615	}
 616
 617	/*
 618	 * Service a batch of tasks from a single owner.
 619	 */
 620	q = &queue->tasks[queue->priority];
 621	if (!list_empty(q) && queue->nr) {
 622		queue->nr--;
 623		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 624		goto out;
 625	}
 626
 627	/*
 628	 * Service the next queue.
 629	 */
 630	do {
 631		if (q == &queue->tasks[0])
 632			q = &queue->tasks[queue->maxpriority];
 633		else
 634			q = q - 1;
 635		if (!list_empty(q)) {
 636			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 637			goto new_queue;
 638		}
 639	} while (q != &queue->tasks[queue->priority]);
 640
 641	rpc_reset_waitqueue_priority(queue);
 642	return NULL;
 643
 644new_queue:
 645	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
 646out:
 647	return task;
 648}
 649
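/*
 * Editor's worked example: with maxpriority == 3 and
 * queue->priority == 1, once the tasks[1] batch is exhausted the
 * do/while loop above checks tasks[0] -> tasks[3] -> tasks[2] ->
 * tasks[1], i.e. one full circle through every priority level before
 * the queue is declared empty.
 */
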
 650static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
 651{
 652	if (RPC_IS_PRIORITY(queue))
 653		return __rpc_find_next_queued_priority(queue);
 654	if (!list_empty(&queue->tasks[0]))
 655		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
 656	return NULL;
 657}
 658
 659/*
 660 * Wake up the first task on the wait queue.
 661 */
 662struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
 663		struct rpc_wait_queue *queue,
 664		bool (*func)(struct rpc_task *, void *), void *data)
 665{
 666	struct rpc_task	*task = NULL;
 667
 668	spin_lock(&queue->lock);
 669	task = __rpc_find_next_queued(queue);
 670	if (task != NULL)
 671		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
 672				task, func, data);
 673	spin_unlock(&queue->lock);
 674
 675	return task;
 676}
 677
 678/*
 679 * Wake up the first task on the wait queue.
 680 */
 681struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
 682		bool (*func)(struct rpc_task *, void *), void *data)
 683{
 684	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
 685}
 686EXPORT_SYMBOL_GPL(rpc_wake_up_first);
 687
 688static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
 689{
 690	return true;
 691}
 692
 693/*
 694 * Wake up the next task on the wait queue.
  695 */
 696struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
 697{
 698	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
 699}
 700EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 701
 702/**
 703 * rpc_wake_up_locked - wake up all rpc_tasks
 704 * @queue: rpc_wait_queue on which the tasks are sleeping
 705 *
 706 */
 707static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
 708{
 709	struct rpc_task *task;
 710
 711	for (;;) {
 712		task = __rpc_find_next_queued(queue);
 713		if (task == NULL)
 714			break;
 715		rpc_wake_up_task_queue_locked(queue, task);
 716	}
 717}
 718
 719/**
 720 * rpc_wake_up - wake up all rpc_tasks
 721 * @queue: rpc_wait_queue on which the tasks are sleeping
 722 *
 723 * Grabs queue->lock
 724 */
 725void rpc_wake_up(struct rpc_wait_queue *queue)
 726{
 727	spin_lock(&queue->lock);
 728	rpc_wake_up_locked(queue);
 729	spin_unlock(&queue->lock);
 730}
 731EXPORT_SYMBOL_GPL(rpc_wake_up);
 732
 733/**
 734 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 735 * @queue: rpc_wait_queue on which the tasks are sleeping
 736 * @status: status value to set
 737 */
 738static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
 739{
 740	struct rpc_task *task;
 741
 742	for (;;) {
 743		task = __rpc_find_next_queued(queue);
 744		if (task == NULL)
 745			break;
 746		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
 747	}
 748}
 749
 750/**
 751 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 752 * @queue: rpc_wait_queue on which the tasks are sleeping
 753 * @status: status value to set
 754 *
 755 * Grabs queue->lock
 756 */
 757void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 758{
 759	spin_lock(&queue->lock);
 760	rpc_wake_up_status_locked(queue, status);
 761	spin_unlock(&queue->lock);
 762}
 763EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 764
 765static void __rpc_queue_timer_fn(struct work_struct *work)
 766{
 767	struct rpc_wait_queue *queue = container_of(work,
 768			struct rpc_wait_queue,
 769			timer_list.dwork.work);
 770	struct rpc_task *task, *n;
 771	unsigned long expires, now, timeo;
 772
 773	spin_lock(&queue->lock);
 774	expires = now = jiffies;
 775	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 776		timeo = task->tk_timeout;
 777		if (time_after_eq(now, timeo)) {
 778			trace_rpc_task_timeout(task, task->tk_action);
 779			task->tk_status = -ETIMEDOUT;
 780			rpc_wake_up_task_queue_locked(queue, task);
 781			continue;
 782		}
 783		if (expires == now || time_after(expires, timeo))
 784			expires = timeo;
 785	}
 786	if (!list_empty(&queue->timer_list.list))
 787		rpc_set_queue_timer(queue, expires);
 788	spin_unlock(&queue->lock);
 789}
 790
 791static void __rpc_atrun(struct rpc_task *task)
 792{
 793	if (task->tk_status == -ETIMEDOUT)
 794		task->tk_status = 0;
 795}
 796
 797/*
 798 * Run a task at a later time
 799 */
 800void rpc_delay(struct rpc_task *task, unsigned long delay)
 801{
 802	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
 803}
 804EXPORT_SYMBOL_GPL(rpc_delay);
 805
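/*
 * Editor's sketch: the usual retry-with-backoff idiom seen in callers
 * (callback name hypothetical):
 *
 *	task->tk_action = example_retry;
 *	rpc_delay(task, 3 * HZ);	// resume ~3 seconds from now
 *
 * When the delay expires, the queue timer sets -ETIMEDOUT and wakes
 * the task; __rpc_atrun() then clears that status so the task resumes
 * cleanly.
 */
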
 806/*
 807 * Helper to call task->tk_ops->rpc_call_prepare
 808 */
 809void rpc_prepare_task(struct rpc_task *task)
 810{
 811	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 812}
 813
 814static void
 815rpc_init_task_statistics(struct rpc_task *task)
 816{
 817	/* Initialize retry counters */
 818	task->tk_garb_retry = 2;
 819	task->tk_cred_retry = 2;
 820	task->tk_rebind_retry = 2;
 821
 822	/* starting timestamp */
 823	task->tk_start = ktime_get();
 824}
 825
 826static void
 827rpc_reset_task_statistics(struct rpc_task *task)
 828{
 829	task->tk_timeouts = 0;
 830	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
 831	rpc_init_task_statistics(task);
 832}
 833
 834/*
 835 * Helper that calls task->tk_ops->rpc_call_done if it exists
 836 */
 837void rpc_exit_task(struct rpc_task *task)
 838{
 839	trace_rpc_task_end(task, task->tk_action);
 840	task->tk_action = NULL;
 841	if (task->tk_ops->rpc_count_stats)
 842		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
 843	else if (task->tk_client)
 844		rpc_count_iostats(task, task->tk_client->cl_metrics);
 845	if (task->tk_ops->rpc_call_done != NULL) {
 846		trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
 847		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 848		if (task->tk_action != NULL) {
 849			/* Always release the RPC slot and buffer memory */
 850			xprt_release(task);
 851			rpc_reset_task_statistics(task);
 852		}
 853	}
 854}
 855
 856void rpc_signal_task(struct rpc_task *task)
 857{
 858	struct rpc_wait_queue *queue;
 859
 860	if (!RPC_IS_ACTIVATED(task))
 861		return;
 862
 863	if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
 864		return;
 865	trace_rpc_task_signalled(task, task->tk_action);
 866	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 867	smp_mb__after_atomic();
 868	queue = READ_ONCE(task->tk_waitqueue);
 869	if (queue)
 870		rpc_wake_up_queued_task(queue, task);
 871}
 872
 873void rpc_task_try_cancel(struct rpc_task *task, int error)
 874{
 875	struct rpc_wait_queue *queue;
 876
 877	if (!rpc_task_set_rpc_status(task, error))
 878		return;
 879	queue = READ_ONCE(task->tk_waitqueue);
 880	if (queue)
 881		rpc_wake_up_queued_task(queue, task);
 882}
 883
 884void rpc_exit(struct rpc_task *task, int status)
 885{
 886	task->tk_status = status;
 887	task->tk_action = rpc_exit_task;
 888	rpc_wake_up_queued_task(task->tk_waitqueue, task);
 889}
 890EXPORT_SYMBOL_GPL(rpc_exit);
 891
 892void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 893{
 894	if (ops->rpc_release != NULL)
 895		ops->rpc_release(calldata);
 896}
 897
 898static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
 899{
 900	if (!xprt)
 901		return false;
 902	if (!atomic_read(&xprt->swapper))
 903		return false;
 904	return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
 905}
 906
 907/*
 908 * This is the RPC `scheduler' (or rather, the finite state machine).
 909 */
 910static void __rpc_execute(struct rpc_task *task)
 911{
 912	struct rpc_wait_queue *queue;
 913	int task_is_async = RPC_IS_ASYNC(task);
 914	int status = 0;
 915	unsigned long pflags = current->flags;
 916
 917	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 918	if (RPC_IS_QUEUED(task))
 919		return;
 920
 921	for (;;) {
 922		void (*do_action)(struct rpc_task *);
 923
 924		/*
 925		 * Perform the next FSM step or a pending callback.
 926		 *
 927		 * tk_action may be NULL if the task has been killed.
 928		 */
 929		do_action = task->tk_action;
 930		/* Tasks with an RPC error status should exit */
 931		if (do_action != rpc_exit_task &&
 932		    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
 933			task->tk_status = status;
 934			if (do_action != NULL)
 935				do_action = rpc_exit_task;
 936		}
 937		/* Callbacks override all actions */
 938		if (task->tk_callback) {
 939			do_action = task->tk_callback;
 940			task->tk_callback = NULL;
 941		}
 942		if (!do_action)
 943			break;
 944		if (RPC_IS_SWAPPER(task) ||
 945		    xprt_needs_memalloc(task->tk_xprt, task))
 946			current->flags |= PF_MEMALLOC;
 947
 948		trace_rpc_task_run_action(task, do_action);
 949		do_action(task);
 950
 951		/*
 952		 * Lockless check for whether task is sleeping or not.
 953		 */
 954		if (!RPC_IS_QUEUED(task)) {
 955			cond_resched();
 956			continue;
 957		}
 958
 959		/*
 960		 * The queue->lock protects against races with
 961		 * rpc_make_runnable().
 962		 *
 963		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
 964		 * rpc_task, rpc_make_runnable() can assign it to a
 965		 * different workqueue. We therefore cannot assume that the
  966		 * rpc_task pointer is still safe to dereference.
 967		 */
 968		queue = task->tk_waitqueue;
 969		spin_lock(&queue->lock);
 970		if (!RPC_IS_QUEUED(task)) {
 971			spin_unlock(&queue->lock);
 972			continue;
 973		}
 974		/* Wake up any task that has an exit status */
 975		if (READ_ONCE(task->tk_rpc_status) != 0) {
 976			rpc_wake_up_task_queue_locked(queue, task);
 977			spin_unlock(&queue->lock);
 978			continue;
 979		}
 980		rpc_clear_running(task);
 981		spin_unlock(&queue->lock);
 982		if (task_is_async)
 983			goto out;
 984
 985		/* sync task: sleep here */
 986		trace_rpc_task_sync_sleep(task, task->tk_action);
 987		status = out_of_line_wait_on_bit(&task->tk_runstate,
 988				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 989				TASK_KILLABLE|TASK_FREEZABLE);
 990		if (status < 0) {
 991			/*
 992			 * When a sync task receives a signal, it exits with
 993			 * -ERESTARTSYS. In order to catch any callbacks that
 994			 * clean up after sleeping on some queue, we don't
 995			 * break the loop here, but go around once more.
 996			 */
 997			rpc_signal_task(task);
 998		}
 999		trace_rpc_task_sync_wake(task, task->tk_action);
1000	}
1001
1002	/* Release all resources associated with the task */
1003	rpc_release_task(task);
1004out:
1005	current_restore_flags(pflags, PF_MEMALLOC);
1006}
1007
1008/*
1009 * User-visible entry point to the scheduler.
1010 *
1011 * This may be called recursively if e.g. an async NFS task updates
1012 * the attributes and finds that dirty pages must be flushed.
1013 * NOTE: Upon exit of this function the task is guaranteed to be
 1014 *	 released. In particular note that rpc_release_task() will have
1015 *	 been called, so your task memory may have been freed.
1016 */
1017void rpc_execute(struct rpc_task *task)
1018{
1019	bool is_async = RPC_IS_ASYNC(task);
1020
1021	rpc_set_active(task);
1022	rpc_make_runnable(rpciod_workqueue, task);
1023	if (!is_async) {
1024		unsigned int pflags = memalloc_nofs_save();
1025		__rpc_execute(task);
1026		memalloc_nofs_restore(pflags);
1027	}
1028}
1029
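/*
 * Editor's note: for an async task, rpc_execute() above only queues
 * the work; __rpc_execute() then runs on rpciod, and the task may
 * already have been freed by the time rpc_execute() returns. That is
 * why RPC_IS_ASYNC() is sampled before rpc_make_runnable(), not after.
 */
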
1030static void rpc_async_schedule(struct work_struct *work)
1031{
1032	unsigned int pflags = memalloc_nofs_save();
1033
1034	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
1035	memalloc_nofs_restore(pflags);
1036}
1037
1038/**
1039 * rpc_malloc - allocate RPC buffer resources
1040 * @task: RPC task
1041 *
1042 * A single memory region is allocated, which is split between the
1043 * RPC call and RPC reply that this task is being used for. When
1044 * this RPC is retired, the memory is released by calling rpc_free.
1045 *
1046 * To prevent rpciod from hanging, this allocator never sleeps,
 1047 * returning -ENOMEM and suppressing the warning if the request cannot
1048 * be serviced immediately. The caller can arrange to sleep in a
1049 * way that is safe for rpciod.
1050 *
1051 * Most requests are 'small' (under 2KiB) and can be serviced from a
1052 * mempool, ensuring that NFS reads and writes can always proceed,
1053 * and that there is good locality of reference for these buffers.
1054 */
1055int rpc_malloc(struct rpc_task *task)
1056{
1057	struct rpc_rqst *rqst = task->tk_rqstp;
1058	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
1059	struct rpc_buffer *buf;
1060	gfp_t gfp = rpc_task_gfp_mask();
1061
1062	size += sizeof(struct rpc_buffer);
1063	if (size <= RPC_BUFFER_MAXSIZE) {
1064		buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
1065		/* Reach for the mempool if dynamic allocation fails */
1066		if (!buf && RPC_IS_ASYNC(task))
1067			buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
1068	} else
1069		buf = kmalloc(size, gfp);
1070
1071	if (!buf)
1072		return -ENOMEM;
1073
1074	buf->len = size;
1075	rqst->rq_buffer = buf->data;
1076	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
1077	return 0;
1078}
1079EXPORT_SYMBOL_GPL(rpc_malloc);
1080
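/*
 * Editor's note on the layout set up by rpc_malloc() above; one
 * allocation backs both directions:
 *
 *	| struct rpc_buffer | call buffer  | reply buffer |
 *	                    ^ rq_buffer    ^ rq_rbuffer
 *	                      (rq_callsize)  (rq_rcvsize)
 *
 * The reply area simply starts rq_callsize bytes into buf->data.
 */
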
1081/**
1082 * rpc_free - free RPC buffer resources allocated via rpc_malloc
1083 * @task: RPC task
1084 *
1085 */
1086void rpc_free(struct rpc_task *task)
1087{
1088	void *buffer = task->tk_rqstp->rq_buffer;
1089	size_t size;
1090	struct rpc_buffer *buf;
1091
1092	buf = container_of(buffer, struct rpc_buffer, data);
1093	size = buf->len;
1094
1095	if (size <= RPC_BUFFER_MAXSIZE)
1096		mempool_free(buf, rpc_buffer_mempool);
1097	else
1098		kfree(buf);
1099}
1100EXPORT_SYMBOL_GPL(rpc_free);
1101
1102/*
1103 * Creation and deletion of RPC task structures
1104 */
1105static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1106{
1107	memset(task, 0, sizeof(*task));
1108	atomic_set(&task->tk_count, 1);
1109	task->tk_flags  = task_setup_data->flags;
1110	task->tk_ops = task_setup_data->callback_ops;
1111	task->tk_calldata = task_setup_data->callback_data;
1112	INIT_LIST_HEAD(&task->tk_task);
1113
1114	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1115	task->tk_owner = current->tgid;
1116
1117	/* Initialize workqueue for async tasks */
1118	task->tk_workqueue = task_setup_data->workqueue;
1119
1120	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1121			xprt_get(task_setup_data->rpc_xprt));
1122
1123	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1124
1125	if (task->tk_ops->rpc_call_prepare != NULL)
1126		task->tk_action = rpc_prepare_task;
1127
1128	rpc_init_task_statistics(task);
1129}
1130
1131static struct rpc_task *rpc_alloc_task(void)
1132{
1133	struct rpc_task *task;
1134
1135	task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
1136	if (task)
1137		return task;
1138	return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
1139}
1140
1141/*
1142 * Create a new task for the specified client.
1143 */
1144struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
1145{
1146	struct rpc_task	*task = setup_data->task;
1147	unsigned short flags = 0;
1148
1149	if (task == NULL) {
1150		task = rpc_alloc_task();
1151		if (task == NULL) {
1152			rpc_release_calldata(setup_data->callback_ops,
1153					     setup_data->callback_data);
1154			return ERR_PTR(-ENOMEM);
1155		}
1156		flags = RPC_TASK_DYNAMIC;
1157	}
1158
1159	rpc_init_task(task, setup_data);
1160	task->tk_flags |= flags;
1161	return task;
1162}
1163
1164/*
1165 * rpc_free_task - release rpc task and perform cleanups
1166 *
1167 * Note that we free up the rpc_task _after_ rpc_release_calldata()
1168 * in order to work around a workqueue dependency issue.
1169 *
1170 * Tejun Heo states:
1171 * "Workqueue currently considers two work items to be the same if they're
1172 * on the same address and won't execute them concurrently - ie. it
1173 * makes a work item which is queued again while being executed wait
1174 * for the previous execution to complete.
1175 *
1176 * If a work function frees the work item, and then waits for an event
1177 * which should be performed by another work item and *that* work item
1178 * recycles the freed work item, it can create a false dependency loop.
1179 * There really is no reliable way to detect this short of verifying
1180 * every memory free."
1181 *
1182 */
1183static void rpc_free_task(struct rpc_task *task)
1184{
1185	unsigned short tk_flags = task->tk_flags;
1186
1187	put_rpccred(task->tk_op_cred);
1188	rpc_release_calldata(task->tk_ops, task->tk_calldata);
1189
1190	if (tk_flags & RPC_TASK_DYNAMIC)
1191		mempool_free(task, rpc_task_mempool);
1192}
1193
1194static void rpc_async_release(struct work_struct *work)
1195{
1196	unsigned int pflags = memalloc_nofs_save();
1197
1198	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
1199	memalloc_nofs_restore(pflags);
1200}
1201
1202static void rpc_release_resources_task(struct rpc_task *task)
1203{
1204	xprt_release(task);
1205	if (task->tk_msg.rpc_cred) {
1206		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1207			put_cred(task->tk_msg.rpc_cred);
1208		task->tk_msg.rpc_cred = NULL;
1209	}
1210	rpc_task_release_client(task);
1211}
1212
1213static void rpc_final_put_task(struct rpc_task *task,
1214		struct workqueue_struct *q)
1215{
1216	if (q != NULL) {
1217		INIT_WORK(&task->u.tk_work, rpc_async_release);
1218		queue_work(q, &task->u.tk_work);
1219	} else
1220		rpc_free_task(task);
1221}
1222
1223static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1224{
1225	if (atomic_dec_and_test(&task->tk_count)) {
1226		rpc_release_resources_task(task);
1227		rpc_final_put_task(task, q);
1228	}
1229}
1230
1231void rpc_put_task(struct rpc_task *task)
1232{
1233	rpc_do_put_task(task, NULL);
1234}
1235EXPORT_SYMBOL_GPL(rpc_put_task);
1236
1237void rpc_put_task_async(struct rpc_task *task)
1238{
1239	rpc_do_put_task(task, task->tk_workqueue);
1240}
1241EXPORT_SYMBOL_GPL(rpc_put_task_async);
1242
1243static void rpc_release_task(struct rpc_task *task)
1244{
1245	WARN_ON_ONCE(RPC_IS_QUEUED(task));
1246
1247	rpc_release_resources_task(task);
1248
1249	/*
1250	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1251	 * so it should be safe to use task->tk_count as a test for whether
1252	 * or not any other processes still hold references to our rpc_task.
1253	 */
1254	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1255		/* Wake up anyone who may be waiting for task completion */
1256		if (!rpc_complete_task(task))
1257			return;
1258	} else {
1259		if (!atomic_dec_and_test(&task->tk_count))
1260			return;
1261	}
1262	rpc_final_put_task(task, task->tk_workqueue);
1263}
1264
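/*
 * Editor's note on the "1 + !RPC_IS_ASYNC(task)" test above: a
 * synchronous task carries one extra reference for the caller
 * sleeping in rpc_wait_for_completion_task(). If tk_count exceeds
 * the expected value, the task may still have waiters, so completion
 * must be signalled through rpc_complete_task().
 */
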
1265int rpciod_up(void)
1266{
1267	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1268}
1269
1270void rpciod_down(void)
1271{
1272	module_put(THIS_MODULE);
1273}
1274
1275/*
1276 * Start up the rpciod workqueue.
1277 */
1278static int rpciod_start(void)
1279{
1280	struct workqueue_struct *wq;
1281
1282	/*
 1283	 * Create the rpciod and xprtiod workqueues.
1284	 */
1285	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1286	if (!wq)
1287		goto out_failed;
1288	rpciod_workqueue = wq;
1289	wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
1290	if (!wq)
1291		goto free_rpciod;
1292	xprtiod_workqueue = wq;
1293	return 1;
1294free_rpciod:
1295	wq = rpciod_workqueue;
1296	rpciod_workqueue = NULL;
1297	destroy_workqueue(wq);
1298out_failed:
1299	return 0;
1300}
1301
1302static void rpciod_stop(void)
1303{
1304	struct workqueue_struct *wq = NULL;
1305
1306	if (rpciod_workqueue == NULL)
1307		return;
1308
1309	wq = rpciod_workqueue;
1310	rpciod_workqueue = NULL;
1311	destroy_workqueue(wq);
1312	wq = xprtiod_workqueue;
1313	xprtiod_workqueue = NULL;
1314	destroy_workqueue(wq);
1315}
1316
1317void
1318rpc_destroy_mempool(void)
1319{
1320	rpciod_stop();
1321	mempool_destroy(rpc_buffer_mempool);
1322	mempool_destroy(rpc_task_mempool);
1323	kmem_cache_destroy(rpc_task_slabp);
1324	kmem_cache_destroy(rpc_buffer_slabp);
1325	rpc_destroy_wait_queue(&delay_queue);
1326}
1327
1328int
1329rpc_init_mempool(void)
1330{
1331	/*
1332	 * The following is not strictly a mempool initialisation,
1333	 * but there is no harm in doing it here
1334	 */
1335	rpc_init_wait_queue(&delay_queue, "delayq");
1336	if (!rpciod_start())
1337		goto err_nomem;
1338
1339	rpc_task_slabp = kmem_cache_create("rpc_tasks",
1340					     sizeof(struct rpc_task),
1341					     0, SLAB_HWCACHE_ALIGN,
1342					     NULL);
1343	if (!rpc_task_slabp)
1344		goto err_nomem;
1345	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1346					     RPC_BUFFER_MAXSIZE,
1347					     0, SLAB_HWCACHE_ALIGN,
1348					     NULL);
1349	if (!rpc_buffer_slabp)
1350		goto err_nomem;
1351	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1352						    rpc_task_slabp);
1353	if (!rpc_task_mempool)
1354		goto err_nomem;
1355	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1356						      rpc_buffer_slabp);
1357	if (!rpc_buffer_mempool)
1358		goto err_nomem;
1359	return 0;
1360err_nomem:
1361	rpc_destroy_mempool();
1362	return -ENOMEM;
1363}
v4.10.11
 
   1/*
   2 * linux/net/sunrpc/sched.c
   3 *
   4 * Scheduling for synchronous and asynchronous RPC requests.
   5 *
   6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
   7 *
   8 * TCP NFS related read + write fixes
   9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  10 */
  11
  12#include <linux/module.h>
  13
  14#include <linux/sched.h>
  15#include <linux/interrupt.h>
  16#include <linux/slab.h>
  17#include <linux/mempool.h>
  18#include <linux/smp.h>
  19#include <linux/spinlock.h>
  20#include <linux/mutex.h>
  21#include <linux/freezer.h>
 
  22
  23#include <linux/sunrpc/clnt.h>
 
  24
  25#include "sunrpc.h"
  26
  27#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  28#define RPCDBG_FACILITY		RPCDBG_SCHED
  29#endif
  30
  31#define CREATE_TRACE_POINTS
  32#include <trace/events/sunrpc.h>
  33
  34/*
  35 * RPC slabs and memory pools
  36 */
  37#define RPC_BUFFER_MAXSIZE	(2048)
  38#define RPC_BUFFER_POOLSIZE	(8)
  39#define RPC_TASK_POOLSIZE	(8)
  40static struct kmem_cache	*rpc_task_slabp __read_mostly;
  41static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
  42static mempool_t	*rpc_task_mempool __read_mostly;
  43static mempool_t	*rpc_buffer_mempool __read_mostly;
  44
  45static void			rpc_async_schedule(struct work_struct *);
  46static void			 rpc_release_task(struct rpc_task *task);
  47static void __rpc_queue_timer_fn(unsigned long ptr);
  48
  49/*
  50 * RPC tasks sit here while waiting for conditions to improve.
  51 */
  52static struct rpc_wait_queue delay_queue;
  53
  54/*
  55 * rpciod-related stuff
  56 */
  57struct workqueue_struct *rpciod_workqueue __read_mostly;
  58struct workqueue_struct *xprtiod_workqueue __read_mostly;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  59
  60/*
  61 * Disable the timer for a given RPC task. Should be called with
  62 * queue->lock and bh_disabled in order to avoid races within
  63 * rpc_run_timer().
  64 */
  65static void
  66__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  67{
  68	if (task->tk_timeout == 0)
  69		return;
  70	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
  71	task->tk_timeout = 0;
  72	list_del(&task->u.tk_wait.timer_list);
  73	if (list_empty(&queue->timer_list.list))
  74		del_timer(&queue->timer_list.timer);
  75}
  76
  77static void
  78rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
  79{
 
  80	queue->timer_list.expires = expires;
  81	mod_timer(&queue->timer_list.timer, expires);
 
 
 
 
  82}
  83
  84/*
  85 * Set up a timer for the current task.
  86 */
  87static void
  88__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
 
  89{
  90	if (!task->tk_timeout)
  91		return;
  92
  93	dprintk("RPC: %5u setting alarm for %u ms\n",
  94		task->tk_pid, jiffies_to_msecs(task->tk_timeout));
  95
  96	task->u.tk_wait.expires = jiffies + task->tk_timeout;
  97	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
  98		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
  99	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 100}
 101
 102static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
 103{
 104	struct list_head *q = &queue->tasks[queue->priority];
 105	struct rpc_task *task;
 106
 107	if (!list_empty(q)) {
 108		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 109		if (task->tk_owner == queue->owner)
 110			list_move_tail(&task->u.tk_wait.list, q);
 111	}
 112}
 113
 114static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 115{
 116	if (queue->priority != priority) {
 117		/* Fairness: rotate the list when changing priority */
 118		rpc_rotate_queue_owner(queue);
 119		queue->priority = priority;
 
 120	}
 121}
 122
 123static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
 124{
 125	queue->owner = pid;
 126	queue->nr = RPC_BATCH_COUNT;
 127}
 128
 129static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
 130{
 131	rpc_set_waitqueue_priority(queue, queue->maxpriority);
 132	rpc_set_waitqueue_owner(queue, 0);
 133}
 134
 135/*
 136 * Add new request to a priority queue.
 137 */
 138static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 139		struct rpc_task *task,
 140		unsigned char queue_priority)
 141{
 142	struct list_head *q;
 143	struct rpc_task *t;
 144
 145	INIT_LIST_HEAD(&task->u.tk_wait.links);
 146	if (unlikely(queue_priority > queue->maxpriority))
 147		queue_priority = queue->maxpriority;
 148	if (queue_priority > queue->priority)
 149		rpc_set_waitqueue_priority(queue, queue_priority);
 150	q = &queue->tasks[queue_priority];
 151	list_for_each_entry(t, q, u.tk_wait.list) {
 152		if (t->tk_owner == task->tk_owner) {
 153			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
 
 
 
 
 154			return;
 155		}
 156	}
 
 157	list_add_tail(&task->u.tk_wait.list, q);
 158}
 159
 160/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 161 * Add new request to wait queue.
 162 *
 163 * Swapper tasks always get inserted at the head of the queue.
 164 * This should avoid many nasty memory deadlocks and hopefully
 165 * improve overall performance.
 166 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 167 */
 168static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 169		struct rpc_task *task,
 170		unsigned char queue_priority)
 171{
 172	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 173	if (RPC_IS_QUEUED(task))
 174		return;
 175
 176	if (RPC_IS_PRIORITY(queue))
 177		__rpc_add_wait_queue_priority(queue, task, queue_priority);
 178	else if (RPC_IS_SWAPPER(task))
 179		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 180	else
 181		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 182	task->tk_waitqueue = queue;
 183	queue->qlen++;
 184	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
 185	smp_wmb();
 186	rpc_set_queued(task);
 187
 188	dprintk("RPC: %5u added to queue %p \"%s\"\n",
 189			task->tk_pid, queue, rpc_qname(queue));
 190}
 191
 192/*
 193 * Remove request from a priority queue.
 194 */
 195static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
 196{
 197	struct rpc_task *t;
 198
 199	if (!list_empty(&task->u.tk_wait.links)) {
 200		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
 201		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
 202		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
 203	}
 204}
 205
 206/*
 207 * Remove request from queue.
 208 * Note: must be called with spin lock held.
 209 */
 210static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
 211{
 212	__rpc_disable_timer(queue, task);
 213	if (RPC_IS_PRIORITY(queue))
 214		__rpc_remove_wait_queue_priority(task);
 215	list_del(&task->u.tk_wait.list);
 
 216	queue->qlen--;
 217	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
 218			task->tk_pid, queue, rpc_qname(queue));
 219}
 220
 221static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 222{
 223	int i;
 224
 225	spin_lock_init(&queue->lock);
 226	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
 227		INIT_LIST_HEAD(&queue->tasks[i]);
 228	queue->maxpriority = nr_queues - 1;
 229	rpc_reset_waitqueue_priority(queue);
 230	queue->qlen = 0;
 231	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
 
 232	INIT_LIST_HEAD(&queue->timer_list.list);
 233	rpc_assign_waitqueue_name(queue, qname);
 234}
 235
 236void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 237{
 238	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
 239}
 240EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
 241
 242void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 243{
 244	__rpc_init_priority_wait_queue(queue, qname, 1);
 245}
 246EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 247
 248void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 249{
 250	del_timer_sync(&queue->timer_list.timer);
 251}
 252EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 253
 254static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
 255{
 256	freezable_schedule_unsafe();
 257	if (signal_pending_state(mode, current))
 258		return -ERESTARTSYS;
 259	return 0;
 260}
 261
 262#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
 263static void rpc_task_set_debuginfo(struct rpc_task *task)
 264{
 265	static atomic_t rpc_pid;
 
 
 
 
 266
 267	task->tk_pid = atomic_inc_return(&rpc_pid);
 
 
 
 
 268}
 269#else
 270static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 271{
 272}
 273#endif
 274
 275static void rpc_set_active(struct rpc_task *task)
 276{
 277	trace_rpc_task_begin(task->tk_client, task, NULL);
 278
 279	rpc_task_set_debuginfo(task);
 280	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 
 281}
 282
 283/*
 284 * Mark an RPC call as having completed by clearing the 'active' bit
 285 * and then waking up all tasks that were sleeping.
 286 */
 287static int rpc_complete_task(struct rpc_task *task)
 288{
 289	void *m = &task->tk_runstate;
 290	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
 291	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
 292	unsigned long flags;
 293	int ret;
 294
 295	trace_rpc_task_complete(task->tk_client, task, NULL);
 296
 297	spin_lock_irqsave(&wq->lock, flags);
 298	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 299	ret = atomic_dec_and_test(&task->tk_count);
 300	if (waitqueue_active(wq))
 301		__wake_up_locked_key(wq, TASK_NORMAL, &k);
 302	spin_unlock_irqrestore(&wq->lock, flags);
 303	return ret;
 304}
 305
 306/*
 307 * Allow callers to wait for completion of an RPC call
 308 *
 309 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 310 * to enforce taking of the wq->lock and hence avoid races with
 311 * rpc_complete_task().
 312 */
 313int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
 314{
 315	if (action == NULL)
 316		action = rpc_wait_bit_killable;
 317	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 318			action, TASK_KILLABLE);
 319}
 320EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
 321
 322/*
 323 * Make an RPC task runnable.
 324 *
 325 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 326 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 327 * the wait queue operation.
 328 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 329 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 330 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 331 * the RPC_TASK_RUNNING flag.
 332 */
 333static void rpc_make_runnable(struct workqueue_struct *wq,
 334		struct rpc_task *task)
 335{
 336	bool need_wakeup = !rpc_test_and_set_running(task);
 337
 338	rpc_clear_queued(task);
 339	if (!need_wakeup)
 340		return;
 341	if (RPC_IS_ASYNC(task)) {
 342		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 343		queue_work(wq, &task->u.tk_work);
 344	} else
 345		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 346}
 347
 348/*
 349 * Prepare for sleeping on a wait queue.
 350 * By always appending tasks to the list we ensure FIFO behavior.
 351 * NB: An RPC task will only receive interrupt-driven events as long
 352 * as it's on a wait queue.
 353 */
 
 
 
 
 
 
 
 
 
 354static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 355		struct rpc_task *task,
 356		rpc_action action,
 357		unsigned char queue_priority)
 358{
 359	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 360			task->tk_pid, rpc_qname(q), jiffies);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 361
 362	trace_rpc_task_sleep(task->tk_client, task, q);
 
 
 
 
 363
 364	__rpc_add_wait_queue(q, task, queue_priority);
 365
 366	WARN_ON_ONCE(task->tk_callback != NULL);
 367	task->tk_callback = action;
 368	__rpc_add_timer(q, task);
 
 
 
 369}
 
 370
 371void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 372				rpc_action action)
 373{
 374	/* We shouldn't ever put an inactive task to sleep */
 375	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
 376	if (!RPC_IS_ACTIVATED(task)) {
 377		task->tk_status = -EIO;
 378		rpc_put_task_async(task);
 379		return;
 380	}
 381
 
 
 
 382	/*
 383	 * Protect the queue operations.
 384	 */
 385	spin_lock_bh(&q->lock);
 386	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
 387	spin_unlock_bh(&q->lock);
 388}
 389EXPORT_SYMBOL_GPL(rpc_sleep_on);
 390
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 391void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 392		rpc_action action, int priority)
 393{
 394	/* We shouldn't ever put an inactive task to sleep */
 395	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
 396	if (!RPC_IS_ACTIVATED(task)) {
 397		task->tk_status = -EIO;
 398		rpc_put_task_async(task);
 399		return;
 400	}
 401
 
 
 402	/*
 403	 * Protect the queue operations.
 404	 */
 405	spin_lock_bh(&q->lock);
 406	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
 407	spin_unlock_bh(&q->lock);
 408}
 409EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
 410
 411/**
 412 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 413 * @wq: workqueue on which to run task
 414 * @queue: wait queue
 415 * @task: task to be woken up
 416 *
 417 * Caller must hold queue->lock, and have cleared the task queued flag.
 418 */
 419static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
 420		struct rpc_wait_queue *queue,
 421		struct rpc_task *task)
 422{
 423	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
 424			task->tk_pid, jiffies);
 425
 426	/* Has the task been executed yet? If not, we cannot wake it up! */
 427	if (!RPC_IS_ACTIVATED(task)) {
 428		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
 429		return;
 430	}
 431
 432	trace_rpc_task_wakeup(task->tk_client, task, queue);
 433
 434	__rpc_remove_wait_queue(queue, task);
 435
 436	rpc_make_runnable(wq, task);
 437
 438	dprintk("RPC:       __rpc_wake_up_task done\n");
 439}
 440
 441/*
 442 * Wake up a queued task while the queue lock is being held
 443 */
 444static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
 445		struct rpc_wait_queue *queue, struct rpc_task *task)
 
 
 446{
 447	if (RPC_IS_QUEUED(task)) {
 448		smp_rmb();
 449		if (task->tk_waitqueue == queue)
 450			__rpc_do_wake_up_task_on_wq(wq, queue, task);
 
 
 
 
 451	}
 
 452}
 453
 454/*
 455 * Wake up a queued task while the queue lock is being held
 456 */
 457static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 
 458{
 459	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
 
 460}
 461
 462/*
 463 * Wake up a task on a specific queue
 464 */
 465void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 466{
 467	spin_lock_bh(&queue->lock);
 
 
 468	rpc_wake_up_task_queue_locked(queue, task);
 469	spin_unlock_bh(&queue->lock);
 470}
 471EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 473/*
 474 * Wake up the next task on a priority queue.
 475 */
 476static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 477{
 478	struct list_head *q;
 479	struct rpc_task *task;
 480
 481	/*
 
 
 
 
 
 
 
 
 
 482	 * Service a batch of tasks from a single owner.
 483	 */
 484	q = &queue->tasks[queue->priority];
 485	if (!list_empty(q)) {
 486		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
 487		if (queue->owner == task->tk_owner) {
 488			if (--queue->nr)
 489				goto out;
 490			list_move_tail(&task->u.tk_wait.list, q);
 491		}
 492		/*
 493		 * Check if we need to switch queues.
 494		 */
 495		goto new_owner;
 496	}
 497
 498	/*
 499	 * Service the next queue.
 500	 */
 501	do {
 502		if (q == &queue->tasks[0])
 503			q = &queue->tasks[queue->maxpriority];
 504		else
 505			q = q - 1;
 506		if (!list_empty(q)) {
 507			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
 508			goto new_queue;
 509		}
 510	} while (q != &queue->tasks[queue->priority]);
 511
 512	rpc_reset_waitqueue_priority(queue);
 513	return NULL;
 514
 515new_queue:
 516	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
 517new_owner:
 518	rpc_set_waitqueue_owner(queue, task->tk_owner);
 519out:
 520	return task;
 521}
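/*
 * Annotation (not in the original): queue->nr is the remaining batch
 * quota for queue->owner.  While the head task belongs to that owner
 * and quota remains, it is handed out directly; once the quota runs
 * out, the owner's next task is rotated to the back of its priority
 * list and a fresh batch is begun via the new_owner label.  The
 * do/while walk round-robins across the other priority lists when the
 * current list is empty.
 */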
 522
 523static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
 524{
 525	if (RPC_IS_PRIORITY(queue))
 526		return __rpc_find_next_queued_priority(queue);
 527	if (!list_empty(&queue->tasks[0]))
 528		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
 529	return NULL;
 530}
 531
 532/*
 533 * Wake up the first task on the wait queue.
 534 */
 535struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
 536		struct rpc_wait_queue *queue,
 537		bool (*func)(struct rpc_task *, void *), void *data)
 538{
 539	struct rpc_task	*task = NULL;
 540
 541	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
 542			queue, rpc_qname(queue));
 543	spin_lock_bh(&queue->lock);
 544	task = __rpc_find_next_queued(queue);
 545	if (task != NULL) {
 546		if (func(task, data))
 547			rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
 548		else
 549			task = NULL;
 550	}
 551	spin_unlock_bh(&queue->lock);
 552
 553	return task;
 554}
 555
 556/*
 557 * Wake up the first task on the wait queue.
 558 */
 559struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
 560		bool (*func)(struct rpc_task *, void *), void *data)
 561{
 562	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
 563}
 564EXPORT_SYMBOL_GPL(rpc_wake_up_first);
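/*
 * Sketch (annotation; names hypothetical): the callback lets a caller
 * wake only tasks that satisfy a predicate, e.g. tasks bound to a
 * particular transport:
 *
 *	static bool example_match_xprt(struct rpc_task *task, void *data)
 *	{
 *		return task->tk_xprt == data;
 *	}
 *
 *	task = rpc_wake_up_first(queue, example_match_xprt, xprt);
 */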
 565
 566static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
 567{
 568	return true;
 569}
 570
 571/*
 572 * Wake up the next task on the wait queue.
 573 */
 574struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
 575{
 576	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
 577}
 578EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 579
 580/**
 581 * rpc_wake_up - wake up all rpc_tasks
 582 * @queue: rpc_wait_queue on which the tasks are sleeping
 583 *
 584 * Grabs queue->lock
 585 */
 586void rpc_wake_up(struct rpc_wait_queue *queue)
 587{
 588	struct list_head *head;
 589
 590	spin_lock_bh(&queue->lock);
 591	head = &queue->tasks[queue->maxpriority];
 592	for (;;) {
 593		while (!list_empty(head)) {
 594			struct rpc_task *task;
 595			task = list_first_entry(head,
 596					struct rpc_task,
 597					u.tk_wait.list);
 598			rpc_wake_up_task_queue_locked(queue, task);
 599		}
 600		if (head == &queue->tasks[0])
 601			break;
 602		head--;
 603	}
 604	spin_unlock_bh(&queue->lock);
 605}
 606EXPORT_SYMBOL_GPL(rpc_wake_up);
 607
 608/**
 609 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 610 * @queue: rpc_wait_queue on which the tasks are sleeping
 611 * @status: status value to set
 612 *
 613 * Grabs queue->lock
 614 */
 615void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 616{
 617	struct list_head *head;
 618
 619	spin_lock_bh(&queue->lock);
 620	head = &queue->tasks[queue->maxpriority];
 621	for (;;) {
 622		while (!list_empty(head)) {
 623			struct rpc_task *task;
 624			task = list_first_entry(head,
 625					struct rpc_task,
 626					u.tk_wait.list);
 627			task->tk_status = status;
 628			rpc_wake_up_task_queue_locked(queue, task);
 629		}
 630		if (head == &queue->tasks[0])
 631			break;
 632		head--;
 633	}
 634	spin_unlock_bh(&queue->lock);
 635}
 636EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 637
 638static void __rpc_queue_timer_fn(struct work_struct *work)
 639{
 640	struct rpc_wait_queue *queue = container_of(work, struct rpc_wait_queue, timer_list.dwork.work);
 641	struct rpc_task *task, *n;
 642	unsigned long expires, now, timeo;
 643
 644	spin_lock_bh(&queue->lock);
 645	expires = now = jiffies;
 646	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 647		timeo = task->tk_timeout;
 648		if (time_after_eq(now, timeo)) {
 649			dprintk("RPC: %5u timeout\n", task->tk_pid);
 650			task->tk_status = -ETIMEDOUT;
 651			rpc_wake_up_task_queue_locked(queue, task);
 652			continue;
 653		}
 654		if (expires == now || time_after(expires, timeo))
 655			expires = timeo;
 656	}
 657	if (!list_empty(&queue->timer_list.list))
 658		rpc_set_queue_timer(queue, expires);
 659	spin_unlock_bh(&queue->lock);
 660}
 661
 662static void __rpc_atrun(struct rpc_task *task)
 663{
 664	if (task->tk_status == -ETIMEDOUT)
 665		task->tk_status = 0;
 666}
 667
 668/*
 669 * Run a task at a later time
 670 */
 671void rpc_delay(struct rpc_task *task, unsigned long delay)
 672{
 673	task->tk_timeout = delay;
 674	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
 675}
 676EXPORT_SYMBOL_GPL(rpc_delay);
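/*
 * Usage sketch (annotation; the next-state function is hypothetical):
 * back off before retrying a transient failure.  __rpc_atrun() clears
 * the -ETIMEDOUT status set by the queue timer, so expiry of the
 * delay is not treated as an error:
 *
 *	task->tk_action = example_retry_state;
 *	rpc_delay(task, 3 * HZ);
 */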
 677
 678/*
 679 * Helper to call task->tk_ops->rpc_call_prepare
 680 */
 681void rpc_prepare_task(struct rpc_task *task)
 682{
 683	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 684}
 685
 686static void
 687rpc_init_task_statistics(struct rpc_task *task)
 688{
 689	/* Initialize retry counters */
 690	task->tk_garb_retry = 2;
 691	task->tk_cred_retry = 2;
 692	task->tk_rebind_retry = 2;
 693
 694	/* starting timestamp */
 695	task->tk_start = ktime_get();
 696}
 697
 698static void
 699rpc_reset_task_statistics(struct rpc_task *task)
 700{
 701	task->tk_timeouts = 0;
 702	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
 703
 704	rpc_init_task_statistics(task);
 705}
 706
 707/*
 708 * Helper that calls task->tk_ops->rpc_call_done if it exists
 709 */
 710void rpc_exit_task(struct rpc_task *task)
 711{
 712	task->tk_action = NULL;
 713	if (task->tk_ops->rpc_call_done != NULL) {
 714		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 715		if (task->tk_action != NULL) {
 716			WARN_ON(RPC_ASSASSINATED(task));
 717			/* Always release the RPC slot and buffer memory */
 718			xprt_release(task);
 719			rpc_reset_task_statistics(task);
 720		}
 721	}
 722}
 723
 724void rpc_exit(struct rpc_task *task, int status)
 725{
 726	task->tk_status = status;
 727	task->tk_action = rpc_exit_task;
 728	if (RPC_IS_QUEUED(task))
 729		rpc_wake_up_queued_task(task->tk_waitqueue, task);
 730}
 731EXPORT_SYMBOL_GPL(rpc_exit);
 732
 733void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 734{
 735	if (ops->rpc_release != NULL)
 736		ops->rpc_release(calldata);
 737}
 738
 739/*
 740 * This is the RPC `scheduler' (or rather, the finite state machine).
 741 */
 742static void __rpc_execute(struct rpc_task *task)
 743{
 744	struct rpc_wait_queue *queue;
 745	int task_is_async = RPC_IS_ASYNC(task);
 746	int status = 0;
 747
 748	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 749			task->tk_pid, task->tk_flags);
 750
 751	WARN_ON_ONCE(RPC_IS_QUEUED(task));
 752	if (RPC_IS_QUEUED(task))
 753		return;
 754
 755	for (;;) {
 756		void (*do_action)(struct rpc_task *);
 757
 758		/*
 759		 * Execute any pending callback first.
 760		 */
 761		do_action = task->tk_callback;
 762		task->tk_callback = NULL;
 763		if (do_action == NULL) {
 764			/*
 765			 * Perform the next FSM step.
 766			 * tk_action may be NULL if the task has been killed.
 767			 * In particular, note that rpc_killall_tasks may
 768			 * do this at any time, so beware when dereferencing.
 769			 */
 770			do_action = task->tk_action;
 771			if (do_action == NULL)
 772				break;
 773		}
 774		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
 775		do_action(task);
 776
 777		/*
 778		 * Lockless check for whether task is sleeping or not.
 779		 */
 780		if (!RPC_IS_QUEUED(task))
 781			continue;
 782		/*
 783		 * The queue->lock protects against races with
 784		 * rpc_make_runnable().
 785		 *
 786		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
 787		 * rpc_task, rpc_make_runnable() can assign it to a
 788		 * different workqueue. We therefore must not dereference the
 789		 * rpc_task pointer once RPC_TASK_RUNNING has been cleared.
 790		 */
 791		queue = task->tk_waitqueue;
 792		spin_lock_bh(&queue->lock);
 793		if (!RPC_IS_QUEUED(task)) {
 794			spin_unlock_bh(&queue->lock);
 795			continue;
 796		}
 797		rpc_clear_running(task);
 798		spin_unlock_bh(&queue->lock);
 799		if (task_is_async)
 800			return;
 801
 802		/* sync task: sleep here */
 803		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
 804		status = out_of_line_wait_on_bit(&task->tk_runstate,
 805				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 806				TASK_KILLABLE);
 807		if (status == -ERESTARTSYS) {
 808			/*
 809			 * When a sync task receives a signal, it exits with
 810			 * -ERESTARTSYS. In order to catch any callbacks that
 811			 * clean up after sleeping on some queue, we don't
 812			 * break the loop here, but go around once more.
 813			 */
 814			dprintk("RPC: %5u got signal\n", task->tk_pid);
 815			task->tk_flags |= RPC_TASK_KILLED;
 816			rpc_exit(task, -ERESTARTSYS);
 817		}
 818		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
 819	}
 820
 821	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
 822			task->tk_status);
 823	/* Release all resources associated with the task */
 824	rpc_release_task(task);
 825}
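/*
 * Annotation (not in the original): each loop iteration above is one
 * FSM step: run the one-shot tk_callback if set, otherwise tk_action.
 * If the step left the task queued, RPC_TASK_RUNNING is cleared under
 * queue->lock; an async task then returns (to be requeued by
 * rpc_make_runnable() on wakeup), while a sync task blocks in
 * out_of_line_wait_on_bit() until RPC_TASK_QUEUED is cleared.
 */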
 826
 827/*
 828 * User-visible entry point to the scheduler.
 829 *
 830 * This may be called recursively if e.g. an async NFS task updates
 831 * the attributes and finds that dirty pages must be flushed.
 832 * NOTE: Upon exit of this function the task is guaranteed to be
 833 *	 released. In particular note that tk_release() will have
 834 *	 been called, so your task memory may have been freed.
 835 */
 836void rpc_execute(struct rpc_task *task)
 837{
 838	bool is_async = RPC_IS_ASYNC(task);
 839
 840	rpc_set_active(task);
 841	rpc_make_runnable(rpciod_workqueue, task);
 842	if (!is_async)
 843		__rpc_execute(task);
 844}
 845
 846static void rpc_async_schedule(struct work_struct *work)
 847{
 848	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 849}
 850
 851/**
 852 * rpc_malloc - allocate RPC buffer resources
 853 * @task: RPC task
 854 *
 855 * A single memory region is allocated, which is split between the
 856 * RPC call and RPC reply that this task is being used for. When
 857 * this RPC is retired, the memory is released by calling rpc_free.
 858 *
 859 * To prevent rpciod from hanging, this allocator never sleeps,
 860 * returning -ENOMEM and suppressing allocation warnings if the
 861 * request cannot be serviced immediately. The caller can arrange to
 862 * sleep in a way that is safe for rpciod.
 863 *
 864 * Most requests are 'small' (under 2KiB) and can be serviced from a
 865 * mempool, ensuring that NFS reads and writes can always proceed,
 866 * and that there is good locality of reference for these buffers.
 867 *
 868 * In order to avoid memory starvation triggering more writebacks of
 869 * NFS requests, we avoid using GFP_KERNEL.
 870 */
 871int rpc_malloc(struct rpc_task *task)
 872{
 873	struct rpc_rqst *rqst = task->tk_rqstp;
 874	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
 875	struct rpc_buffer *buf;
 876	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
 877
 878	if (RPC_IS_SWAPPER(task))
 879		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
 880
 881	size += sizeof(struct rpc_buffer);
 882	if (size <= RPC_BUFFER_MAXSIZE)
 883		buf = mempool_alloc(rpc_buffer_mempool, gfp);
 884	else
 885		buf = kmalloc(size, gfp);
 886
 887	if (!buf)
 888		return -ENOMEM;
 889
 890	buf->len = size;
 891	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
 892			task->tk_pid, size, buf);
 893	rqst->rq_buffer = buf->data;
 894	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
 895	return 0;
 896}
 897EXPORT_SYMBOL_GPL(rpc_malloc);
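/*
 * Layout example (annotation): for rq_callsize = 1024 and
 * rq_rcvsize = 512, a single region of 1536 bytes plus the
 * struct rpc_buffer header is allocated (from the mempool, since
 * it is under RPC_BUFFER_MAXSIZE); rq_buffer points at the start
 * and the reply lands at rq_rbuffer = rq_buffer + 1024.
 */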
 898
 899/**
 900 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 901 * @task: RPC task
 902 *
 903 */
 904void rpc_free(struct rpc_task *task)
 905{
 906	void *buffer = task->tk_rqstp->rq_buffer;
 907	size_t size;
 908	struct rpc_buffer *buf;
 909
 910	buf = container_of(buffer, struct rpc_buffer, data);
 911	size = buf->len;
 912
 913	dprintk("RPC:       freeing buffer of size %zu at %p\n",
 914			size, buf);
 915
 916	if (size <= RPC_BUFFER_MAXSIZE)
 917		mempool_free(buf, rpc_buffer_mempool);
 918	else
 919		kfree(buf);
 920}
 921EXPORT_SYMBOL_GPL(rpc_free);
 922
 923/*
 924 * Creation and deletion of RPC task structures
 925 */
 926static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
 927{
 928	memset(task, 0, sizeof(*task));
 929	atomic_set(&task->tk_count, 1);
 930	task->tk_flags  = task_setup_data->flags;
 931	task->tk_ops = task_setup_data->callback_ops;
 932	task->tk_calldata = task_setup_data->callback_data;
 933	INIT_LIST_HEAD(&task->tk_task);
 934
 935	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
 936	task->tk_owner = current->tgid;
 937
 938	/* Initialize workqueue for async tasks */
 939	task->tk_workqueue = task_setup_data->workqueue;
 940
 941	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);
 942
 943	if (task->tk_ops->rpc_call_prepare != NULL)
 944		task->tk_action = rpc_prepare_task;
 945
 946	rpc_init_task_statistics(task);
 947
 948	dprintk("RPC:       new task initialized, procpid %u\n",
 949				task_pid_nr(current));
 950}
 951
 952static struct rpc_task *
 953rpc_alloc_task(void)
 954{
 955	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
 956}
 957
 958/*
 959 * Create a new task for the specified client.
 960 */
 961struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 962{
 963	struct rpc_task	*task = setup_data->task;
 964	unsigned short flags = 0;
 965
 966	if (task == NULL) {
 967		task = rpc_alloc_task();
 968		if (task == NULL) {
 969			rpc_release_calldata(setup_data->callback_ops,
 970					setup_data->callback_data);
 971			return ERR_PTR(-ENOMEM);
 972		}
 973		flags = RPC_TASK_DYNAMIC;
 974	}
 975
 976	rpc_init_task(task, setup_data);
 977	task->tk_flags |= flags;
 978	dprintk("RPC:       allocated task %p\n", task);
 979	return task;
 980}
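/*
 * Usage sketch (annotation; ops and data are hypothetical):
 *
 *	struct rpc_task_setup setup = {
 *		.callback_ops = &example_call_ops,
 *		.callback_data = data,
 *		.flags = RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task = rpc_new_task(&setup);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	rpc_execute(task);
 *	rpc_put_task(task);	(drop the caller's reference when done)
 */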
 981
 982/*
 983 * rpc_free_task - release rpc task and perform cleanups
 984 *
 985 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 986 * in order to work around a workqueue dependency issue.
 987 *
 988 * Tejun Heo states:
 989 * "Workqueue currently considers two work items to be the same if they're
 990 * on the same address and won't execute them concurrently - ie. it
 991 * makes a work item which is queued again while being executed wait
 992 * for the previous execution to complete.
 993 *
 994 * If a work function frees the work item, and then waits for an event
 995 * which should be performed by another work item and *that* work item
 996 * recycles the freed work item, it can create a false dependency loop.
 997 * There really is no reliable way to detect this short of verifying
 998 * every memory free."
 999 *
1000 */
1001static void rpc_free_task(struct rpc_task *task)
1002{
1003	unsigned short tk_flags = task->tk_flags;
1004
1005	rpc_release_calldata(task->tk_ops, task->tk_calldata);
1006
1007	if (tk_flags & RPC_TASK_DYNAMIC) {
1008		dprintk("RPC: %5u freeing task\n", task->tk_pid);
1009		mempool_free(task, rpc_task_mempool);
1010	}
1011}
1012
1013static void rpc_async_release(struct work_struct *work)
1014{
1015	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
1016}
1017
1018static void rpc_release_resources_task(struct rpc_task *task)
1019{
1020	xprt_release(task);
1021	if (task->tk_msg.rpc_cred) {
1022		put_rpccred(task->tk_msg.rpc_cred);
1023		task->tk_msg.rpc_cred = NULL;
1024	}
1025	rpc_task_release_client(task);
1026}
1027
1028static void rpc_final_put_task(struct rpc_task *task,
1029		struct workqueue_struct *q)
1030{
1031	if (q != NULL) {
1032		INIT_WORK(&task->u.tk_work, rpc_async_release);
1033		queue_work(q, &task->u.tk_work);
1034	} else
1035		rpc_free_task(task);
1036}
1037
1038static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1039{
1040	if (atomic_dec_and_test(&task->tk_count)) {
1041		rpc_release_resources_task(task);
1042		rpc_final_put_task(task, q);
1043	}
1044}
1045
1046void rpc_put_task(struct rpc_task *task)
1047{
1048	rpc_do_put_task(task, NULL);
1049}
1050EXPORT_SYMBOL_GPL(rpc_put_task);
1051
1052void rpc_put_task_async(struct rpc_task *task)
1053{
1054	rpc_do_put_task(task, task->tk_workqueue);
1055}
1056EXPORT_SYMBOL_GPL(rpc_put_task_async);
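/*
 * Annotation (not in the original): rpc_put_task() performs any final
 * cleanup in the caller's context, whereas rpc_put_task_async() defers
 * it to task->tk_workqueue via rpc_async_release(), which is the safe
 * choice when dropping the last reference from rpciod itself.
 */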
1057
1058static void rpc_release_task(struct rpc_task *task)
1059{
1060	dprintk("RPC: %5u release task\n", task->tk_pid);
1061
1062	WARN_ON_ONCE(RPC_IS_QUEUED(task));
1063
1064	rpc_release_resources_task(task);
1065
1066	/*
1067	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1068	 * so it should be safe to use task->tk_count as a test for whether
 1069	 * or not any other processes still hold references to our rpc_task.
 1070	 * The expected count is 1 (the execution reference) plus one more
	 * for sync tasks (the caller sleeping in rpc_execute()); any other
	 * value means someone may be waiting for completion and must be
	 * notified via rpc_complete_task().
	 */
1071	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1072		/* Wake up anyone who may be waiting for task completion */
1073		if (!rpc_complete_task(task))
1074			return;
1075	} else {
1076		if (!atomic_dec_and_test(&task->tk_count))
1077			return;
1078	}
1079	rpc_final_put_task(task, task->tk_workqueue);
1080}
1081
1082int rpciod_up(void)
1083{
1084	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1085}
1086
1087void rpciod_down(void)
1088{
1089	module_put(THIS_MODULE);
1090}
1091
1092/*
1093 * Start up the rpciod workqueue.
1094 */
1095static int rpciod_start(void)
1096{
1097	struct workqueue_struct *wq;
1098
1099	/*
 1100	 * Create the rpciod and xprtiod workqueues.
1101	 */
1102	dprintk("RPC:       creating workqueue rpciod\n");
1103	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
1104	if (!wq)
1105		goto out_failed;
1106	rpciod_workqueue = wq;
1107	/* Note: highpri because network receive is latency sensitive */
1108	wq = alloc_workqueue("xprtiod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1109	if (!wq)
1110		goto free_rpciod;
1111	xprtiod_workqueue = wq;
1112	return 1;
1113free_rpciod:
1114	wq = rpciod_workqueue;
1115	rpciod_workqueue = NULL;
1116	destroy_workqueue(wq);
1117out_failed:
1118	return 0;
1119}
1120
1121static void rpciod_stop(void)
1122{
1123	struct workqueue_struct *wq = NULL;
1124
1125	if (rpciod_workqueue == NULL)
1126		return;
1127	dprintk("RPC:       destroying workqueue rpciod\n");
1128
1129	wq = rpciod_workqueue;
1130	rpciod_workqueue = NULL;
1131	destroy_workqueue(wq);
1132	wq = xprtiod_workqueue;
1133	xprtiod_workqueue = NULL;
1134	destroy_workqueue(wq);
1135}
1136
1137void
1138rpc_destroy_mempool(void)
1139{
1140	rpciod_stop();
1141	mempool_destroy(rpc_buffer_mempool);
1142	mempool_destroy(rpc_task_mempool);
1143	kmem_cache_destroy(rpc_task_slabp);
1144	kmem_cache_destroy(rpc_buffer_slabp);
1145	rpc_destroy_wait_queue(&delay_queue);
1146}
1147
1148int
1149rpc_init_mempool(void)
1150{
1151	/*
1152	 * The following is not strictly a mempool initialisation,
1153	 * but there is no harm in doing it here
1154	 */
1155	rpc_init_wait_queue(&delay_queue, "delayq");
1156	if (!rpciod_start())
1157		goto err_nomem;
1158
1159	rpc_task_slabp = kmem_cache_create("rpc_tasks",
1160					     sizeof(struct rpc_task),
1161					     0, SLAB_HWCACHE_ALIGN,
1162					     NULL);
1163	if (!rpc_task_slabp)
1164		goto err_nomem;
1165	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1166					     RPC_BUFFER_MAXSIZE,
1167					     0, SLAB_HWCACHE_ALIGN,
1168					     NULL);
1169	if (!rpc_buffer_slabp)
1170		goto err_nomem;
1171	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1172						    rpc_task_slabp);
1173	if (!rpc_task_mempool)
1174		goto err_nomem;
1175	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1176						      rpc_buffer_slabp);
1177	if (!rpc_buffer_mempool)
1178		goto err_nomem;
1179	return 0;
1180err_nomem:
1181	rpc_destroy_mempool();
1182	return -ENOMEM;
1183}