net/sunrpc/sched.c at v3.1:
   1/*
   2 * linux/net/sunrpc/sched.c
   3 *
   4 * Scheduling for synchronous and asynchronous RPC requests.
   5 *
   6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
   7 *
   8 * TCP NFS related read + write fixes
   9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  10 */
  11
  12#include <linux/module.h>
  13
  14#include <linux/sched.h>
  15#include <linux/interrupt.h>
  16#include <linux/slab.h>
  17#include <linux/mempool.h>
  18#include <linux/smp.h>
  19#include <linux/spinlock.h>
  20#include <linux/mutex.h>
  21
  22#include <linux/sunrpc/clnt.h>
  23
  24#include "sunrpc.h"
  25
  26#ifdef RPC_DEBUG
  27#define RPCDBG_FACILITY		RPCDBG_SCHED
  28#endif
  29
  30/*
  31 * RPC slabs and memory pools
  32 */
  33#define RPC_BUFFER_MAXSIZE	(2048)
  34#define RPC_BUFFER_POOLSIZE	(8)
  35#define RPC_TASK_POOLSIZE	(8)
  36static struct kmem_cache	*rpc_task_slabp __read_mostly;
  37static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
  38static mempool_t	*rpc_task_mempool __read_mostly;
  39static mempool_t	*rpc_buffer_mempool __read_mostly;
  40
  41static void			rpc_async_schedule(struct work_struct *);
  42static void			 rpc_release_task(struct rpc_task *task);
  43static void __rpc_queue_timer_fn(unsigned long ptr);
  44
  45/*
  46 * RPC tasks sit here while waiting for conditions to improve.
  47 */
  48static struct rpc_wait_queue delay_queue;
  49
  50/*
  51 * rpciod-related stuff
  52 */
  53struct workqueue_struct *rpciod_workqueue;
  54
  55/*
  56 * Disable the timer for a given RPC task. Should be called with
  57 * queue->lock and bh_disabled in order to avoid races within
  58 * rpc_run_timer().
  59 */
  60static void
  61__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  62{
  63	if (task->tk_timeout == 0)
  64		return;
  65	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
  66	task->tk_timeout = 0;
  67	list_del(&task->u.tk_wait.timer_list);
  68	if (list_empty(&queue->timer_list.list))
  69		del_timer(&queue->timer_list.timer);
  70}
  71
  72static void
  73rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
  74{
  75	queue->timer_list.expires = expires;
  76	mod_timer(&queue->timer_list.timer, expires);
  77}
  78
  79/*
  80 * Set up a timer for the current task.
  81 */
  82static void
  83__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  84{
  85	if (!task->tk_timeout)
  86		return;
  87
  88	dprintk("RPC: %5u setting alarm for %lu ms\n",
  89			task->tk_pid, task->tk_timeout * 1000 / HZ);
  90
  91	task->u.tk_wait.expires = jiffies + task->tk_timeout;
  92	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
  93		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
  94	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
  95}
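
Note on __rpc_add_timer() above: each wait queue arms a single timer for whichever queued task expires first, and re-arms it only when a new task's deadline is sooner. The expiry comparisons use time_before(), which stays correct when jiffies wraps around. A minimal userspace sketch of that wraparound-safe comparison (demo names, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Same idea as the kernel's time_before(a, b): true if a is earlier
 * than b, even when the counter has wrapped around zero. */
#define time_before(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

int main(void)
{
	uint32_t now      = 0xfffffff0u; /* "jiffies" just before wrapping */
	uint32_t deadline = 0x00000010u; /* 0x20 ticks ahead, past the wrap */

	/* A plain '<' claims the deadline has already passed... */
	printf("plain <     : %d\n", now < deadline);              /* prints 0 */
	/* ...while the signed-difference trick sees it is still ahead. */
	printf("time_before : %d\n", time_before(now, deadline));  /* prints 1 */
	return 0;
}
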
  96
  97/*
  98 * Add new request to a priority queue.
  99 */
 100static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 101		struct rpc_task *task,
 102		unsigned char queue_priority)
 103{
 104	struct list_head *q;
 105	struct rpc_task *t;
 106
 107	INIT_LIST_HEAD(&task->u.tk_wait.links);
 108	q = &queue->tasks[queue_priority];
 109	if (unlikely(queue_priority > queue->maxpriority))
 110		q = &queue->tasks[queue->maxpriority];
 111	list_for_each_entry(t, q, u.tk_wait.list) {
 112		if (t->tk_owner == task->tk_owner) {
 113			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
 114			return;
 115		}
 116	}
 117	list_add_tail(&task->u.tk_wait.list, q);
 118}
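
__rpc_add_wait_queue_priority() above groups tasks by owner: the main list holds one task per owner, and later tasks from the same owner chain off that first task's u.tk_wait.links. A rough standalone sketch of the same two-level shape, using plain pointers instead of list_heads (hypothetical types, not the kernel's):

#include <stdio.h>

struct task {
	int owner;
	struct task *next;       /* siblings on the main queue (one per owner) */
	struct task *same_owner; /* chain of later tasks from the same owner */
};

/* Append t to *q, grouping by owner the way
 * __rpc_add_wait_queue_priority() does with u.tk_wait.links. */
static void enqueue(struct task **q, struct task *t)
{
	struct task *cur;

	for (cur = *q; cur; cur = cur->next) {
		if (cur->owner == t->owner) {		/* owner already queued: */
			struct task **p = &cur->same_owner;
			while (*p)
				p = &(*p)->same_owner;
			*p = t;				/* chain behind its group head */
			return;
		}
	}
	while (*q)					/* new owner: append to tail */
		q = &(*q)->next;
	*q = t;
}

int main(void)
{
	struct task a = {1}, b = {2}, c = {1}, *q = NULL, *t;

	enqueue(&q, &a);
	enqueue(&q, &b);
	enqueue(&q, &c);		/* groups behind 'a', not after 'b' */
	for (t = q; t; t = t->next) {
		struct task *s;
		printf("owner %d group:", t->owner);
		for (s = t->same_owner; s; s = s->same_owner)
			printf(" +%d", s->owner);
		printf("\n");
	}
	return 0;
}
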
 119
 120/*
 121 * Add new request to wait queue.
 122 *
 123 * Swapper tasks always get inserted at the head of the queue.
 124 * This should avoid many nasty memory deadlocks and hopefully
 125 * improve overall performance.
 126 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 127 */
 128static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 129		struct rpc_task *task,
 130		unsigned char queue_priority)
 131{
 132	BUG_ON (RPC_IS_QUEUED(task));
 133
 134	if (RPC_IS_PRIORITY(queue))
 135		__rpc_add_wait_queue_priority(queue, task, queue_priority);
 136	else if (RPC_IS_SWAPPER(task))
 137		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 138	else
 139		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 140	task->tk_waitqueue = queue;
 141	queue->qlen++;
 142	rpc_set_queued(task);
 143
 144	dprintk("RPC: %5u added to queue %p \"%s\"\n",
 145			task->tk_pid, queue, rpc_qname(queue));
 146}
 147
 148/*
 149 * Remove request from a priority queue.
 150 */
 151static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
 152{
 153	struct rpc_task *t;
 154
 155	if (!list_empty(&task->u.tk_wait.links)) {
 156		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
 157		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
 158		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
 159	}
 160}
 161
 162/*
 163 * Remove request from queue.
 164 * Note: must be called with spin lock held.
 165 */
 166static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
 167{
 168	__rpc_disable_timer(queue, task);
 169	if (RPC_IS_PRIORITY(queue))
 170		__rpc_remove_wait_queue_priority(task);
 171	list_del(&task->u.tk_wait.list);
 172	queue->qlen--;
 173	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
 174			task->tk_pid, queue, rpc_qname(queue));
 175}
 176
 177static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 178{
 179	queue->priority = priority;
 180	queue->count = 1 << (priority * 2);
 181}
 182
 183static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
 184{
 185	queue->owner = pid;
 186	queue->nr = RPC_BATCH_COUNT;
 187}
 188
 189static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
 190{
 191	rpc_set_waitqueue_priority(queue, queue->maxpriority);
 192	rpc_set_waitqueue_owner(queue, 0);
 193}
 194
 195static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 196{
 197	int i;
 198
 199	spin_lock_init(&queue->lock);
 200	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
 201		INIT_LIST_HEAD(&queue->tasks[i]);
 202	queue->maxpriority = nr_queues - 1;
 203	rpc_reset_waitqueue_priority(queue);
 204	queue->qlen = 0;
 205	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
 206	INIT_LIST_HEAD(&queue->timer_list.list);
 207#ifdef RPC_DEBUG
 208	queue->name = qname;
 209#endif
 210}
 211
 212void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 213{
 214	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
 215}
 216EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
 217
 218void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 219{
 220	__rpc_init_priority_wait_queue(queue, qname, 1);
 221}
 222EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 223
 224void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 225{
 226	del_timer_sync(&queue->timer_list.timer);
 227}
 228EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 229
 230static int rpc_wait_bit_killable(void *word)
 231{
 232	if (fatal_signal_pending(current))
 233		return -ERESTARTSYS;
 234	schedule();
 235	return 0;
 236}
 237
 238#ifdef RPC_DEBUG
 239static void rpc_task_set_debuginfo(struct rpc_task *task)
 240{
 241	static atomic_t rpc_pid;
 242
 243	task->tk_pid = atomic_inc_return(&rpc_pid);
 244}
 245#else
 246static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 247{
 248}
 249#endif
 250
 251static void rpc_set_active(struct rpc_task *task)
 252{
 253	rpc_task_set_debuginfo(task);
 254	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 255}
 256
 257/*
 258 * Mark an RPC call as having completed by clearing the 'active' bit
 259 * and then waking up all tasks that were sleeping.
 260 */
 261static int rpc_complete_task(struct rpc_task *task)
 262{
 263	void *m = &task->tk_runstate;
 264	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
 265	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
 266	unsigned long flags;
 267	int ret;
 268
 269	spin_lock_irqsave(&wq->lock, flags);
 270	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 271	ret = atomic_dec_and_test(&task->tk_count);
 272	if (waitqueue_active(wq))
 273		__wake_up_locked_key(wq, TASK_NORMAL, &k);
 274	spin_unlock_irqrestore(&wq->lock, flags);
 275	return ret;
 276}
 277
 278/*
 279 * Allow callers to wait for completion of an RPC call
 280 *
 281 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 282 * to enforce taking of the wq->lock and hence avoid races with
 283 * rpc_complete_task().
 284 */
 285int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 286{
 287	if (action == NULL)
 288		action = rpc_wait_bit_killable;
 289	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 290			action, TASK_KILLABLE);
 291}
 292EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
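
The completion pattern above clears the ACTIVE bit and wakes sleepers under the bit-waitqueue's lock, while waiters re-check the bit before sleeping, so a wakeup cannot slip in between the test and the sleep. A pthread analogue of that shape (demo only; the kernel uses bit waitqueues, not condition variables):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct completion_demo {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	int             active;
};

static struct completion_demo d = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1
};

static void *waiter(void *arg)
{
	pthread_mutex_lock(&d.lock);
	while (d.active)		/* recheck the flag, as wait_on_bit does */
		pthread_cond_wait(&d.done, &d.lock);
	pthread_mutex_unlock(&d.lock);
	printf("task completed, waiter released\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);			/* let the waiter block */

	pthread_mutex_lock(&d.lock);	/* complete: clear flag, then wake */
	d.active = 0;
	pthread_cond_broadcast(&d.done);
	pthread_mutex_unlock(&d.lock);

	pthread_join(t, NULL);
	return 0;
}
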
 293
 294/*
 295 * Make an RPC task runnable.
 296 *
 297 * Note: If the task is ASYNC, this must be called with
 298 * the spinlock held to protect the wait queue operation.
 299 */
 300static void rpc_make_runnable(struct rpc_task *task)
 301{
 302	rpc_clear_queued(task);
 303	if (rpc_test_and_set_running(task))
 304		return;
 305	if (RPC_IS_ASYNC(task)) {
 306		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 307		queue_work(rpciod_workqueue, &task->u.tk_work);
 308	} else
 309		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 310}
 311
 312/*
 313 * Prepare for sleeping on a wait queue.
 314 * By always appending tasks to the list we ensure FIFO behavior.
 315 * NB: An RPC task will only receive interrupt-driven events as long
 316 * as it's on a wait queue.
 317 */
 318static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 319		struct rpc_task *task,
 320		rpc_action action,
 321		unsigned char queue_priority)
 322{
 323	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 324			task->tk_pid, rpc_qname(q), jiffies);
 325
 326	__rpc_add_wait_queue(q, task, queue_priority);
 327
 328	BUG_ON(task->tk_callback != NULL);
 329	task->tk_callback = action;
 330	__rpc_add_timer(q, task);
 331}
 332
 333void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 334				rpc_action action)
 335{
 336	/* We shouldn't ever put an inactive task to sleep */
 337	BUG_ON(!RPC_IS_ACTIVATED(task));
 338
 339	/*
 340	 * Protect the queue operations.
 341	 */
 342	spin_lock_bh(&q->lock);
 343	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
 344	spin_unlock_bh(&q->lock);
 345}
 346EXPORT_SYMBOL_GPL(rpc_sleep_on);
 347
 348void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 349		rpc_action action, int priority)
 350{
 351	/* We shouldn't ever put an inactive task to sleep */
 352	BUG_ON(!RPC_IS_ACTIVATED(task));
 353
 354	/*
 355	 * Protect the queue operations.
 356	 */
 357	spin_lock_bh(&q->lock);
 358	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
 359	spin_unlock_bh(&q->lock);
 360}
 361
 362/**
 363 * __rpc_do_wake_up_task - wake up a single rpc_task
 364 * @queue: wait queue
 365 * @task: task to be woken up
 366 *
 367 * Caller must hold queue->lock, and have cleared the task queued flag.
 368 */
 369static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 370{
 371	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
 372			task->tk_pid, jiffies);
 373
 374	/* Has the task been executed yet? If not, we cannot wake it up! */
 375	if (!RPC_IS_ACTIVATED(task)) {
 376		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
 377		return;
 378	}
 379
 380	__rpc_remove_wait_queue(queue, task);
 381
 382	rpc_make_runnable(task);
 383
 384	dprintk("RPC:       __rpc_wake_up_task done\n");
 385}
 386
 387/*
 388 * Wake up a queued task while the queue lock is being held
 389 */
 390static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 391{
 392	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
 393		__rpc_do_wake_up_task(queue, task);
 394}
 395
 396/*
 397 * Tests whether rpc queue is empty
 398 */
 399int rpc_queue_empty(struct rpc_wait_queue *queue)
 400{
 401	int res;
 402
 403	spin_lock_bh(&queue->lock);
 404	res = queue->qlen;
 405	spin_unlock_bh(&queue->lock);
 406	return res == 0;
 407}
 408EXPORT_SYMBOL_GPL(rpc_queue_empty);
 409
 410/*
 411 * Wake up a task on a specific queue
 412 */
 413void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 414{
 415	spin_lock_bh(&queue->lock);
 416	rpc_wake_up_task_queue_locked(queue, task);
 417	spin_unlock_bh(&queue->lock);
 418}
 419EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 420
 421/*
 422 * Wake up the next task on a priority queue.
 423 */
 424static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
 425{
 426	struct list_head *q;
 427	struct rpc_task *task;
 428
 429	/*
 430	 * Service a batch of tasks from a single owner.
 431	 */
 432	q = &queue->tasks[queue->priority];
 433	if (!list_empty(q)) {
 434		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
 435		if (queue->owner == task->tk_owner) {
 436			if (--queue->nr)
 437				goto out;
 438			list_move_tail(&task->u.tk_wait.list, q);
 439		}
 440		/*
 441		 * Check if we need to switch queues.
 442		 */
 443		if (--queue->count)
 444			goto new_owner;
 445	}
 446
 447	/*
 448	 * Service the next queue.
 449	 */
 450	do {
 451		if (q == &queue->tasks[0])
 452			q = &queue->tasks[queue->maxpriority];
 453		else
 454			q = q - 1;
 455		if (!list_empty(q)) {
 456			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
 457			goto new_queue;
 458		}
 459	} while (q != &queue->tasks[queue->priority]);
 460
 461	rpc_reset_waitqueue_priority(queue);
 462	return NULL;
 463
 464new_queue:
 465	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
 466new_owner:
 467	rpc_set_waitqueue_owner(queue, task->tk_owner);
 468out:
 469	rpc_wake_up_task_queue_locked(queue, task);
 470	return task;
 471}
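
The fallback scan in __rpc_wake_up_next_priority() steps downward from the current priority, wraps from tasks[0] back to tasks[maxpriority], and gives up after one full lap. The same circular scan in isolation (hypothetical demo data):

#include <stdio.h>

#define NR_PRIO 4

static int find_next(int start, const int nonempty[NR_PRIO])
{
	int q = start;

	do {
		q = (q == 0) ? NR_PRIO - 1 : q - 1;	/* wrap downward */
		if (nonempty[q])
			return q;
	} while (q != start);
	return -1;		/* every priority level was empty */
}

int main(void)
{
	int nonempty[NR_PRIO] = { 1, 0, 0, 0 }; /* only priority 0 has tasks */

	printf("next from 2 -> %d\n", find_next(2, nonempty)); /* prints 0 */
	return 0;
}
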
 472
 473/*
 474 * Wake up the next task on the wait queue.
 475 */
 476struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
 477{
 478	struct rpc_task	*task = NULL;
 479
 480	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
 481			queue, rpc_qname(queue));
 482	spin_lock_bh(&queue->lock);
 483	if (RPC_IS_PRIORITY(queue))
 484		task = __rpc_wake_up_next_priority(queue);
 485	else {
 486		task_for_first(task, &queue->tasks[0])
 487			rpc_wake_up_task_queue_locked(queue, task);
 488	}
 489	spin_unlock_bh(&queue->lock);
 490
 491	return task;
 492}
 493EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 494
 495/**
 496 * rpc_wake_up - wake up all rpc_tasks
 497 * @queue: rpc_wait_queue on which the tasks are sleeping
 498 *
 499 * Grabs queue->lock
 500 */
 501void rpc_wake_up(struct rpc_wait_queue *queue)
 502{
 503	struct rpc_task *task, *next;
 504	struct list_head *head;
 505
 506	spin_lock_bh(&queue->lock);
 507	head = &queue->tasks[queue->maxpriority];
 508	for (;;) {
 509		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
 510			rpc_wake_up_task_queue_locked(queue, task);
 511		if (head == &queue->tasks[0])
 512			break;
 513		head--;
 514	}
 515	spin_unlock_bh(&queue->lock);
 516}
 517EXPORT_SYMBOL_GPL(rpc_wake_up);
 518
 519/**
 520 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 521 * @queue: rpc_wait_queue on which the tasks are sleeping
 522 * @status: status value to set
 523 *
 524 * Grabs queue->lock
 525 */
 526void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 527{
 528	struct rpc_task *task, *next;
 529	struct list_head *head;
 530
 531	spin_lock_bh(&queue->lock);
 532	head = &queue->tasks[queue->maxpriority];
 533	for (;;) {
 534		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
 535			task->tk_status = status;
 536			rpc_wake_up_task_queue_locked(queue, task);
 537		}
 538		if (head == &queue->tasks[0])
 539			break;
 540		head--;
 541	}
 542	spin_unlock_bh(&queue->lock);
 543}
 544EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 545
 546static void __rpc_queue_timer_fn(unsigned long ptr)
 547{
 548	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
 549	struct rpc_task *task, *n;
 550	unsigned long expires, now, timeo;
 551
 552	spin_lock(&queue->lock);
 553	expires = now = jiffies;
 554	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 555		timeo = task->u.tk_wait.expires;
 556		if (time_after_eq(now, timeo)) {
 557			dprintk("RPC: %5u timeout\n", task->tk_pid);
 558			task->tk_status = -ETIMEDOUT;
 559			rpc_wake_up_task_queue_locked(queue, task);
 560			continue;
 561		}
 562		if (expires == now || time_after(expires, timeo))
 563			expires = timeo;
 564	}
 565	if (!list_empty(&queue->timer_list.list))
 566		rpc_set_queue_timer(queue, expires);
 567	spin_unlock(&queue->lock);
 568}
 569
 570static void __rpc_atrun(struct rpc_task *task)
 571{
 572	task->tk_status = 0;
 573}
 574
 575/*
 576 * Run a task at a later time
 577 */
 578void rpc_delay(struct rpc_task *task, unsigned long delay)
 579{
 580	task->tk_timeout = delay;
 581	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
 582}
 583EXPORT_SYMBOL_GPL(rpc_delay);
 584
 585/*
 586 * Helper to call task->tk_ops->rpc_call_prepare
 587 */
 588void rpc_prepare_task(struct rpc_task *task)
 589{
 590	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 591}
 592
 593/*
 594 * Helper that calls task->tk_ops->rpc_call_done if it exists
 595 */
 596void rpc_exit_task(struct rpc_task *task)
 597{
 598	task->tk_action = NULL;
 599	if (task->tk_ops->rpc_call_done != NULL) {
 600		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 601		if (task->tk_action != NULL) {
 602			WARN_ON(RPC_ASSASSINATED(task));
 603			/* Always release the RPC slot and buffer memory */
 604			xprt_release(task);
 605		}
 606	}
 607}
 608
 609void rpc_exit(struct rpc_task *task, int status)
 610{
 611	task->tk_status = status;
 612	task->tk_action = rpc_exit_task;
 613	if (RPC_IS_QUEUED(task))
 614		rpc_wake_up_queued_task(task->tk_waitqueue, task);
 615}
 616EXPORT_SYMBOL_GPL(rpc_exit);
 617
 618void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 619{
 620	if (ops->rpc_release != NULL)
 621		ops->rpc_release(calldata);
 622}
 623
 624/*
 625 * This is the RPC `scheduler' (or rather, the finite state machine).
 626 */
 627static void __rpc_execute(struct rpc_task *task)
 628{
 629	struct rpc_wait_queue *queue;
 630	int task_is_async = RPC_IS_ASYNC(task);
 631	int status = 0;
 632
 633	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 634			task->tk_pid, task->tk_flags);
 635
 636	BUG_ON(RPC_IS_QUEUED(task));
 637
 638	for (;;) {
 639		void (*do_action)(struct rpc_task *);
 640
 641		/*
 642		 * Execute any pending callback first.
 643		 */
 644		do_action = task->tk_callback;
 645		task->tk_callback = NULL;
 646		if (do_action == NULL) {
 647			/*
 648			 * Perform the next FSM step.
 649			 * tk_action may be NULL if the task has been killed.
 650			 * In particular, note that rpc_killall_tasks may
 651			 * do this at any time, so beware when dereferencing.
 652			 */
 653			do_action = task->tk_action;
 654			if (do_action == NULL)
 655				break;
 656		}
 657		do_action(task);
 658
 659		/*
 660		 * Lockless check for whether task is sleeping or not.
 661		 */
 662		if (!RPC_IS_QUEUED(task))
 663			continue;
 664		/*
 665		 * The queue->lock protects against races with
 666		 * rpc_make_runnable().
 667		 *
 668		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
 669		 * rpc_task, rpc_make_runnable() can assign it to a
 670		 * different workqueue. We therefore cannot assume that the
 671		 * rpc_task pointer may still be dereferenced.
 672		 */
 673		queue = task->tk_waitqueue;
 674		spin_lock_bh(&queue->lock);
 675		if (!RPC_IS_QUEUED(task)) {
 676			spin_unlock_bh(&queue->lock);
 677			continue;
 678		}
 679		rpc_clear_running(task);
 680		spin_unlock_bh(&queue->lock);
 681		if (task_is_async)
 682			return;
 683
 684		/* sync task: sleep here */
 685		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
 686		status = out_of_line_wait_on_bit(&task->tk_runstate,
 687				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 688				TASK_KILLABLE);
 689		if (status == -ERESTARTSYS) {
 690			/*
 691			 * When a sync task receives a signal, it exits with
 692			 * -ERESTARTSYS. In order to catch any callbacks that
 693			 * clean up after sleeping on some queue, we don't
 694			 * break the loop here, but go around once more.
 695			 */
 696			dprintk("RPC: %5u got signal\n", task->tk_pid);
 697			task->tk_flags |= RPC_TASK_KILLED;
 698			rpc_exit(task, -ERESTARTSYS);
 699		}
 700		rpc_set_running(task);
 701		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
 702	}
 703
 704	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
 705			task->tk_status);
 706	/* Release all resources associated with the task */
 707	rpc_release_task(task);
 708}
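
__rpc_execute() above treats the RPC call as a finite state machine: tk_callback and tk_action are function pointers, each action may install the next one, and a NULL action ends the loop. Stripped of queueing and locking, the loop reduces to something like this (hypothetical states, not the real RPC call states):

#include <stdio.h>
#include <stddef.h>

struct fsm_task;
typedef void (*fsm_action)(struct fsm_task *);

struct fsm_task {
	fsm_action action;
	int        step;
};

static void state_done(struct fsm_task *t)
{
	printf("step %d: done\n", t->step);
	t->action = NULL;		/* NULL action terminates the loop */
}

static void state_start(struct fsm_task *t)
{
	printf("step %d: start\n", t->step++);
	t->action = state_done;		/* install the next state */
}

static void fsm_execute(struct fsm_task *t)
{
	while (t->action != NULL) {
		fsm_action do_action = t->action;
		do_action(t);
	}
}

int main(void)
{
	struct fsm_task t = { state_start, 0 };
	fsm_execute(&t);
	return 0;
}
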
 709
 710/*
 711 * User-visible entry point to the scheduler.
 712 *
 713 * This may be called recursively if e.g. an async NFS task updates
 714 * the attributes and finds that dirty pages must be flushed.
 715 * NOTE: Upon exit of this function the task is guaranteed to be
 716 *	 released. In particular note that tk_release() will have
 717 *	 been called, so your task memory may have been freed.
 718 */
 719void rpc_execute(struct rpc_task *task)
 720{
 721	rpc_set_active(task);
 722	rpc_make_runnable(task);
 723	if (!RPC_IS_ASYNC(task))
 724		__rpc_execute(task);
 725}
 726
 727static void rpc_async_schedule(struct work_struct *work)
 728{
 729	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 730}
 731
 732/**
 733 * rpc_malloc - allocate an RPC buffer
 734 * @task: RPC task that will use this buffer
 735 * @size: requested byte size
 736 *
 737 * To prevent rpciod from hanging, this allocator never sleeps,
 738 * returning NULL if the request cannot be serviced immediately.
 739 * The caller can arrange to sleep in a way that is safe for rpciod.
 740 *
 741 * Most requests are 'small' (under 2KiB) and can be serviced from a
 742 * mempool, ensuring that NFS reads and writes can always proceed,
 743 * and that there is good locality of reference for these buffers.
 744 *
 745 * In order to avoid memory starvation triggering more writebacks of
 746 * NFS requests, we avoid using GFP_KERNEL.
 747 */
 748void *rpc_malloc(struct rpc_task *task, size_t size)
 749{
 750	struct rpc_buffer *buf;
 751	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
 752
 753	size += sizeof(struct rpc_buffer);
 754	if (size <= RPC_BUFFER_MAXSIZE)
 755		buf = mempool_alloc(rpc_buffer_mempool, gfp);
 756	else
 757		buf = kmalloc(size, gfp);
 758
 759	if (!buf)
 760		return NULL;
 761
 762	buf->len = size;
 763	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
 764			task->tk_pid, size, buf);
 765	return &buf->data;
 766}
 767EXPORT_SYMBOL_GPL(rpc_malloc);
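
rpc_malloc() above prefers the mempool for small buffers so allocations can still succeed while memory is being reclaimed. A minimal module pairing a slab cache with a mempool the same way (a hedged sketch, all names hypothetical and not part of sched.c):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>

#define DEMO_POOLSIZE 8

static struct kmem_cache *demo_slab;
static mempool_t *demo_pool;

static int __init demo_init(void)
{
	demo_slab = kmem_cache_create("demo_bufs", 2048, 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_slab)
		return -ENOMEM;

	/* At least DEMO_POOLSIZE buffers stay reserved, so an allocation
	 * with GFP_NOWAIT can succeed even under memory pressure. */
	demo_pool = mempool_create_slab_pool(DEMO_POOLSIZE, demo_slab);
	if (!demo_pool) {
		kmem_cache_destroy(demo_slab);
		return -ENOMEM;
	}

	{
		void *buf = mempool_alloc(demo_pool, GFP_NOWAIT);
		if (buf)
			mempool_free(buf, demo_pool);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	mempool_destroy(demo_pool);
	kmem_cache_destroy(demo_slab);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
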
 768
 769/**
 770 * rpc_free - free buffer allocated via rpc_malloc
 771 * @buffer: buffer to free
 772 *
 773 */
 774void rpc_free(void *buffer)
 775{
 776	size_t size;
 777	struct rpc_buffer *buf;
 778
 779	if (!buffer)
 780		return;
 781
 782	buf = container_of(buffer, struct rpc_buffer, data);
 783	size = buf->len;
 784
 785	dprintk("RPC:       freeing buffer of size %zu at %p\n",
 786			size, buf);
 787
 788	if (size <= RPC_BUFFER_MAXSIZE)
 789		mempool_free(buf, rpc_buffer_mempool);
 790	else
 791		kfree(buf);
 792}
 793EXPORT_SYMBOL_GPL(rpc_free);
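
rpc_free() recovers the enclosing struct rpc_buffer from the data pointer via container_of(). The same pointer arithmetic in standalone C (demo struct, not the kernel's):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_buffer {
	size_t len;
	char   data[32];
};

int main(void)
{
	struct demo_buffer b = { sizeof(b), "payload" };
	char *handle = b.data;			/* what the caller sees */

	/* Step back from the member to the enclosing struct. */
	struct demo_buffer *back = container_of(handle, struct demo_buffer, data);
	printf("recovered len=%zu data=%s\n", back->len, back->data);
	return 0;
}
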
 794
 795/*
 796 * Creation and deletion of RPC task structures
 797 */
 798static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
 799{
 800	memset(task, 0, sizeof(*task));
 801	atomic_set(&task->tk_count, 1);
 802	task->tk_flags  = task_setup_data->flags;
 803	task->tk_ops = task_setup_data->callback_ops;
 804	task->tk_calldata = task_setup_data->callback_data;
 805	INIT_LIST_HEAD(&task->tk_task);
 806
 807	/* Initialize retry counters */
 808	task->tk_garb_retry = 2;
 809	task->tk_cred_retry = 2;
 810	task->tk_rebind_retry = 2;
 811
 812	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
 813	task->tk_owner = current->tgid;
 814
 815	/* Initialize workqueue for async tasks */
 816	task->tk_workqueue = task_setup_data->workqueue;
 817
 818	if (task->tk_ops->rpc_call_prepare != NULL)
 819		task->tk_action = rpc_prepare_task;
 820
 821	/* starting timestamp */
 822	task->tk_start = ktime_get();
 823
 824	dprintk("RPC:       new task initialized, procpid %u\n",
 825				task_pid_nr(current));
 826}
 827
 828static struct rpc_task *
 829rpc_alloc_task(void)
 830{
 831	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 832}
 833
 834/*
 835 * Create a new task for the specified client.
 836 */
 837struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 838{
 839	struct rpc_task	*task = setup_data->task;
 840	unsigned short flags = 0;
 841
 842	if (task == NULL) {
 843		task = rpc_alloc_task();
 844		if (task == NULL) {
 845			rpc_release_calldata(setup_data->callback_ops,
 846					setup_data->callback_data);
 847			return ERR_PTR(-ENOMEM);
 848		}
 849		flags = RPC_TASK_DYNAMIC;
 850	}
 851
 852	rpc_init_task(task, setup_data);
 853	task->tk_flags |= flags;
 854	dprintk("RPC:       allocated task %p\n", task);
 855	return task;
 856}
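
rpc_new_task() accepts a caller-embedded task or allocates one from the mempool, and marks only the latter RPC_TASK_DYNAMIC so rpc_free_task() knows whether it owns the memory. A small standalone version of that ownership convention (hypothetical names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TASK_DYNAMIC 0x01

struct demo_task {
	unsigned short flags;
	int            id;
};

static struct demo_task *new_task(struct demo_task *caller_provided, int id)
{
	struct demo_task *t = caller_provided;
	unsigned short flags = 0;

	if (t == NULL) {
		t = malloc(sizeof(*t));
		if (t == NULL)
			return NULL;
		flags = TASK_DYNAMIC;	/* we allocated it, so we may free it */
	}
	memset(t, 0, sizeof(*t));
	t->flags |= flags;
	t->id = id;
	return t;
}

static void free_task(struct demo_task *t)
{
	if (t->flags & TASK_DYNAMIC)	/* embedded tasks are left alone */
		free(t);
}

int main(void)
{
	struct demo_task embedded;	/* e.g. part of a larger object */
	struct demo_task *a = new_task(&embedded, 1);
	struct demo_task *b = new_task(NULL, 2);

	printf("a dynamic=%d b dynamic=%d\n",
	       !!(a->flags & TASK_DYNAMIC), !!(b->flags & TASK_DYNAMIC));
	free_task(a);
	free_task(b);
	return 0;
}
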
 857
 858static void rpc_free_task(struct rpc_task *task)
 859{
 860	const struct rpc_call_ops *tk_ops = task->tk_ops;
 861	void *calldata = task->tk_calldata;
 862
 863	if (task->tk_flags & RPC_TASK_DYNAMIC) {
 864		dprintk("RPC: %5u freeing task\n", task->tk_pid);
 865		mempool_free(task, rpc_task_mempool);
 866	}
 867	rpc_release_calldata(tk_ops, calldata);
 868}
 869
 870static void rpc_async_release(struct work_struct *work)
 871{
 872	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 873}
 874
 875static void rpc_release_resources_task(struct rpc_task *task)
 876{
 877	if (task->tk_rqstp)
 878		xprt_release(task);
 879	if (task->tk_msg.rpc_cred) {
 880		put_rpccred(task->tk_msg.rpc_cred);
 881		task->tk_msg.rpc_cred = NULL;
 882	}
 883	rpc_task_release_client(task);
 884}
 885
 886static void rpc_final_put_task(struct rpc_task *task,
 887		struct workqueue_struct *q)
 888{
 889	if (q != NULL) {
 890		INIT_WORK(&task->u.tk_work, rpc_async_release);
 891		queue_work(q, &task->u.tk_work);
 892	} else
 893		rpc_free_task(task);
 894}
 895
 896static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
 897{
 898	if (atomic_dec_and_test(&task->tk_count)) {
 899		rpc_release_resources_task(task);
 900		rpc_final_put_task(task, q);
 901	}
 902}
 903
 904void rpc_put_task(struct rpc_task *task)
 905{
 906	rpc_do_put_task(task, NULL);
 907}
 908EXPORT_SYMBOL_GPL(rpc_put_task);
 909
 910void rpc_put_task_async(struct rpc_task *task)
 911{
 912	rpc_do_put_task(task, task->tk_workqueue);
 913}
 914EXPORT_SYMBOL_GPL(rpc_put_task_async);
 915
 916static void rpc_release_task(struct rpc_task *task)
 917{
 918	dprintk("RPC: %5u release task\n", task->tk_pid);
 919
 920	BUG_ON (RPC_IS_QUEUED(task));
 921
 922	rpc_release_resources_task(task);
 923
 924	/*
 925	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
 926	 * so it should be safe to use task->tk_count as a test for whether
 927	 * or not any other processes still hold references to our rpc_task.
 928	 */
 929	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
 930		/* Wake up anyone who may be waiting for task completion */
 931		if (!rpc_complete_task(task))
 932			return;
 933	} else {
 934		if (!atomic_dec_and_test(&task->tk_count))
 935			return;
 936	}
 937	rpc_final_put_task(task, task->tk_workqueue);
 938}
 939
 940int rpciod_up(void)
 941{
 942	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
 943}
 944
 945void rpciod_down(void)
 946{
 947	module_put(THIS_MODULE);
 948}
 949
 950/*
 951 * Start up the rpciod workqueue.
 952 */
 953static int rpciod_start(void)
 954{
 955	struct workqueue_struct *wq;
 956
 957	/*
 958	 * Create the rpciod thread and wait for it to start.
 959	 */
 960	dprintk("RPC:       creating workqueue rpciod\n");
 961	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
 962	rpciod_workqueue = wq;
 963	return rpciod_workqueue != NULL;
 964}
 965
 966static void rpciod_stop(void)
 967{
 968	struct workqueue_struct *wq = NULL;
 969
 970	if (rpciod_workqueue == NULL)
 971		return;
 972	dprintk("RPC:       destroying workqueue rpciod\n");
 973
 974	wq = rpciod_workqueue;
 975	rpciod_workqueue = NULL;
 976	destroy_workqueue(wq);
 977}
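
rpciod_start()/rpciod_stop() above manage a WQ_MEM_RECLAIM workqueue, which keeps a rescuer thread so queued work can make progress even under memory pressure, the property rpciod needs while writing out NFS pages. A minimal module using the same calls (a sketch with demo names):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Flushes pending work before tearing the queue down. */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
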
 978
 979void
 980rpc_destroy_mempool(void)
 981{
 982	rpciod_stop();
 983	if (rpc_buffer_mempool)
 984		mempool_destroy(rpc_buffer_mempool);
 985	if (rpc_task_mempool)
 986		mempool_destroy(rpc_task_mempool);
 987	if (rpc_task_slabp)
 988		kmem_cache_destroy(rpc_task_slabp);
 989	if (rpc_buffer_slabp)
 990		kmem_cache_destroy(rpc_buffer_slabp);
 991	rpc_destroy_wait_queue(&delay_queue);
 992}
 993
 994int
 995rpc_init_mempool(void)
 996{
 997	/*
 998	 * The following is not strictly a mempool initialisation,
 999	 * but there is no harm in doing it here
1000	 */
1001	rpc_init_wait_queue(&delay_queue, "delayq");
1002	if (!rpciod_start())
1003		goto err_nomem;
1004
1005	rpc_task_slabp = kmem_cache_create("rpc_tasks",
1006					     sizeof(struct rpc_task),
1007					     0, SLAB_HWCACHE_ALIGN,
1008					     NULL);
1009	if (!rpc_task_slabp)
1010		goto err_nomem;
1011	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1012					     RPC_BUFFER_MAXSIZE,
1013					     0, SLAB_HWCACHE_ALIGN,
1014					     NULL);
1015	if (!rpc_buffer_slabp)
1016		goto err_nomem;
1017	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1018						    rpc_task_slabp);
1019	if (!rpc_task_mempool)
1020		goto err_nomem;
1021	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1022						      rpc_buffer_slabp);
1023	if (!rpc_buffer_mempool)
1024		goto err_nomem;
1025	return 0;
1026err_nomem:
1027	rpc_destroy_mempool();
1028	return -ENOMEM;
1029}
net/sunrpc/sched.c at v3.5.6:
   1/*
   2 * linux/net/sunrpc/sched.c
   3 *
   4 * Scheduling for synchronous and asynchronous RPC requests.
   5 *
   6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
   7 *
   8 * TCP NFS related read + write fixes
   9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  10 */
  11
  12#include <linux/module.h>
  13
  14#include <linux/sched.h>
  15#include <linux/interrupt.h>
  16#include <linux/slab.h>
  17#include <linux/mempool.h>
  18#include <linux/smp.h>
  19#include <linux/spinlock.h>
  20#include <linux/mutex.h>
  21#include <linux/freezer.h>
  22
  23#include <linux/sunrpc/clnt.h>
  24
  25#include "sunrpc.h"
  26
  27#ifdef RPC_DEBUG
  28#define RPCDBG_FACILITY		RPCDBG_SCHED
  29#endif
  30
  31#define CREATE_TRACE_POINTS
  32#include <trace/events/sunrpc.h>
  33
  34/*
  35 * RPC slabs and memory pools
  36 */
  37#define RPC_BUFFER_MAXSIZE	(2048)
  38#define RPC_BUFFER_POOLSIZE	(8)
  39#define RPC_TASK_POOLSIZE	(8)
  40static struct kmem_cache	*rpc_task_slabp __read_mostly;
  41static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
  42static mempool_t	*rpc_task_mempool __read_mostly;
  43static mempool_t	*rpc_buffer_mempool __read_mostly;
  44
  45static void			rpc_async_schedule(struct work_struct *);
  46static void			 rpc_release_task(struct rpc_task *task);
  47static void __rpc_queue_timer_fn(unsigned long ptr);
  48
  49/*
  50 * RPC tasks sit here while waiting for conditions to improve.
  51 */
  52static struct rpc_wait_queue delay_queue;
  53
  54/*
  55 * rpciod-related stuff
  56 */
  57struct workqueue_struct *rpciod_workqueue;
  58
  59/*
  60 * Disable the timer for a given RPC task. Should be called with
  61 * queue->lock and bh_disabled in order to avoid races within
  62 * rpc_run_timer().
  63 */
  64static void
  65__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  66{
  67	if (task->tk_timeout == 0)
  68		return;
  69	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
  70	task->tk_timeout = 0;
  71	list_del(&task->u.tk_wait.timer_list);
  72	if (list_empty(&queue->timer_list.list))
  73		del_timer(&queue->timer_list.timer);
  74}
  75
  76static void
  77rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
  78{
  79	queue->timer_list.expires = expires;
  80	mod_timer(&queue->timer_list.timer, expires);
  81}
  82
  83/*
  84 * Set up a timer for the current task.
  85 */
  86static void
  87__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  88{
  89	if (!task->tk_timeout)
  90		return;
  91
  92	dprintk("RPC: %5u setting alarm for %lu ms\n",
  93			task->tk_pid, task->tk_timeout * 1000 / HZ);
  94
  95	task->u.tk_wait.expires = jiffies + task->tk_timeout;
  96	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
  97		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
  98	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
  99}
 100
 101/*
 102 * Add new request to a priority queue.
 103 */
 104static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 105		struct rpc_task *task,
 106		unsigned char queue_priority)
 107{
 108	struct list_head *q;
 109	struct rpc_task *t;
 110
 111	INIT_LIST_HEAD(&task->u.tk_wait.links);
 112	q = &queue->tasks[queue_priority];
 113	if (unlikely(queue_priority > queue->maxpriority))
 114		q = &queue->tasks[queue->maxpriority];
 115	list_for_each_entry(t, q, u.tk_wait.list) {
 116		if (t->tk_owner == task->tk_owner) {
 117			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
 118			return;
 119		}
 120	}
 121	list_add_tail(&task->u.tk_wait.list, q);
 122}
 123
 124/*
 125 * Add new request to wait queue.
 126 *
 127 * Swapper tasks always get inserted at the head of the queue.
 128 * This should avoid many nasty memory deadlocks and hopefully
 129 * improve overall performance.
 130 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 131 */
 132static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 133		struct rpc_task *task,
 134		unsigned char queue_priority)
 135{
 136	BUG_ON (RPC_IS_QUEUED(task));
 137
 138	if (RPC_IS_PRIORITY(queue))
 139		__rpc_add_wait_queue_priority(queue, task, queue_priority);
 140	else if (RPC_IS_SWAPPER(task))
 141		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 142	else
 143		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 144	task->tk_waitqueue = queue;
 145	queue->qlen++;
 146	rpc_set_queued(task);
 147
 148	dprintk("RPC: %5u added to queue %p \"%s\"\n",
 149			task->tk_pid, queue, rpc_qname(queue));
 150}
 151
 152/*
 153 * Remove request from a priority queue.
 154 */
 155static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
 156{
 157	struct rpc_task *t;
 158
 159	if (!list_empty(&task->u.tk_wait.links)) {
 160		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
 161		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
 162		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
 163	}
 164}
 165
 166/*
 167 * Remove request from queue.
 168 * Note: must be called with spin lock held.
 169 */
 170static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
 171{
 172	__rpc_disable_timer(queue, task);
 173	if (RPC_IS_PRIORITY(queue))
 174		__rpc_remove_wait_queue_priority(task);
 175	list_del(&task->u.tk_wait.list);
 176	queue->qlen--;
 177	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
 178			task->tk_pid, queue, rpc_qname(queue));
 179}
 180
 181static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 182{
 183	queue->priority = priority;
 184	queue->count = 1 << (priority * 2);
 185}
 186
 187static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
 188{
 189	queue->owner = pid;
 190	queue->nr = RPC_BATCH_COUNT;
 191}
 192
 193static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
 194{
 195	rpc_set_waitqueue_priority(queue, queue->maxpriority);
 196	rpc_set_waitqueue_owner(queue, 0);
 197}
 198
 199static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 200{
 201	int i;
 202
 203	spin_lock_init(&queue->lock);
 204	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
 205		INIT_LIST_HEAD(&queue->tasks[i]);
 206	queue->maxpriority = nr_queues - 1;
 207	rpc_reset_waitqueue_priority(queue);
 208	queue->qlen = 0;
 209	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
 210	INIT_LIST_HEAD(&queue->timer_list.list);
 211	rpc_assign_waitqueue_name(queue, qname);
 212}
 213
 214void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 215{
 216	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
 217}
 218EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
 219
 220void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 221{
 222	__rpc_init_priority_wait_queue(queue, qname, 1);
 223}
 224EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 225
 226void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 227{
 228	del_timer_sync(&queue->timer_list.timer);
 229}
 230EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 231
 232static int rpc_wait_bit_killable(void *word)
 233{
 234	if (fatal_signal_pending(current))
 235		return -ERESTARTSYS;
 236	freezable_schedule();
 237	return 0;
 238}
 239
 240#ifdef RPC_DEBUG
 241static void rpc_task_set_debuginfo(struct rpc_task *task)
 242{
 243	static atomic_t rpc_pid;
 244
 245	task->tk_pid = atomic_inc_return(&rpc_pid);
 246}
 247#else
 248static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 249{
 250}
 251#endif
 252
 253static void rpc_set_active(struct rpc_task *task)
 254{
 255	trace_rpc_task_begin(task->tk_client, task, NULL);
 256
 257	rpc_task_set_debuginfo(task);
 258	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 259}
 260
 261/*
 262 * Mark an RPC call as having completed by clearing the 'active' bit
 263 * and then waking up all tasks that were sleeping.
 264 */
 265static int rpc_complete_task(struct rpc_task *task)
 266{
 267	void *m = &task->tk_runstate;
 268	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
 269	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
 270	unsigned long flags;
 271	int ret;
 272
 273	trace_rpc_task_complete(task->tk_client, task, NULL);
 274
 275	spin_lock_irqsave(&wq->lock, flags);
 276	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 277	ret = atomic_dec_and_test(&task->tk_count);
 278	if (waitqueue_active(wq))
 279		__wake_up_locked_key(wq, TASK_NORMAL, &k);
 280	spin_unlock_irqrestore(&wq->lock, flags);
 281	return ret;
 282}
 283
 284/*
 285 * Allow callers to wait for completion of an RPC call
 286 *
 287 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 288 * to enforce taking of the wq->lock and hence avoid races with
 289 * rpc_complete_task().
 290 */
 291int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 292{
 293	if (action == NULL)
 294		action = rpc_wait_bit_killable;
 295	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 296			action, TASK_KILLABLE);
 297}
 298EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
 299
 300/*
 301 * Make an RPC task runnable.
 302 *
 303 * Note: If the task is ASYNC, this must be called with
 304 * the spinlock held to protect the wait queue operation.
 305 */
 306static void rpc_make_runnable(struct rpc_task *task)
 307{
 308	rpc_clear_queued(task);
 309	if (rpc_test_and_set_running(task))
 310		return;
 311	if (RPC_IS_ASYNC(task)) {
 312		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 313		queue_work(rpciod_workqueue, &task->u.tk_work);
 314	} else
 315		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 316}
 317
 318/*
 319 * Prepare for sleeping on a wait queue.
 320 * By always appending tasks to the list we ensure FIFO behavior.
 321 * NB: An RPC task will only receive interrupt-driven events as long
 322 * as it's on a wait queue.
 323 */
 324static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 325		struct rpc_task *task,
 326		rpc_action action,
 327		unsigned char queue_priority)
 328{
 329	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 330			task->tk_pid, rpc_qname(q), jiffies);
 331
 332	trace_rpc_task_sleep(task->tk_client, task, q);
 333
 334	__rpc_add_wait_queue(q, task, queue_priority);
 335
 336	BUG_ON(task->tk_callback != NULL);
 337	task->tk_callback = action;
 338	__rpc_add_timer(q, task);
 339}
 340
 341void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 342				rpc_action action)
 343{
 344	/* We shouldn't ever put an inactive task to sleep */
 345	BUG_ON(!RPC_IS_ACTIVATED(task));
 346
 347	/*
 348	 * Protect the queue operations.
 349	 */
 350	spin_lock_bh(&q->lock);
 351	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
 352	spin_unlock_bh(&q->lock);
 353}
 354EXPORT_SYMBOL_GPL(rpc_sleep_on);
 355
 356void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 357		rpc_action action, int priority)
 358{
 359	/* We shouldn't ever put an inactive task to sleep */
 360	BUG_ON(!RPC_IS_ACTIVATED(task));
 361
 362	/*
 363	 * Protect the queue operations.
 364	 */
 365	spin_lock_bh(&q->lock);
 366	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
 367	spin_unlock_bh(&q->lock);
 368}
 369
 370/**
 371 * __rpc_do_wake_up_task - wake up a single rpc_task
 372 * @queue: wait queue
 373 * @task: task to be woken up
 374 *
 375 * Caller must hold queue->lock, and have cleared the task queued flag.
 376 */
 377static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 378{
 379	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
 380			task->tk_pid, jiffies);
 381
 382	/* Has the task been executed yet? If not, we cannot wake it up! */
 383	if (!RPC_IS_ACTIVATED(task)) {
 384		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
 385		return;
 386	}
 387
 388	trace_rpc_task_wakeup(task->tk_client, task, queue);
 389
 390	__rpc_remove_wait_queue(queue, task);
 391
 392	rpc_make_runnable(task);
 393
 394	dprintk("RPC:       __rpc_wake_up_task done\n");
 395}
 396
 397/*
 398 * Wake up a queued task while the queue lock is being held
 399 */
 400static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 401{
 402	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
 403		__rpc_do_wake_up_task(queue, task);
 404}
 405
 406/*
 407 * Tests whether rpc queue is empty
 408 */
 409int rpc_queue_empty(struct rpc_wait_queue *queue)
 410{
 411	int res;
 412
 413	spin_lock_bh(&queue->lock);
 414	res = queue->qlen;
 415	spin_unlock_bh(&queue->lock);
 416	return res == 0;
 417}
 418EXPORT_SYMBOL_GPL(rpc_queue_empty);
 419
 420/*
 421 * Wake up a task on a specific queue
 422 */
 423void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 424{
 425	spin_lock_bh(&queue->lock);
 426	rpc_wake_up_task_queue_locked(queue, task);
 427	spin_unlock_bh(&queue->lock);
 428}
 429EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 430
 431/*
 432 * Wake up the next task on a priority queue.
 433 */
 434static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 435{
 436	struct list_head *q;
 437	struct rpc_task *task;
 438
 439	/*
 440	 * Service a batch of tasks from a single owner.
 441	 */
 442	q = &queue->tasks[queue->priority];
 443	if (!list_empty(q)) {
 444		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
 445		if (queue->owner == task->tk_owner) {
 446			if (--queue->nr)
 447				goto out;
 448			list_move_tail(&task->u.tk_wait.list, q);
 449		}
 450		/*
 451		 * Check if we need to switch queues.
 452		 */
 453		if (--queue->count)
 454			goto new_owner;
 455	}
 456
 457	/*
 458	 * Service the next queue.
 459	 */
 460	do {
 461		if (q == &queue->tasks[0])
 462			q = &queue->tasks[queue->maxpriority];
 463		else
 464			q = q - 1;
 465		if (!list_empty(q)) {
 466			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
 467			goto new_queue;
 468		}
 469	} while (q != &queue->tasks[queue->priority]);
 470
 471	rpc_reset_waitqueue_priority(queue);
 472	return NULL;
 473
 474new_queue:
 475	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
 476new_owner:
 477	rpc_set_waitqueue_owner(queue, task->tk_owner);
 478out:
 479	return task;
 480}
 481
 482static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
 483{
 484	if (RPC_IS_PRIORITY(queue))
 485		return __rpc_find_next_queued_priority(queue);
 486	if (!list_empty(&queue->tasks[0]))
 487		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
 488	return NULL;
 489}
 490
 491/*
 492 * Wake up the first task on the wait queue.
 493 */
 494struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
 495		bool (*func)(struct rpc_task *, void *), void *data)
 496{
 497	struct rpc_task	*task = NULL;
 498
 499	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
 500			queue, rpc_qname(queue));
 501	spin_lock_bh(&queue->lock);
 502	task = __rpc_find_next_queued(queue);
 503	if (task != NULL) {
 504		if (func(task, data))
 505			rpc_wake_up_task_queue_locked(queue, task);
 506		else
 507			task = NULL;
 508	}
 509	spin_unlock_bh(&queue->lock);
 510
 511	return task;
 512}
 513EXPORT_SYMBOL_GPL(rpc_wake_up_first);
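
rpc_wake_up_first() lets the caller accept or veto the candidate task through a callback, and rpc_wake_up_next() below is the same call with an always-true predicate. The shape of that pattern in isolation (hypothetical demo, not the kernel types):

#include <stdio.h>
#include <stdbool.h>

struct item { int ready; int id; };

typedef bool (*pick_fn)(struct item *, void *);

static struct item *wake_first(struct item *v, int n, pick_fn func, void *data)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!v[i].ready)
			continue;
		if (func(&v[i], data)) {	/* caller approves: "wake" it */
			v[i].ready = 0;
			return &v[i];
		}
		return NULL;			/* first candidate rejected */
	}
	return NULL;
}

static bool wake_any(struct item *it, void *data)
{
	return true;
}

int main(void)
{
	struct item v[] = { {0, 1}, {1, 2}, {1, 3} };
	struct item *it = wake_first(v, 3, wake_any, NULL);

	printf("woke item %d\n", it ? it->id : -1);	/* item 2 */
	return 0;
}
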
 514
 515static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
 516{
 517	return true;
 518}
 519
 520/*
 521 * Wake up the next task on the wait queue.
 522*/
 523struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
 524{
 525	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
 526}
 527EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 528
 529/**
 530 * rpc_wake_up - wake up all rpc_tasks
 531 * @queue: rpc_wait_queue on which the tasks are sleeping
 532 *
 533 * Grabs queue->lock
 534 */
 535void rpc_wake_up(struct rpc_wait_queue *queue)
 536{
 537	struct list_head *head;
 538
 539	spin_lock_bh(&queue->lock);
 540	head = &queue->tasks[queue->maxpriority];
 541	for (;;) {
 542		while (!list_empty(head)) {
 543			struct rpc_task *task;
 544			task = list_first_entry(head,
 545					struct rpc_task,
 546					u.tk_wait.list);
 547			rpc_wake_up_task_queue_locked(queue, task);
 548		}
 549		if (head == &queue->tasks[0])
 550			break;
 551		head--;
 552	}
 553	spin_unlock_bh(&queue->lock);
 554}
 555EXPORT_SYMBOL_GPL(rpc_wake_up);
 556
 557/**
 558 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 559 * @queue: rpc_wait_queue on which the tasks are sleeping
 560 * @status: status value to set
 561 *
 562 * Grabs queue->lock
 563 */
 564void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 565{
 566	struct list_head *head;
 567
 568	spin_lock_bh(&queue->lock);
 569	head = &queue->tasks[queue->maxpriority];
 570	for (;;) {
 571		while (!list_empty(head)) {
 572			struct rpc_task *task;
 573			task = list_first_entry(head,
 574					struct rpc_task,
 575					u.tk_wait.list);
 576			task->tk_status = status;
 577			rpc_wake_up_task_queue_locked(queue, task);
 578		}
 579		if (head == &queue->tasks[0])
 580			break;
 581		head--;
 582	}
 583	spin_unlock_bh(&queue->lock);
 584}
 585EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 586
 587static void __rpc_queue_timer_fn(unsigned long ptr)
 588{
 589	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
 590	struct rpc_task *task, *n;
 591	unsigned long expires, now, timeo;
 592
 593	spin_lock(&queue->lock);
 594	expires = now = jiffies;
 595	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 596		timeo = task->u.tk_wait.expires;
 597		if (time_after_eq(now, timeo)) {
 598			dprintk("RPC: %5u timeout\n", task->tk_pid);
 599			task->tk_status = -ETIMEDOUT;
 600			rpc_wake_up_task_queue_locked(queue, task);
 601			continue;
 602		}
 603		if (expires == now || time_after(expires, timeo))
 604			expires = timeo;
 605	}
 606	if (!list_empty(&queue->timer_list.list))
 607		rpc_set_queue_timer(queue, expires);
 608	spin_unlock(&queue->lock);
 609}
 610
 611static void __rpc_atrun(struct rpc_task *task)
 612{
 613	task->tk_status = 0;
 614}
 615
 616/*
 617 * Run a task at a later time
 618 */
 619void rpc_delay(struct rpc_task *task, unsigned long delay)
 620{
 621	task->tk_timeout = delay;
 622	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
 623}
 624EXPORT_SYMBOL_GPL(rpc_delay);
 625
 626/*
 627 * Helper to call task->tk_ops->rpc_call_prepare
 628 */
 629void rpc_prepare_task(struct rpc_task *task)
 630{
 631	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 632}
 633
 634static void
 635rpc_init_task_statistics(struct rpc_task *task)
 636{
 637	/* Initialize retry counters */
 638	task->tk_garb_retry = 2;
 639	task->tk_cred_retry = 2;
 640	task->tk_rebind_retry = 2;
 641
 642	/* starting timestamp */
 643	task->tk_start = ktime_get();
 644}
 645
 646static void
 647rpc_reset_task_statistics(struct rpc_task *task)
 648{
 649	task->tk_timeouts = 0;
 650	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
 651
 652	rpc_init_task_statistics(task);
 653}
 654
 655/*
 656 * Helper that calls task->tk_ops->rpc_call_done if it exists
 657 */
 658void rpc_exit_task(struct rpc_task *task)
 659{
 660	task->tk_action = NULL;
 661	if (task->tk_ops->rpc_call_done != NULL) {
 662		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 663		if (task->tk_action != NULL) {
 664			WARN_ON(RPC_ASSASSINATED(task));
 665			/* Always release the RPC slot and buffer memory */
 666			xprt_release(task);
 667			rpc_reset_task_statistics(task);
 668		}
 669	}
 670}
 671
 672void rpc_exit(struct rpc_task *task, int status)
 673{
 674	task->tk_status = status;
 675	task->tk_action = rpc_exit_task;
 676	if (RPC_IS_QUEUED(task))
 677		rpc_wake_up_queued_task(task->tk_waitqueue, task);
 678}
 679EXPORT_SYMBOL_GPL(rpc_exit);
 680
 681void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 682{
 683	if (ops->rpc_release != NULL)
 684		ops->rpc_release(calldata);
 685}
 686
 687/*
 688 * This is the RPC `scheduler' (or rather, the finite state machine).
 689 */
 690static void __rpc_execute(struct rpc_task *task)
 691{
 692	struct rpc_wait_queue *queue;
 693	int task_is_async = RPC_IS_ASYNC(task);
 694	int status = 0;
 695
 696	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 697			task->tk_pid, task->tk_flags);
 698
 699	BUG_ON(RPC_IS_QUEUED(task));
 700
 701	for (;;) {
 702		void (*do_action)(struct rpc_task *);
 703
 704		/*
 705		 * Execute any pending callback first.
 706		 */
 707		do_action = task->tk_callback;
 708		task->tk_callback = NULL;
 709		if (do_action == NULL) {
 710			/*
 711			 * Perform the next FSM step.
 712			 * tk_action may be NULL if the task has been killed.
 713			 * In particular, note that rpc_killall_tasks may
 714			 * do this at any time, so beware when dereferencing.
 715			 */
 716			do_action = task->tk_action;
 717			if (do_action == NULL)
 718				break;
 719		}
 720		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
 721		do_action(task);
 722
 723		/*
 724		 * Lockless check for whether task is sleeping or not.
 725		 */
 726		if (!RPC_IS_QUEUED(task))
 727			continue;
 728		/*
 729		 * The queue->lock protects against races with
 730		 * rpc_make_runnable().
 731		 *
 732		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
 733		 * rpc_task, rpc_make_runnable() can assign it to a
 734		 * different workqueue. We therefore cannot assume that the
 735		 * rpc_task pointer may still be dereferenced.
 736		 */
 737		queue = task->tk_waitqueue;
 738		spin_lock_bh(&queue->lock);
 739		if (!RPC_IS_QUEUED(task)) {
 740			spin_unlock_bh(&queue->lock);
 741			continue;
 742		}
 743		rpc_clear_running(task);
 744		spin_unlock_bh(&queue->lock);
 745		if (task_is_async)
 746			return;
 747
 748		/* sync task: sleep here */
 749		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
 750		status = out_of_line_wait_on_bit(&task->tk_runstate,
 751				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 752				TASK_KILLABLE);
 753		if (status == -ERESTARTSYS) {
 754			/*
 755			 * When a sync task receives a signal, it exits with
 756			 * -ERESTARTSYS. In order to catch any callbacks that
 757			 * clean up after sleeping on some queue, we don't
 758			 * break the loop here, but go around once more.
 759			 */
 760			dprintk("RPC: %5u got signal\n", task->tk_pid);
 761			task->tk_flags |= RPC_TASK_KILLED;
 762			rpc_exit(task, -ERESTARTSYS);
 763		}
 764		rpc_set_running(task);
 765		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
 766	}
 767
 768	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
 769			task->tk_status);
 770	/* Release all resources associated with the task */
 771	rpc_release_task(task);
 772}
 773
 774/*
 775 * User-visible entry point to the scheduler.
 776 *
 777 * This may be called recursively if e.g. an async NFS task updates
 778 * the attributes and finds that dirty pages must be flushed.
 779 * NOTE: Upon exit of this function the task is guaranteed to be
 780 *	 released. In particular note that tk_release() will have
 781 *	 been called, so your task memory may have been freed.
 782 */
 783void rpc_execute(struct rpc_task *task)
 784{
 785	rpc_set_active(task);
 786	rpc_make_runnable(task);
 787	if (!RPC_IS_ASYNC(task))
 788		__rpc_execute(task);
 789}
 790
 791static void rpc_async_schedule(struct work_struct *work)
 792{
 793	current->flags |= PF_FSTRANS;
 794	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 795	current->flags &= ~PF_FSTRANS;
 796}
 797
 798/**
 799 * rpc_malloc - allocate an RPC buffer
 800 * @task: RPC task that will use this buffer
 801 * @size: requested byte size
 802 *
 803 * To prevent rpciod from hanging, this allocator never sleeps,
 804 * returning NULL if the request cannot be serviced immediately.
 805 * The caller can arrange to sleep in a way that is safe for rpciod.
 806 *
 807 * Most requests are 'small' (under 2KiB) and can be serviced from a
 808 * mempool, ensuring that NFS reads and writes can always proceed,
 809 * and that there is good locality of reference for these buffers.
 810 *
 811 * In order to avoid memory starvation triggering more writebacks of
 812 * NFS requests, we avoid using GFP_KERNEL.
 813 */
 814void *rpc_malloc(struct rpc_task *task, size_t size)
 815{
 816	struct rpc_buffer *buf;
 817	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
 818
 819	size += sizeof(struct rpc_buffer);
 820	if (size <= RPC_BUFFER_MAXSIZE)
 821		buf = mempool_alloc(rpc_buffer_mempool, gfp);
 822	else
 823		buf = kmalloc(size, gfp);
 824
 825	if (!buf)
 826		return NULL;
 827
 828	buf->len = size;
 829	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
 830			task->tk_pid, size, buf);
 831	return &buf->data;
 832}
 833EXPORT_SYMBOL_GPL(rpc_malloc);
 834
 835/**
 836 * rpc_free - free buffer allocated via rpc_malloc
 837 * @buffer: buffer to free
 838 *
 839 */
 840void rpc_free(void *buffer)
 841{
 842	size_t size;
 843	struct rpc_buffer *buf;
 844
 845	if (!buffer)
 846		return;
 847
 848	buf = container_of(buffer, struct rpc_buffer, data);
 849	size = buf->len;
 850
 851	dprintk("RPC:       freeing buffer of size %zu at %p\n",
 852			size, buf);
 853
 854	if (size <= RPC_BUFFER_MAXSIZE)
 855		mempool_free(buf, rpc_buffer_mempool);
 856	else
 857		kfree(buf);
 858}
 859EXPORT_SYMBOL_GPL(rpc_free);
 860
 861/*
 862 * Creation and deletion of RPC task structures
 863 */
 864static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
 865{
 866	memset(task, 0, sizeof(*task));
 867	atomic_set(&task->tk_count, 1);
 868	task->tk_flags  = task_setup_data->flags;
 869	task->tk_ops = task_setup_data->callback_ops;
 870	task->tk_calldata = task_setup_data->callback_data;
 871	INIT_LIST_HEAD(&task->tk_task);
 872
 873	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
 874	task->tk_owner = current->tgid;
 875
 876	/* Initialize workqueue for async tasks */
 877	task->tk_workqueue = task_setup_data->workqueue;
 878
 879	if (task->tk_ops->rpc_call_prepare != NULL)
 880		task->tk_action = rpc_prepare_task;
 881
 882	rpc_init_task_statistics(task);
 883
 884	dprintk("RPC:       new task initialized, procpid %u\n",
 885				task_pid_nr(current));
 886}
 887
 888static struct rpc_task *
 889rpc_alloc_task(void)
 890{
 891	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 892}
 893
 894/*
 895 * Create a new task for the specified client.
 896 */
 897struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 898{
 899	struct rpc_task	*task = setup_data->task;
 900	unsigned short flags = 0;
 901
 902	if (task == NULL) {
 903		task = rpc_alloc_task();
 904		if (task == NULL) {
 905			rpc_release_calldata(setup_data->callback_ops,
 906					setup_data->callback_data);
 907			return ERR_PTR(-ENOMEM);
 908		}
 909		flags = RPC_TASK_DYNAMIC;
 910	}
 911
 912	rpc_init_task(task, setup_data);
 913	task->tk_flags |= flags;
 914	dprintk("RPC:       allocated task %p\n", task);
 915	return task;
 916}
 917
 918static void rpc_free_task(struct rpc_task *task)
 919{
 920	const struct rpc_call_ops *tk_ops = task->tk_ops;
 921	void *calldata = task->tk_calldata;
 922
 923	if (task->tk_flags & RPC_TASK_DYNAMIC) {
 924		dprintk("RPC: %5u freeing task\n", task->tk_pid);
 925		mempool_free(task, rpc_task_mempool);
 926	}
 927	rpc_release_calldata(tk_ops, calldata);
 928}
 929
 930static void rpc_async_release(struct work_struct *work)
 931{
 932	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 933}
 934
 935static void rpc_release_resources_task(struct rpc_task *task)
 936{
 937	if (task->tk_rqstp)
 938		xprt_release(task);
 939	if (task->tk_msg.rpc_cred) {
 940		put_rpccred(task->tk_msg.rpc_cred);
 941		task->tk_msg.rpc_cred = NULL;
 942	}
 943	rpc_task_release_client(task);
 944}
 945
 946static void rpc_final_put_task(struct rpc_task *task,
 947		struct workqueue_struct *q)
 948{
 949	if (q != NULL) {
 950		INIT_WORK(&task->u.tk_work, rpc_async_release);
 951		queue_work(q, &task->u.tk_work);
 952	} else
 953		rpc_free_task(task);
 954}
 955
 956static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
 957{
 958	if (atomic_dec_and_test(&task->tk_count)) {
 959		rpc_release_resources_task(task);
 960		rpc_final_put_task(task, q);
 961	}
 962}
 963
 964void rpc_put_task(struct rpc_task *task)
 965{
 966	rpc_do_put_task(task, NULL);
 967}
 968EXPORT_SYMBOL_GPL(rpc_put_task);
 969
 970void rpc_put_task_async(struct rpc_task *task)
 971{
 972	rpc_do_put_task(task, task->tk_workqueue);
 973}
 974EXPORT_SYMBOL_GPL(rpc_put_task_async);
 975
 976static void rpc_release_task(struct rpc_task *task)
 977{
 978	dprintk("RPC: %5u release task\n", task->tk_pid);
 979
 980	BUG_ON (RPC_IS_QUEUED(task));
 981
 982	rpc_release_resources_task(task);
 983
 984	/*
 985	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
 986	 * so it should be safe to use task->tk_count as a test for whether
 987	 * or not any other processes still hold references to our rpc_task.
 988	 */
 989	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
 990		/* Wake up anyone who may be waiting for task completion */
 991		if (!rpc_complete_task(task))
 992			return;
 993	} else {
 994		if (!atomic_dec_and_test(&task->tk_count))
 995			return;
 996	}
 997	rpc_final_put_task(task, task->tk_workqueue);
 998}
 999
1000int rpciod_up(void)
1001{
1002	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1003}
1004
1005void rpciod_down(void)
1006{
1007	module_put(THIS_MODULE);
1008}
1009
1010/*
1011 * Start up the rpciod workqueue.
1012 */
1013static int rpciod_start(void)
1014{
1015	struct workqueue_struct *wq;
1016
1017	/*
1018	 * Create the rpciod thread and wait for it to start.
1019	 */
1020	dprintk("RPC:       creating workqueue rpciod\n");
1021	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
1022	rpciod_workqueue = wq;
1023	return rpciod_workqueue != NULL;
1024}
1025
1026static void rpciod_stop(void)
1027{
1028	struct workqueue_struct *wq = NULL;
1029
1030	if (rpciod_workqueue == NULL)
1031		return;
1032	dprintk("RPC:       destroying workqueue rpciod\n");
1033
1034	wq = rpciod_workqueue;
1035	rpciod_workqueue = NULL;
1036	destroy_workqueue(wq);
1037}
1038
1039void
1040rpc_destroy_mempool(void)
1041{
1042	rpciod_stop();
1043	if (rpc_buffer_mempool)
1044		mempool_destroy(rpc_buffer_mempool);
1045	if (rpc_task_mempool)
1046		mempool_destroy(rpc_task_mempool);
1047	if (rpc_task_slabp)
1048		kmem_cache_destroy(rpc_task_slabp);
1049	if (rpc_buffer_slabp)
1050		kmem_cache_destroy(rpc_buffer_slabp);
1051	rpc_destroy_wait_queue(&delay_queue);
1052}
1053
1054int
1055rpc_init_mempool(void)
1056{
1057	/*
1058	 * The following is not strictly a mempool initialisation,
1059	 * but there is no harm in doing it here
1060	 */
1061	rpc_init_wait_queue(&delay_queue, "delayq");
1062	if (!rpciod_start())
1063		goto err_nomem;
1064
1065	rpc_task_slabp = kmem_cache_create("rpc_tasks",
1066					     sizeof(struct rpc_task),
1067					     0, SLAB_HWCACHE_ALIGN,
1068					     NULL);
1069	if (!rpc_task_slabp)
1070		goto err_nomem;
1071	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1072					     RPC_BUFFER_MAXSIZE,
1073					     0, SLAB_HWCACHE_ALIGN,
1074					     NULL);
1075	if (!rpc_buffer_slabp)
1076		goto err_nomem;
1077	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1078						    rpc_task_slabp);
1079	if (!rpc_task_mempool)
1080		goto err_nomem;
1081	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1082						      rpc_buffer_slabp);
1083	if (!rpc_buffer_mempool)
1084		goto err_nomem;
1085	return 0;
1086err_nomem:
1087	rpc_destroy_mempool();
1088	return -ENOMEM;
1089}